author     hyukjinkwon <gurwls223@gmail.com>   2016-04-14 09:43:41 +0100
committer  Sean Owen <sowen@cloudera.com>      2016-04-14 09:43:41 +0100
commit     6fc3dc8839eaed673c64ec87af6dfe24f8cebe0c (patch)
tree       db47cd619d84a7890ff1cacc78a44046ace85633 /sql
parent     478af2f45595913c9b8f560d13e8d88447486f99 (diff)
download   spark-6fc3dc8839eaed673c64ec87af6dfe24f8cebe0c.tar.gz
           spark-6fc3dc8839eaed673c64ec87af6dfe24f8cebe0c.tar.bz2
           spark-6fc3dc8839eaed673c64ec87af6dfe24f8cebe0c.zip
[MINOR][SQL] Remove extra anonymous closure within functional transformations
## What changes were proposed in this pull request?

This PR removes the extra anonymous closure within functional transformations. For example,

```scala
.map(item => {
  ...
})
```

can simply be written as below:

```scala
.map { item =>
  ...
}
```

## How was this patch tested?

Related unit tests and `sbt scalastyle`.

Author: hyukjinkwon <gurwls223@gmail.com>

Closes #12382 from HyukjinKwon/minor-extra-closers.
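For readers unfamiliar with the two syntaxes, here is a minimal, self-contained sketch contrasting the closure style this PR removes with the brace-block style it keeps. The `Item` class and the label-building logic are hypothetical illustrations, not code from this diff.

```scala
// A minimal sketch contrasting the two closure styles discussed in the commit
// message. The Item class and the label logic are hypothetical examples,
// not code taken from this diff.
object ClosureStyleExample {
  final case class Item(name: String, price: Double)

  def main(args: Array[String]): Unit = {
    val items = Seq(Item("apple", 1.0), Item("pear", 2.5))

    // Style removed by this PR: a multi-line body wrapped in an extra
    // anonymous closure, i.e. parentheses around `item => { ... }`.
    val labelsOld = items.map(item => {
      val label = s"${item.name}: ${item.price}"
      label.toUpperCase
    })

    // Preferred style: pass the multi-line body directly as a brace block.
    val labelsNew = items.map { item =>
      val label = s"${item.name}: ${item.price}"
      label.toUpperCase
    }

    // The change is purely syntactic; both forms produce the same result.
    assert(labelsOld == labelsNew)
    println(labelsNew.mkString(", "))
  }
}
```

Both forms compile to the same function value; the diff below applies this rewrite across the `sql` modules.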
Diffstat (limited to 'sql')
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala |  4
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala               |  4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala                       |  4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala           |  4
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala                            | 27
5 files changed, 19 insertions, 24 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
index ee7f4fadca..f43626ca81 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
@@ -519,7 +519,7 @@ class CodegenContext {
// Get all the expressions that appear at least twice and set up the state for subexpression
// elimination.
val commonExprs = equivalentExpressions.getAllEquivalentExprs.filter(_.size > 1)
- commonExprs.foreach(e => {
+ commonExprs.foreach { e =>
val expr = e.head
val fnName = freshName("evalExpr")
val isNull = s"${fnName}IsNull"
@@ -561,7 +561,7 @@ class CodegenContext {
subexprFunctions += s"$fnName($INPUT_ROW);"
val state = SubExprEliminationState(isNull, value)
e.foreach(subExprEliminationExprs.put(_, state))
- })
+ }
}
/**
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
index 438cbabdbb..aeb1842677 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
@@ -286,10 +286,10 @@ object SetOperationPushDown extends Rule[LogicalPlan] with PredicateHelper {
assert(children.nonEmpty)
if (projectList.forall(_.deterministic)) {
val newFirstChild = Project(projectList, children.head)
- val newOtherChildren = children.tail.map ( child => {
+ val newOtherChildren = children.tail.map { child =>
val rewrites = buildRewrites(children.head, child)
Project(projectList.map(pushToRight(_, rewrites)), child)
- } )
+ }
Union(newFirstChild +: newOtherChildren)
} else {
p
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
index aba500ad8d..344aaff348 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicOperators.scala
@@ -400,7 +400,7 @@ case class Range(
sqlContext
.sparkContext
.parallelize(0 until numSlices, numSlices)
- .mapPartitionsWithIndex((i, _) => {
+ .mapPartitionsWithIndex { (i, _) =>
val partitionStart = (i * numElements) / numSlices * step + start
val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start
def getSafeMargin(bi: BigInt): Long =
@@ -444,7 +444,7 @@ case class Range(
unsafeRow
}
}
- })
+ }
}
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
index b7ff5f7242..065c8572b0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
@@ -251,12 +251,12 @@ object JdbcUtils extends Logging {
def schemaString(df: DataFrame, url: String): String = {
val sb = new StringBuilder()
val dialect = JdbcDialects.get(url)
- df.schema.fields foreach { field => {
+ df.schema.fields foreach { field =>
val name = field.name
val typ: String = getJdbcType(field.dataType, dialect).databaseTypeDefinition
val nullable = if (field.nullable) "" else "NOT NULL"
sb.append(s", $name $typ $nullable")
- }}
+ }
if (sb.length < 2) "" else sb.substring(2)
}
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
index 589862c7c0..585befe378 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
@@ -450,9 +450,7 @@ private[hive] trait HiveInspectors {
if (o != null) {
val array = o.asInstanceOf[ArrayData]
val values = new java.util.ArrayList[Any](array.numElements())
- array.foreach(elementType, (_, e) => {
- values.add(wrapper(e))
- })
+ array.foreach(elementType, (_, e) => values.add(wrapper(e)))
values
} else {
null
@@ -468,9 +466,8 @@ private[hive] trait HiveInspectors {
if (o != null) {
val map = o.asInstanceOf[MapData]
val jmap = new java.util.HashMap[Any, Any](map.numElements())
- map.foreach(mt.keyType, mt.valueType, (k, v) => {
- jmap.put(keyWrapper(k), valueWrapper(v))
- })
+ map.foreach(mt.keyType, mt.valueType, (k, v) =>
+ jmap.put(keyWrapper(k), valueWrapper(v)))
jmap
} else {
null
@@ -587,9 +584,9 @@ private[hive] trait HiveInspectors {
case x: ListObjectInspector =>
val list = new java.util.ArrayList[Object]
val tpe = dataType.asInstanceOf[ArrayType].elementType
- a.asInstanceOf[ArrayData].foreach(tpe, (_, e) => {
+ a.asInstanceOf[ArrayData].foreach(tpe, (_, e) =>
list.add(wrap(e, x.getListElementObjectInspector, tpe))
- })
+ )
list
case x: MapObjectInspector =>
val keyType = dataType.asInstanceOf[MapType].keyType
@@ -599,10 +596,10 @@ private[hive] trait HiveInspectors {
// Some UDFs seem to assume we pass in a HashMap.
val hashMap = new java.util.HashMap[Any, Any](map.numElements())
- map.foreach(keyType, valueType, (k, v) => {
+ map.foreach(keyType, valueType, (k, v) =>
hashMap.put(wrap(k, x.getMapKeyObjectInspector, keyType),
wrap(v, x.getMapValueObjectInspector, valueType))
- })
+ )
hashMap
}
@@ -704,9 +701,8 @@ private[hive] trait HiveInspectors {
ObjectInspectorFactory.getStandardConstantListObjectInspector(listObjectInspector, null)
} else {
val list = new java.util.ArrayList[Object]()
- value.asInstanceOf[ArrayData].foreach(dt, (_, e) => {
- list.add(wrap(e, listObjectInspector, dt))
- })
+ value.asInstanceOf[ArrayData].foreach(dt, (_, e) =>
+ list.add(wrap(e, listObjectInspector, dt)))
ObjectInspectorFactory.getStandardConstantListObjectInspector(listObjectInspector, list)
}
case Literal(value, MapType(keyType, valueType, _)) =>
@@ -718,9 +714,8 @@ private[hive] trait HiveInspectors {
val map = value.asInstanceOf[MapData]
val jmap = new java.util.HashMap[Any, Any](map.numElements())
- map.foreach(keyType, valueType, (k, v) => {
- jmap.put(wrap(k, keyOI, keyType), wrap(v, valueOI, valueType))
- })
+ map.foreach(keyType, valueType, (k, v) =>
+ jmap.put(wrap(k, keyOI, keyType), wrap(v, valueOI, valueType)))
ObjectInspectorFactory.getStandardConstantMapObjectInspector(keyOI, valueOI, jmap)
}