author     Kousuke Saruta <sarutak@oss.nttdata.co.jp>    2016-01-12 00:51:00 -0800
committer  Reynold Xin <rxin@databricks.com>             2016-01-12 00:51:00 -0800
commit     8cfa218f4f1b05f4d076ec15dd0a033ad3e4500d (patch)
tree       31e99044d63b89311821df7a61e6f4882114677a
parent     112abf9100f05be436e449817468c50174712c78 (diff)
download   spark-8cfa218f4f1b05f4d076ec15dd0a033ad3e4500d.tar.gz
           spark-8cfa218f4f1b05f4d076ec15dd0a033ad3e4500d.tar.bz2
           spark-8cfa218f4f1b05f4d076ec15dd0a033ad3e4500d.zip
[SPARK-12692][BUILD][SQL] Scala style: Fix the style violation (Space before "," or ":")
Fix the style violation (space before "," and ":"). This PR is a follow-up for #10643.

Author: Kousuke Saruta <sarutak@oss.nttdata.co.jp>

Closes #10718 from sarutak/SPARK-12692-followup-sql.
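The rule being enforced is purely lexical: no whitespace may appear immediately before a comma or a colon. A minimal before/after sketch of the pattern fixed throughout this patch (the object, method, and parameter names are made up for illustration):

    import scala.reflect.ClassTag

    object StyleExample {
      // Before (flagged by DisallowSpaceBeforeTokenChecker: space before ':' and ','):
      //   def render[T : ClassTag](label: String , width: Int) : String = label.take(width)

      // After (compliant with this patch):
      def render[T: ClassTag](label: String, width: Int): String = label.take(width)
    }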
-rw-r--r--  scalastyle-config.xml | 2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala | 6
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala | 2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala | 6
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala | 4
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala | 2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala | 4
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala | 2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala | 2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala | 2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/stringExpressions.scala | 6
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala | 6
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala | 2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala | 2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala | 2
-rw-r--r--  sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/EncoderErrorMessageSuite.scala | 2
-rw-r--r--  sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoderSuite.scala | 6
-rw-r--r--  sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/Column.scala | 4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala | 36
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameNaFunctions.scala | 8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala | 6
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala | 18
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala | 10
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala | 8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala | 12
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala | 10
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/api/r/SQLUtils.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/Queryable.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TypedAggregateExpression.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProduct.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/stat/FrequentItems.scala | 4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala | 8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/functions.scala | 6
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/jdbc/AggregatedDialect.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala | 10
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala | 7
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala | 4
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala | 26
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala | 4
-rw-r--r--  sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ReflectionUtils.scala | 2
-rw-r--r--  sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala | 4
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala | 8
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala | 4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala | 4
54 files changed, 150 insertions, 141 deletions
diff --git a/scalastyle-config.xml b/scalastyle-config.xml
index 2439a1f715..b873b62721 100644
--- a/scalastyle-config.xml
+++ b/scalastyle-config.xml
@@ -218,7 +218,7 @@ This file is divided into 3 sections:
<check level="error" class="org.scalastyle.scalariform.EqualsHashCodeChecker" enabled="false"></check>
<!-- Should turn this on, but we have a few places that need to be fixed first -->
- <check level="warning" class="org.scalastyle.scalariform.DisallowSpaceBeforeTokenChecker" enabled="true">
+ <check customId="whitespacebeforetoken" level="warning" class="org.scalastyle.scalariform.DisallowSpaceBeforeTokenChecker" enabled="true">
<parameters>
<parameter name="tokens">COLON, COMMA</parameter>
</parameters>
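Giving the checker a customId is what makes the targeted suppressions later in this patch possible; without an id the rule could only be disabled wholesale. The id is referenced from scalastyle's standard on/off comment pairs, as in this sketch (the class is hypothetical; the same pattern is applied in dsl/package.scala, Column.scala, and Decimal.scala below):

    class Flag(val on: Boolean) {
      // scalastyle:off whitespacebeforetoken
      def unary_! : Flag = new Flag(!on)  // the space before ':' cannot be removed here, so the named rule is silenced locally
      // scalastyle:on whitespacebeforetoken
    }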
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
index 79f723cf9b..23fea0e283 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
@@ -49,7 +49,7 @@ object ScalaReflection extends ScalaReflection {
* Unlike `schemaFor`, this function doesn't do any massaging of types into the Spark SQL type
* system. As a result, ObjectType will be returned for things like boxed Integers
*/
- def dataTypeFor[T : TypeTag]: DataType = dataTypeFor(localTypeOf[T])
+ def dataTypeFor[T: TypeTag]: DataType = dataTypeFor(localTypeOf[T])
private def dataTypeFor(tpe: `Type`): DataType = ScalaReflectionLock.synchronized {
tpe match {
@@ -116,7 +116,7 @@ object ScalaReflection extends ScalaReflection {
* from ordinal 0 (since there are no names to map to). The actual location can be moved by
* calling resolve/bind with a new schema.
*/
- def constructorFor[T : TypeTag]: Expression = {
+ def constructorFor[T: TypeTag]: Expression = {
val tpe = localTypeOf[T]
val clsName = getClassNameFromType(tpe)
val walkedTypePath = s"""- root class: "${clsName}"""" :: Nil
@@ -386,7 +386,7 @@ object ScalaReflection extends ScalaReflection {
* * the element type of [[Array]] or [[Seq]]: `array element class: "abc.xyz.MyClass"`
* * the field of [[Product]]: `field (class: "abc.xyz.MyClass", name: "myField")`
*/
- def extractorsFor[T : TypeTag](inputObject: Expression): CreateNamedStruct = {
+ def extractorsFor[T: TypeTag](inputObject: Expression): CreateNamedStruct = {
val tpe = localTypeOf[T]
val clsName = getClassNameFromType(tpe)
val walkedTypePath = s"""- root class: "${clsName}"""" :: Nil
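The `[T : TypeTag]` to `[T: TypeTag]` edits in this file touch only whitespace: a context bound means the same thing with or without the space. A small sketch of the equivalence, assuming scala-reflect on the classpath (object and method names are illustrative):

    import scala.reflect.runtime.universe.TypeTag

    object ContextBoundExample {
      // Context-bound form, with the post-patch spacing:
      def describe[T: TypeTag]: String = implicitly[TypeTag[T]].tpe.toString

      // The desugaring the compiler produces (the evidence parameter name is illustrative):
      def describeExplicit[T](implicit evidence: TypeTag[T]): String = evidence.tpe.toString
    }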
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
index 2a132d8b82..6ec408a673 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
@@ -203,7 +203,7 @@ object SqlParser extends AbstractSparkSQLParser with DataTypeParser {
)
protected lazy val ordering: Parser[Seq[SortOrder]] =
- ( rep1sep(expression ~ direction.? , ",") ^^ {
+ ( rep1sep(expression ~ direction.?, ",") ^^ {
case exps => exps.map(pair => SortOrder(pair._1, pair._2.getOrElse(Ascending)))
}
)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 8a33af8207..d16880bc4a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -84,7 +84,7 @@ class Analyzer(
ResolveAggregateFunctions ::
DistinctAggregationRewriter(conf) ::
HiveTypeCoercion.typeCoercionRules ++
- extendedResolutionRules : _*),
+ extendedResolutionRules: _*),
Batch("Nondeterministic", Once,
PullOutNondeterministic),
Batch("UDF", Once,
@@ -110,7 +110,7 @@ class Analyzer(
// Taking into account the reasonableness and the implementation complexity,
// here use the CTE definition first, check table name only and ignore database name
// see https://github.com/apache/spark/pull/4929#discussion_r27186638 for more info
- case u : UnresolvedRelation =>
+ case u: UnresolvedRelation =>
val substituted = cteRelations.get(u.tableIdentifier.table).map { relation =>
val withAlias = u.alias.map(Subquery(_, relation))
withAlias.getOrElse(relation)
@@ -889,7 +889,7 @@ class Analyzer(
_.transform {
// Extracts children expressions of a WindowFunction (input parameters of
// a WindowFunction).
- case wf : WindowFunction =>
+ case wf: WindowFunction =>
val newChildren = wf.children.map(extractExpr)
wf.withNewChildren(newChildren)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
index 5c2aa3c06b..7c3d45b1e4 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
@@ -323,13 +323,13 @@ object FunctionRegistry {
} else {
// Otherwise, find an ctor method that matches the number of arguments, and use that.
val params = Seq.fill(expressions.size)(classOf[Expression])
- val f = Try(tag.runtimeClass.getDeclaredConstructor(params : _*)) match {
+ val f = Try(tag.runtimeClass.getDeclaredConstructor(params: _*)) match {
case Success(e) =>
e
case Failure(e) =>
throw new AnalysisException(s"Invalid number of arguments for function $name")
}
- Try(f.newInstance(expressions : _*).asInstanceOf[Expression]) match {
+ Try(f.newInstance(expressions: _*).asInstanceOf[Expression]) match {
case Success(e) => e
case Failure(e) => throw new AnalysisException(e.getMessage)
}
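The `params : _*` and `expressions : _*` edits are the same kind of whitespace-only change: `: _*` is the ascription that passes an existing sequence to a varargs parameter, and only the space before the colon is dropped. A self-contained sketch with made-up names:

    object VarargsExample {
      def sum(xs: Int*): Int = xs.sum

      val values = Seq(1, 2, 3)
      val total = sum(values: _*)  // ': _*' adapts the Seq to the varargs parameter
    }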
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
index dbcbd6854b..e326ea7827 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
@@ -529,7 +529,7 @@ object HiveTypeCoercion {
if falseValues.contains(value) => And(IsNotNull(bool), Not(bool))
case EqualTo(left @ BooleanType(), right @ NumericType()) =>
- transform(left , right)
+ transform(left, right)
case EqualTo(left @ NumericType(), right @ BooleanType()) =>
transform(right, left)
case EqualNullSafe(left @ BooleanType(), right @ NumericType()) =>
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala
index 5ac1984043..c4dbcb7b60 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala
@@ -61,9 +61,11 @@ package object dsl {
trait ImplicitOperators {
def expr: Expression
+ // scalastyle:off whitespacebeforetoken
def unary_- : Expression = UnaryMinus(expr)
def unary_! : Predicate = Not(expr)
def unary_~ : Expression = BitwiseNot(expr)
+ // scalastyle:on whitespacebeforetoken
def + (other: Expression): Expression = Add(expr, other)
def - (other: Expression): Expression = Subtract(expr, other)
@@ -141,7 +143,7 @@ package object dsl {
// Note that if we make ExpressionConversions an object rather than a trait, we can
// then make this a value class to avoid the small penalty of runtime instantiation.
def $(args: Any*): analysis.UnresolvedAttribute = {
- analysis.UnresolvedAttribute(sc.s(args : _*))
+ analysis.UnresolvedAttribute(sc.s(args: _*))
}
}
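The unary operator definitions above are the one place the new rule cannot be satisfied: for an identifier ending in an operator character, such as `unary_-`, a directly attached colon would be lexed into the identifier itself (`unary_-:`), so the space before `:` is mandatory and the checker is switched off locally instead of "fixing" the code. A minimal sketch of the contrast, using a hypothetical class:

    class Vec(val x: Double) {
      def negate: Vec = new Vec(-x)   // ordinary method name: no space needed before ':'

      // scalastyle:off whitespacebeforetoken
      def unary_- : Vec = negate      // 'unary_-:' would be read as a single identifier, so the space must stay
      // scalastyle:on whitespacebeforetoken
    }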
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala
index 05f746e72b..fa4c2d93ec 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala
@@ -44,7 +44,7 @@ import org.apache.spark.util.Utils
* to the name `value`.
*/
object ExpressionEncoder {
- def apply[T : TypeTag](): ExpressionEncoder[T] = {
+ def apply[T: TypeTag](): ExpressionEncoder[T] = {
// We convert the not-serializable TypeTag into StructType and ClassTag.
val mirror = typeTag[T].mirror
val cls = mirror.runtimeClass(typeTag[T].tpe)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala
index 9e283f5eb6..08ada1f38b 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala
@@ -27,7 +27,7 @@ package object encoders {
* references from a specific schema.) This requirement allows us to preserve whether a given
* object type is being bound by name or by ordinal when doing resolution.
*/
- private[sql] def encoderFor[A : Encoder]: ExpressionEncoder[A] = implicitly[Encoder[A]] match {
+ private[sql] def encoderFor[A: Encoder]: ExpressionEncoder[A] = implicitly[Encoder[A]] match {
case e: ExpressionEncoder[A] =>
e.assertUnresolved()
e
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
index d6219514b7..4ffbfa57e7 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
@@ -164,7 +164,7 @@ abstract class Expression extends TreeNode[Expression] {
* Returns the hash for this expression. Expressions that compute the same result, even if
* they differ cosmetically should return the same hash.
*/
- def semanticHash() : Int = {
+ def semanticHash(): Int = {
def computeHash(e: Seq[Any]): Int = {
// See http://stackoverflow.com/questions/113511/hash-code-implementation
var hash: Int = 17
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/stringExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/stringExpressions.scala
index 931f752b4d..bf41f85f79 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/stringExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/stringExpressions.scala
@@ -46,7 +46,7 @@ case class Concat(children: Seq[Expression]) extends Expression with ImplicitCas
override def eval(input: InternalRow): Any = {
val inputs = children.map(_.eval(input).asInstanceOf[UTF8String])
- UTF8String.concat(inputs : _*)
+ UTF8String.concat(inputs: _*)
}
override protected def genCode(ctx: CodeGenContext, ev: GeneratedExpressionCode): String = {
@@ -99,7 +99,7 @@ case class ConcatWs(children: Seq[Expression])
case null => Iterator(null.asInstanceOf[UTF8String])
}
}
- UTF8String.concatWs(flatInputs.head, flatInputs.tail : _*)
+ UTF8String.concatWs(flatInputs.head, flatInputs.tail: _*)
}
override protected def genCode(ctx: CodeGenContext, ev: GeneratedExpressionCode): String = {
@@ -990,7 +990,7 @@ case class FormatNumber(x: Expression, d: Expression)
def typeHelper(p: String): String = {
x.dataType match {
- case _ : DecimalType => s"""$p.toJavaBigDecimal()"""
+ case _: DecimalType => s"""$p.toJavaBigDecimal()"""
case _ => s"$p"
}
}
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala
index 64957db6b4..5489051e95 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala
@@ -496,7 +496,7 @@ case class MapPartitions[T, U](
/** Factory for constructing new `AppendColumn` nodes. */
object AppendColumns {
- def apply[T, U : Encoder](
+ def apply[T, U: Encoder](
func: T => U,
tEncoder: ExpressionEncoder[T],
child: LogicalPlan): AppendColumns[T, U] = {
@@ -522,7 +522,7 @@ case class AppendColumns[T, U](
/** Factory for constructing new `MapGroups` nodes. */
object MapGroups {
- def apply[K, T, U : Encoder](
+ def apply[K, T, U: Encoder](
func: (K, Iterator[T]) => TraversableOnce[U],
kEncoder: ExpressionEncoder[K],
tEncoder: ExpressionEncoder[T],
@@ -557,7 +557,7 @@ case class MapGroups[K, T, U](
/** Factory for constructing new `CoGroup` nodes. */
object CoGroup {
- def apply[Key, Left, Right, Result : Encoder](
+ def apply[Key, Left, Right, Result: Encoder](
func: (Key, Iterator[Left], Iterator[Right]) => TraversableOnce[Result],
keyEnc: ExpressionEncoder[Key],
leftEnc: ExpressionEncoder[Left],
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala
index 9fefc5656a..e4417e0955 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala
@@ -122,7 +122,7 @@ object NumberConverter {
* unsigned, otherwise it is signed.
* NB: This logic is borrowed from org.apache.hadoop.hive.ql.ud.UDFConv
*/
- def convert(n: Array[Byte] , fromBase: Int, toBase: Int ): UTF8String = {
+ def convert(n: Array[Byte], fromBase: Int, toBase: Int ): UTF8String = {
if (fromBase < Character.MIN_RADIX || fromBase > Character.MAX_RADIX
|| Math.abs(toBase) < Character.MIN_RADIX
|| Math.abs(toBase) > Character.MAX_RADIX) {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala
index 520e344361..be7573b95d 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala
@@ -90,7 +90,7 @@ case class ArrayType(elementType: DataType, containsNull: Boolean) extends DataT
private[sql] lazy val interpretedOrdering: Ordering[ArrayData] = new Ordering[ArrayData] {
private[this] val elementOrdering: Ordering[Any] = elementType match {
case dt: AtomicType => dt.ordering.asInstanceOf[Ordering[Any]]
- case a : ArrayType => a.interpretedOrdering.asInstanceOf[Ordering[Any]]
+ case a: ArrayType => a.interpretedOrdering.asInstanceOf[Ordering[Any]]
case s: StructType => s.interpretedOrdering.asInstanceOf[Ordering[Any]]
case other =>
throw new IllegalArgumentException(s"Type $other does not support ordered operations")
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala
index 38ce1604b1..864b47a2a0 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala
@@ -310,6 +310,7 @@ final class Decimal extends Ordered[Decimal] with Serializable {
def remainder(that: Decimal): Decimal = this % that
+ // scalastyle:off whitespacebeforetoken
def unary_- : Decimal = {
if (decimalVal.ne(null)) {
Decimal(-decimalVal, precision, scale)
@@ -317,6 +318,7 @@ final class Decimal extends Ordered[Decimal] with Serializable {
Decimal(-longVal, precision, scale)
}
}
+ // scalastyle:on whitespacebeforetoken
def abs: Decimal = if (this.compare(Decimal.ZERO) < 0) this.unary_- else this
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/EncoderErrorMessageSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/EncoderErrorMessageSuite.scala
index 8c766ef829..a1c4a861c6 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/EncoderErrorMessageSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/EncoderErrorMessageSuite.scala
@@ -98,5 +98,5 @@ class EncoderErrorMessageSuite extends SparkFunSuite {
s"""array element class: "${clsName[NonEncodable]}""""))
}
- private def clsName[T : ClassTag]: String = implicitly[ClassTag[T]].runtimeClass.getName
+ private def clsName[T: ClassTag]: String = implicitly[ClassTag[T]].runtimeClass.getName
}
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoderSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoderSuite.scala
index 88c558d80a..67f4dc98be 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoderSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoderSuite.scala
@@ -80,7 +80,7 @@ class JavaSerializable(val value: Int) extends Serializable {
class ExpressionEncoderSuite extends SparkFunSuite {
OuterScopes.outerScopes.put(getClass.getName, this)
- implicit def encoder[T : TypeTag]: ExpressionEncoder[T] = ExpressionEncoder()
+ implicit def encoder[T: TypeTag]: ExpressionEncoder[T] = ExpressionEncoder()
// test flat encoders
encodeDecodeTest(false, "primitive boolean")
@@ -145,7 +145,7 @@ class ExpressionEncoderSuite extends SparkFunSuite {
encoderFor(Encoders.javaSerialization[JavaSerializable]))
// test product encoders
- private def productTest[T <: Product : ExpressionEncoder](input: T): Unit = {
+ private def productTest[T <: Product: ExpressionEncoder](input: T): Unit = {
encodeDecodeTest(input, input.getClass.getSimpleName)
}
@@ -286,7 +286,7 @@ class ExpressionEncoderSuite extends SparkFunSuite {
}
}
- private def encodeDecodeTest[T : ExpressionEncoder](
+ private def encodeDecodeTest[T: ExpressionEncoder](
input: T,
testName: String): Unit = {
test(s"encode/decode for $testName: $input") {
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala
index 000a3b7ecb..6932f185b9 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala
@@ -80,7 +80,7 @@ class BooleanSimplificationSuite extends PlanTest with PredicateHelper {
checkCondition(('a < 2 || 'a > 3 || 'b > 5) && 'a < 2, 'a < 2)
- checkCondition('a < 2 && ('a < 2 || 'a > 3 || 'b > 5) , 'a < 2)
+ checkCondition('a < 2 && ('a < 2 || 'a > 3 || 'b > 5), 'a < 2)
checkCondition(('a < 2 || 'b > 3) && ('a < 2 || 'c > 5), 'a < 2 || ('b > 3 && 'c > 5))
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
index e8c61d6e01..a434d03332 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
@@ -152,7 +152,7 @@ class Column(protected[sql] val expr: Expression) extends Logging {
* results into the correct JVM types.
* @since 1.6.0
*/
- def as[U : Encoder]: TypedColumn[Any, U] = new TypedColumn[Any, U](expr, encoderFor[U])
+ def as[U: Encoder]: TypedColumn[Any, U] = new TypedColumn[Any, U](expr, encoderFor[U])
/**
* Extracts a value or values from a complex type.
@@ -171,6 +171,7 @@ class Column(protected[sql] val expr: Expression) extends Logging {
UnresolvedExtractValue(expr, lit(extraction).expr)
}
+ // scalastyle:off whitespacebeforetoken
/**
* Unary minus, i.e. negate the expression.
* {{{
@@ -202,6 +203,7 @@ class Column(protected[sql] val expr: Expression) extends Logging {
* @since 1.3.0
*/
def unary_! : Column = withExpr { Not(expr) }
+ // scalastyle:on whitespacebeforetoken
/**
* Equality test.
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index 60d2f05b86..fac8950aee 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -204,7 +204,7 @@ class DataFrame private[sql](
* @since 1.6.0
*/
@Experimental
- def as[U : Encoder]: Dataset[U] = new Dataset[U](sqlContext, logicalPlan)
+ def as[U: Encoder]: Dataset[U] = new Dataset[U](sqlContext, logicalPlan)
/**
* Returns a new [[DataFrame]] with columns renamed. This can be quite convenient in conversion
@@ -227,7 +227,7 @@ class DataFrame private[sql](
val newCols = logicalPlan.output.zip(colNames).map { case (oldAttribute, newName) =>
Column(oldAttribute).as(newName)
}
- select(newCols : _*)
+ select(newCols: _*)
}
/**
@@ -579,7 +579,7 @@ class DataFrame private[sql](
*/
@scala.annotation.varargs
def sortWithinPartitions(sortCol: String, sortCols: String*): DataFrame = {
- sortWithinPartitions((sortCol +: sortCols).map(Column(_)) : _*)
+ sortWithinPartitions((sortCol +: sortCols).map(Column(_)): _*)
}
/**
@@ -608,7 +608,7 @@ class DataFrame private[sql](
*/
@scala.annotation.varargs
def sort(sortCol: String, sortCols: String*): DataFrame = {
- sort((sortCol +: sortCols).map(apply) : _*)
+ sort((sortCol +: sortCols).map(apply): _*)
}
/**
@@ -631,7 +631,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
@scala.annotation.varargs
- def orderBy(sortCol: String, sortCols: String*): DataFrame = sort(sortCol, sortCols : _*)
+ def orderBy(sortCol: String, sortCols: String*): DataFrame = sort(sortCol, sortCols: _*)
/**
* Returns a new [[DataFrame]] sorted by the given expressions.
@@ -640,7 +640,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
@scala.annotation.varargs
- def orderBy(sortExprs: Column*): DataFrame = sort(sortExprs : _*)
+ def orderBy(sortExprs: Column*): DataFrame = sort(sortExprs: _*)
/**
* Selects column based on the column name and return it as a [[Column]].
@@ -720,7 +720,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
@scala.annotation.varargs
- def select(col: String, cols: String*): DataFrame = select((col +: cols).map(Column(_)) : _*)
+ def select(col: String, cols: String*): DataFrame = select((col +: cols).map(Column(_)): _*)
/**
* Selects a set of SQL expressions. This is a variant of `select` that accepts
@@ -948,7 +948,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
def agg(aggExpr: (String, String), aggExprs: (String, String)*): DataFrame = {
- groupBy().agg(aggExpr, aggExprs : _*)
+ groupBy().agg(aggExpr, aggExprs: _*)
}
/**
@@ -986,7 +986,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
@scala.annotation.varargs
- def agg(expr: Column, exprs: Column*): DataFrame = groupBy().agg(expr, exprs : _*)
+ def agg(expr: Column, exprs: Column*): DataFrame = groupBy().agg(expr, exprs: _*)
/**
* Returns a new [[DataFrame]] by taking the first `n` rows. The difference between this function
@@ -1118,7 +1118,7 @@ class DataFrame private[sql](
* @group dfops
* @since 1.3.0
*/
- def explode[A <: Product : TypeTag](input: Column*)(f: Row => TraversableOnce[A]): DataFrame = {
+ def explode[A <: Product: TypeTag](input: Column*)(f: Row => TraversableOnce[A]): DataFrame = {
val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType]
val elementTypes = schema.toAttributes.map {
@@ -1147,7 +1147,7 @@ class DataFrame private[sql](
* @group dfops
* @since 1.3.0
*/
- def explode[A, B : TypeTag](inputColumn: String, outputColumn: String)(f: A => TraversableOnce[B])
+ def explode[A, B: TypeTag](inputColumn: String, outputColumn: String)(f: A => TraversableOnce[B])
: DataFrame = {
val dataType = ScalaReflection.schemaFor[B].dataType
val attributes = AttributeReference(outputColumn, dataType)() :: Nil
@@ -1186,7 +1186,7 @@ class DataFrame private[sql](
Column(field)
}
}
- select(columns : _*)
+ select(columns: _*)
} else {
select(Column("*"), col.as(colName))
}
@@ -1207,7 +1207,7 @@ class DataFrame private[sql](
Column(field)
}
}
- select(columns : _*)
+ select(columns: _*)
} else {
select(Column("*"), col.as(colName, metadata))
}
@@ -1231,7 +1231,7 @@ class DataFrame private[sql](
Column(col)
}
}
- select(columns : _*)
+ select(columns: _*)
} else {
this
}
@@ -1244,7 +1244,7 @@ class DataFrame private[sql](
* @since 1.4.0
*/
def drop(colName: String): DataFrame = {
- drop(Seq(colName) : _*)
+ drop(Seq(colName): _*)
}
/**
@@ -1283,7 +1283,7 @@ class DataFrame private[sql](
val colsAfterDrop = attrs.filter { attr =>
attr != expression
}.map(attr => Column(attr))
- select(colsAfterDrop : _*)
+ select(colsAfterDrop: _*)
}
/**
@@ -1479,7 +1479,7 @@ class DataFrame private[sql](
* @group action
* @since 1.6.0
*/
- def takeAsList(n: Int): java.util.List[Row] = java.util.Arrays.asList(take(n) : _*)
+ def takeAsList(n: Int): java.util.List[Row] = java.util.Arrays.asList(take(n): _*)
/**
* Returns an array that contains all of [[Row]]s in this [[DataFrame]].
@@ -1505,7 +1505,7 @@ class DataFrame private[sql](
*/
def collectAsList(): java.util.List[Row] = withCallback("collectAsList", this) { _ =>
withNewExecutionId {
- java.util.Arrays.asList(rdd.collect() : _*)
+ java.util.Arrays.asList(rdd.collect(): _*)
}
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala
index 3b30337f1f..4441a634be 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala
@@ -33,5 +33,5 @@ case class DataFrameHolder private[sql](private val df: DataFrame) {
// `rdd.toDF("1")` as invoking this toDF and then apply on the returned DataFrame.
def toDF(): DataFrame = df
- def toDF(colNames: String*): DataFrame = df.toDF(colNames : _*)
+ def toDF(colNames: String*): DataFrame = df.toDF(colNames: _*)
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameNaFunctions.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameNaFunctions.scala
index f7be5f6b37..43500b09e0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameNaFunctions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameNaFunctions.scala
@@ -164,7 +164,7 @@ final class DataFrameNaFunctions private[sql](df: DataFrame) {
df.col(f.name)
}
}
- df.select(projections : _*)
+ df.select(projections: _*)
}
/**
@@ -191,7 +191,7 @@ final class DataFrameNaFunctions private[sql](df: DataFrame) {
df.col(f.name)
}
}
- df.select(projections : _*)
+ df.select(projections: _*)
}
/**
@@ -364,7 +364,7 @@ final class DataFrameNaFunctions private[sql](df: DataFrame) {
df.col(f.name)
}
}
- df.select(projections : _*)
+ df.select(projections: _*)
}
private def fill0(values: Seq[(String, Any)]): DataFrame = {
@@ -395,7 +395,7 @@ final class DataFrameNaFunctions private[sql](df: DataFrame) {
}
}.getOrElse(df.col(f.name))
}
- df.select(projections : _*)
+ df.select(projections: _*)
}
/**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index d948e48942..1ed451d5a8 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -203,7 +203,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
predicates: Array[String],
connectionProperties: Properties): DataFrame = {
val parts: Array[Partition] = predicates.zipWithIndex.map { case (part, i) =>
- JDBCPartition(part, i) : Partition
+ JDBCPartition(part, i): Partition
}
jdbc(url, table, parts, connectionProperties)
}
@@ -262,7 +262,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
*
* @since 1.6.0
*/
- def json(paths: String*): DataFrame = format("json").load(paths : _*)
+ def json(paths: String*): DataFrame = format("json").load(paths: _*)
/**
* Loads an `JavaRDD[String]` storing JSON objects (one object per record) and
@@ -355,7 +355,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
* @since 1.6.0
*/
@scala.annotation.varargs
- def text(paths: String*): DataFrame = format("text").load(paths : _*)
+ def text(paths: String*): DataFrame = format("text").load(paths: _*)
///////////////////////////////////////////////////////////////////////////////////////
// Builder pattern config options
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index 42f01e9359..9ffb5b94b2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -131,7 +131,7 @@ class Dataset[T] private[sql](
* along with `alias` or `as` to rearrange or rename as required.
* @since 1.6.0
*/
- def as[U : Encoder]: Dataset[U] = {
+ def as[U: Encoder]: Dataset[U] = {
new Dataset(sqlContext, queryExecution, encoderFor[U])
}
@@ -318,7 +318,7 @@ class Dataset[T] private[sql](
* Returns a new [[Dataset]] that contains the result of applying `func` to each element.
* @since 1.6.0
*/
- def map[U : Encoder](func: T => U): Dataset[U] = mapPartitions(_.map(func))
+ def map[U: Encoder](func: T => U): Dataset[U] = mapPartitions(_.map(func))
/**
* (Java-specific)
@@ -333,7 +333,7 @@ class Dataset[T] private[sql](
* Returns a new [[Dataset]] that contains the result of applying `func` to each partition.
* @since 1.6.0
*/
- def mapPartitions[U : Encoder](func: Iterator[T] => Iterator[U]): Dataset[U] = {
+ def mapPartitions[U: Encoder](func: Iterator[T] => Iterator[U]): Dataset[U] = {
new Dataset[U](
sqlContext,
MapPartitions[T, U](
@@ -360,7 +360,7 @@ class Dataset[T] private[sql](
* and then flattening the results.
* @since 1.6.0
*/
- def flatMap[U : Encoder](func: T => TraversableOnce[U]): Dataset[U] =
+ def flatMap[U: Encoder](func: T => TraversableOnce[U]): Dataset[U] =
mapPartitions(_.flatMap(func))
/**
@@ -432,7 +432,7 @@ class Dataset[T] private[sql](
* Returns a [[GroupedDataset]] where the data is grouped by the given key `func`.
* @since 1.6.0
*/
- def groupBy[K : Encoder](func: T => K): GroupedDataset[K, T] = {
+ def groupBy[K: Encoder](func: T => K): GroupedDataset[K, T] = {
val inputPlan = logicalPlan
val withGroupingKey = AppendColumns(func, resolvedTEncoder, inputPlan)
val executed = sqlContext.executePlan(withGroupingKey)
@@ -566,14 +566,14 @@ class Dataset[T] private[sql](
* Returns a new [[Dataset]] by sampling a fraction of records.
* @since 1.6.0
*/
- def sample(withReplacement: Boolean, fraction: Double, seed: Long) : Dataset[T] =
+ def sample(withReplacement: Boolean, fraction: Double, seed: Long): Dataset[T] =
withPlan(Sample(0.0, fraction, withReplacement, seed, _))
/**
* Returns a new [[Dataset]] by sampling a fraction of records, using a random seed.
* @since 1.6.0
*/
- def sample(withReplacement: Boolean, fraction: Double) : Dataset[T] = {
+ def sample(withReplacement: Boolean, fraction: Double): Dataset[T] = {
sample(withReplacement, fraction, Utils.random.nextLong)
}
@@ -731,7 +731,7 @@ class Dataset[T] private[sql](
* a very large `num` can crash the driver process with OutOfMemoryError.
* @since 1.6.0
*/
- def takeAsList(num: Int): java.util.List[T] = java.util.Arrays.asList(take(num) : _*)
+ def takeAsList(num: Int): java.util.List[T] = java.util.Arrays.asList(take(num): _*)
/**
* Persist this [[Dataset]] with the default storage level (`MEMORY_AND_DISK`).
@@ -786,7 +786,7 @@ class Dataset[T] private[sql](
private[sql] def withPlan(f: LogicalPlan => LogicalPlan): Dataset[T] =
new Dataset[T](sqlContext, sqlContext.executePlan(f(logicalPlan)), tEncoder)
- private[sql] def withPlan[R : Encoder](
+ private[sql] def withPlan[R: Encoder](
other: Dataset[_])(
f: (LogicalPlan, LogicalPlan) => LogicalPlan): Dataset[R] =
new Dataset[R](sqlContext, f(logicalPlan, other.logicalPlan))
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala b/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
index c74ef2c035..f5cbf013bc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
@@ -229,7 +229,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def mean(colNames: String*): DataFrame = {
- aggregateNumericColumns(colNames : _*)(Average)
+ aggregateNumericColumns(colNames: _*)(Average)
}
/**
@@ -241,7 +241,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def max(colNames: String*): DataFrame = {
- aggregateNumericColumns(colNames : _*)(Max)
+ aggregateNumericColumns(colNames: _*)(Max)
}
/**
@@ -253,7 +253,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def avg(colNames: String*): DataFrame = {
- aggregateNumericColumns(colNames : _*)(Average)
+ aggregateNumericColumns(colNames: _*)(Average)
}
/**
@@ -265,7 +265,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def min(colNames: String*): DataFrame = {
- aggregateNumericColumns(colNames : _*)(Min)
+ aggregateNumericColumns(colNames: _*)(Min)
}
/**
@@ -277,7 +277,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def sum(colNames: String*): DataFrame = {
- aggregateNumericColumns(colNames : _*)(Sum)
+ aggregateNumericColumns(colNames: _*)(Sum)
}
/**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala
index a819ddceb1..12179367fa 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala
@@ -73,7 +73,7 @@ class GroupedDataset[K, V] private[sql](
*
* @since 1.6.0
*/
- def keyAs[L : Encoder]: GroupedDataset[L, V] =
+ def keyAs[L: Encoder]: GroupedDataset[L, V] =
new GroupedDataset(
encoderFor[L],
unresolvedVEncoder,
@@ -110,7 +110,7 @@ class GroupedDataset[K, V] private[sql](
*
* @since 1.6.0
*/
- def flatMapGroups[U : Encoder](f: (K, Iterator[V]) => TraversableOnce[U]): Dataset[U] = {
+ def flatMapGroups[U: Encoder](f: (K, Iterator[V]) => TraversableOnce[U]): Dataset[U] = {
new Dataset[U](
sqlContext,
MapGroups(
@@ -158,7 +158,7 @@ class GroupedDataset[K, V] private[sql](
*
* @since 1.6.0
*/
- def mapGroups[U : Encoder](f: (K, Iterator[V]) => U): Dataset[U] = {
+ def mapGroups[U: Encoder](f: (K, Iterator[V]) => U): Dataset[U] = {
val func = (key: K, it: Iterator[V]) => Iterator(f(key, it))
flatMapGroups(func)
}
@@ -302,7 +302,7 @@ class GroupedDataset[K, V] private[sql](
*
* @since 1.6.0
*/
- def cogroup[U, R : Encoder](
+ def cogroup[U, R: Encoder](
other: GroupedDataset[K, U])(
f: (K, Iterator[V], Iterator[U]) => TraversableOnce[R]): Dataset[R] = {
new Dataset[R](
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index e827427c19..61c74f8340 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -409,7 +409,7 @@ class SQLContext private[sql](
* @since 1.3.0
*/
@Experimental
- def createDataFrame[A <: Product : TypeTag](rdd: RDD[A]): DataFrame = {
+ def createDataFrame[A <: Product: TypeTag](rdd: RDD[A]): DataFrame = {
SQLContext.setActive(self)
val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType]
val attributeSeq = schema.toAttributes
@@ -425,7 +425,7 @@ class SQLContext private[sql](
* @since 1.3.0
*/
@Experimental
- def createDataFrame[A <: Product : TypeTag](data: Seq[A]): DataFrame = {
+ def createDataFrame[A <: Product: TypeTag](data: Seq[A]): DataFrame = {
SQLContext.setActive(self)
val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType]
val attributeSeq = schema.toAttributes
@@ -498,7 +498,7 @@ class SQLContext private[sql](
}
- def createDataset[T : Encoder](data: Seq[T]): Dataset[T] = {
+ def createDataset[T: Encoder](data: Seq[T]): Dataset[T] = {
val enc = encoderFor[T]
val attributes = enc.schema.toAttributes
val encoded = data.map(d => enc.toRow(d).copy())
@@ -507,7 +507,7 @@ class SQLContext private[sql](
new Dataset[T](this, plan)
}
- def createDataset[T : Encoder](data: RDD[T]): Dataset[T] = {
+ def createDataset[T: Encoder](data: RDD[T]): Dataset[T] = {
val enc = encoderFor[T]
val attributes = enc.schema.toAttributes
val encoded = data.map(d => enc.toRow(d))
@@ -516,7 +516,7 @@ class SQLContext private[sql](
new Dataset[T](this, plan)
}
- def createDataset[T : Encoder](data: java.util.List[T]): Dataset[T] = {
+ def createDataset[T: Encoder](data: java.util.List[T]): Dataset[T] = {
createDataset(data.asScala)
}
@@ -945,7 +945,7 @@ class SQLContext private[sql](
}
}
- // Register a succesfully instantiatd context to the singleton. This should be at the end of
+ // Register a successfully instantiated context to the singleton. This should be at the end of
// the class definition so that the singleton is updated only if there is no exception in the
// construction of the instance.
sparkContext.addSparkListener(new SparkListener {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
index ab414799f1..a7f7997df1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
@@ -37,7 +37,7 @@ abstract class SQLImplicits {
protected def _sqlContext: SQLContext
/** @since 1.6.0 */
- implicit def newProductEncoder[T <: Product : TypeTag]: Encoder[T] = ExpressionEncoder()
+ implicit def newProductEncoder[T <: Product: TypeTag]: Encoder[T] = ExpressionEncoder()
/** @since 1.6.0 */
implicit def newIntEncoder: Encoder[Int] = ExpressionEncoder()
@@ -67,7 +67,7 @@ abstract class SQLImplicits {
* Creates a [[Dataset]] from an RDD.
* @since 1.6.0
*/
- implicit def rddToDatasetHolder[T : Encoder](rdd: RDD[T]): DatasetHolder[T] = {
+ implicit def rddToDatasetHolder[T: Encoder](rdd: RDD[T]): DatasetHolder[T] = {
DatasetHolder(_sqlContext.createDataset(rdd))
}
@@ -75,7 +75,7 @@ abstract class SQLImplicits {
* Creates a [[Dataset]] from a local Seq.
* @since 1.6.0
*/
- implicit def localSeqToDatasetHolder[T : Encoder](s: Seq[T]): DatasetHolder[T] = {
+ implicit def localSeqToDatasetHolder[T: Encoder](s: Seq[T]): DatasetHolder[T] = {
DatasetHolder(_sqlContext.createDataset(s))
}
@@ -89,7 +89,7 @@ abstract class SQLImplicits {
* Creates a DataFrame from an RDD of Product (e.g. case classes, tuples).
* @since 1.3.0
*/
- implicit def rddToDataFrameHolder[A <: Product : TypeTag](rdd: RDD[A]): DataFrameHolder = {
+ implicit def rddToDataFrameHolder[A <: Product: TypeTag](rdd: RDD[A]): DataFrameHolder = {
DataFrameHolder(_sqlContext.createDataFrame(rdd))
}
@@ -97,7 +97,7 @@ abstract class SQLImplicits {
* Creates a DataFrame from a local Seq of Product.
* @since 1.3.0
*/
- implicit def localSeqToDataFrameHolder[A <: Product : TypeTag](data: Seq[A]): DataFrameHolder =
+ implicit def localSeqToDataFrameHolder[A <: Product: TypeTag](data: Seq[A]): DataFrameHolder =
{
DataFrameHolder(_sqlContext.createDataFrame(data))
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/api/r/SQLUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/api/r/SQLUtils.scala
index d912aeb70d..a8e6a40169 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/api/r/SQLUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/api/r/SQLUtils.scala
@@ -39,7 +39,7 @@ private[r] object SQLUtils {
new JavaSparkContext(sqlCtx.sparkContext)
}
- def createStructType(fields : Seq[StructField]): StructType = {
+ def createStructType(fields: Seq[StructField]): StructType = {
StructType(fields)
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
index 6b10057707..058d147c7d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
@@ -223,7 +223,7 @@ case class Exchange(
new ShuffledRowRDD(shuffleDependency, specifiedPartitionStartIndices)
}
- protected override def doExecute(): RDD[InternalRow] = attachTree(this , "execute") {
+ protected override def doExecute(): RDD[InternalRow] = attachTree(this, "execute") {
coordinator match {
case Some(exchangeCoordinator) =>
val shuffleRDD = exchangeCoordinator.postShuffleRDD(this)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Queryable.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Queryable.scala
index 38263af0f7..bb55161477 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Queryable.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Queryable.scala
@@ -71,7 +71,7 @@ private[sql] trait Queryable {
private[sql] def formatString (
rows: Seq[Seq[String]],
numRows: Int,
- hasMoreData : Boolean,
+ hasMoreData: Boolean,
truncate: Boolean = true): String = {
val sb = new StringBuilder
val numCols = schema.fieldNames.length
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TypedAggregateExpression.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TypedAggregateExpression.scala
index 1df38f7ff5..b5ac530444 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TypedAggregateExpression.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TypedAggregateExpression.scala
@@ -29,7 +29,7 @@ import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.types._
object TypedAggregateExpression {
- def apply[A, B : Encoder, C : Encoder](
+ def apply[A, B: Encoder, C: Encoder](
aggregator: Aggregator[A, B, C]): TypedAggregateExpression = {
new TypedAggregateExpression(
aggregator.asInstanceOf[Aggregator[Any, Any, Any]],
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala
index d45d2db62f..d5e0d80076 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala
@@ -256,7 +256,7 @@ private[spark] class SqlNewHadoopRDD[V: ClassTag](
val infos = c.newGetLocationInfo.invoke(split).asInstanceOf[Array[AnyRef]]
Some(HadoopRDD.convertSplitLocationInfo(infos))
} catch {
- case e : Exception =>
+ case e: Exception =>
logDebug("Failed to use InputSplit#getLocationInfo.", e)
None
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
index fb97a03df6..c4b125e9d5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
@@ -557,7 +557,7 @@ private[parquet] object CatalystSchemaConverter {
}
}
- private def computeMinBytesForPrecision(precision : Int) : Int = {
+ private def computeMinBytesForPrecision(precision: Int): Int = {
var numBytes = 1
while (math.pow(2.0, 8 * numBytes - 1) < math.pow(10.0, precision)) {
numBytes += 1
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProduct.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProduct.scala
index 93d32e1fb9..a567457dba 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProduct.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProduct.scala
@@ -34,7 +34,7 @@ import org.apache.spark.util.collection.unsafe.sort.UnsafeExternalSorter
* materialize the right RDD (in case of the right RDD is nondeterministic).
*/
private[spark]
-class UnsafeCartesianRDD(left : RDD[UnsafeRow], right : RDD[UnsafeRow], numFieldsOfRight: Int)
+class UnsafeCartesianRDD(left: RDD[UnsafeRow], right: RDD[UnsafeRow], numFieldsOfRight: Int)
extends CartesianRDD[UnsafeRow, UnsafeRow](left.sparkContext, left, right) {
override def compute(split: Partition, context: TaskContext): Iterator[(UnsafeRow, UnsafeRow)] = {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala
index 52735c9d7f..8c68d9ee0a 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala
@@ -64,7 +64,7 @@ private[sql] trait SQLMetricValue[T] extends Serializable {
/**
* A wrapper of Long to avoid boxing and unboxing when using Accumulator
*/
-private[sql] class LongSQLMetricValue(private var _value : Long) extends SQLMetricValue[Long] {
+private[sql] class LongSQLMetricValue(private var _value: Long) extends SQLMetricValue[Long] {
def add(incr: Long): LongSQLMetricValue = {
_value += incr
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/FrequentItems.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/FrequentItems.scala
index a191759813..a4cb54e2bf 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/FrequentItems.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/FrequentItems.scala
@@ -94,7 +94,7 @@ private[sql] object FrequentItems extends Logging {
(name, originalSchema.fields(index).dataType)
}.toArray
- val freqItems = df.select(cols.map(Column(_)) : _*).rdd.aggregate(countMaps)(
+ val freqItems = df.select(cols.map(Column(_)): _*).rdd.aggregate(countMaps)(
seqOp = (counts, row) => {
var i = 0
while (i < numCols) {
@@ -115,7 +115,7 @@ private[sql] object FrequentItems extends Logging {
}
)
val justItems = freqItems.map(m => m.baseMap.keys.toArray)
- val resultRow = Row(justItems : _*)
+ val resultRow = Row(justItems: _*)
// append frequent Items to the column name for easy debugging
val outputCols = colInfo.map { v =>
StructField(v._1 + "_freqItems", ArrayType(v._2, false))
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala b/sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala
index e9b60841fc..05a9f377b9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala
@@ -44,7 +44,7 @@ object Window {
*/
@scala.annotation.varargs
def partitionBy(colName: String, colNames: String*): WindowSpec = {
- spec.partitionBy(colName, colNames : _*)
+ spec.partitionBy(colName, colNames: _*)
}
/**
@@ -53,7 +53,7 @@ object Window {
*/
@scala.annotation.varargs
def partitionBy(cols: Column*): WindowSpec = {
- spec.partitionBy(cols : _*)
+ spec.partitionBy(cols: _*)
}
/**
@@ -62,7 +62,7 @@ object Window {
*/
@scala.annotation.varargs
def orderBy(colName: String, colNames: String*): WindowSpec = {
- spec.orderBy(colName, colNames : _*)
+ spec.orderBy(colName, colNames: _*)
}
/**
@@ -71,7 +71,7 @@ object Window {
*/
@scala.annotation.varargs
def orderBy(cols: Column*): WindowSpec = {
- spec.orderBy(cols : _*)
+ spec.orderBy(cols: _*)
}
private def spec: WindowSpec = {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 592d79df31..1ac62883a6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -306,7 +306,7 @@ object functions extends LegacyFunctions {
*/
@scala.annotation.varargs
def countDistinct(columnName: String, columnNames: String*): Column =
- countDistinct(Column(columnName), columnNames.map(Column.apply) : _*)
+ countDistinct(Column(columnName), columnNames.map(Column.apply): _*)
/**
* Aggregate function: returns the first value in a group.
@@ -768,7 +768,7 @@ object functions extends LegacyFunctions {
*/
@scala.annotation.varargs
def array(colName: String, colNames: String*): Column = {
- array((colName +: colNames).map(col) : _*)
+ array((colName +: colNames).map(col): _*)
}
/**
@@ -977,7 +977,7 @@ object functions extends LegacyFunctions {
*/
@scala.annotation.varargs
def struct(colName: String, colNames: String*): Column = {
- struct((colName +: colNames).map(col) : _*)
+ struct((colName +: colNames).map(col): _*)
}
/**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/AggregatedDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/AggregatedDialect.scala
index 467d8d62d1..d2c31d6e04 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/AggregatedDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/AggregatedDialect.scala
@@ -30,7 +30,7 @@ private class AggregatedDialect(dialects: List[JdbcDialect]) extends JdbcDialect
require(dialects.nonEmpty)
- override def canHandle(url : String): Boolean =
+ override def canHandle(url: String): Boolean =
dialects.map(_.canHandle(url)).reduce(_ && _)
override def getCatalystType(
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
index ca2d909e2c..8d58321d48 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
@@ -31,7 +31,7 @@ import org.apache.spark.sql.types._
* send a null value to the database.
*/
@DeveloperApi
-case class JdbcType(databaseTypeDefinition : String, jdbcNullType : Int)
+case class JdbcType(databaseTypeDefinition: String, jdbcNullType: Int)
/**
* :: DeveloperApi ::
@@ -60,7 +60,7 @@ abstract class JdbcDialect extends Serializable {
* @return True if the dialect can be applied on the given jdbc url.
* @throws NullPointerException if the url is null.
*/
- def canHandle(url : String): Boolean
+ def canHandle(url: String): Boolean
/**
* Get the custom datatype mapping for the given jdbc meta information.
@@ -130,7 +130,7 @@ object JdbcDialects {
*
* @param dialect The new dialect.
*/
- def registerDialect(dialect: JdbcDialect) : Unit = {
+ def registerDialect(dialect: JdbcDialect): Unit = {
dialects = dialect :: dialects.filterNot(_ == dialect)
}
@@ -139,7 +139,7 @@ object JdbcDialects {
*
* @param dialect The jdbc dialect.
*/
- def unregisterDialect(dialect : JdbcDialect) : Unit = {
+ def unregisterDialect(dialect: JdbcDialect): Unit = {
dialects = dialects.filterNot(_ == dialect)
}
@@ -169,5 +169,5 @@ object JdbcDialects {
* NOOP dialect object, always returning the neutral element.
*/
private object NoopDialect extends JdbcDialect {
- override def canHandle(url : String): Boolean = true
+ override def canHandle(url: String): Boolean = true
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
index e1717049f3..faae54e605 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
@@ -23,10 +23,13 @@ import org.apache.spark.sql.types.{BooleanType, DataType, LongType, MetadataBuil
private case object MySQLDialect extends JdbcDialect {
- override def canHandle(url : String): Boolean = url.startsWith("jdbc:mysql")
+ override def canHandle(url: String): Boolean = url.startsWith("jdbc:mysql")
override def getCatalystType(
- sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
+ sqlType: Int,
+ typeName: String,
+ size: Int,
+ md: MetadataBuilder): Option[DataType] = {
if (sqlType == Types.VARBINARY && typeName.equals("BIT") && size != 1) {
// This could instead be a BinaryType if we'd rather return bit-vectors of up to 64 bits as
// byte arrays instead of longs.
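
Besides the colon fix, this hunk rewraps getCatalystType to one parameter per line. A schematic illustration of that wrapping style with a hypothetical method (not from the patch):

    object ParameterWrappingExample {
      // When a signature does not fit on one line, each parameter goes on its
      // own line, indented four spaces past `def`.
      def summarize(
          tableName: String,
          columnCount: Int,
          approximateRows: Long,
          compressed: Boolean): String =
        s"$tableName: $columnCount columns, ~$approximateRows rows, compressed=$compressed"

      def main(args: Array[String]): Unit =
        println(summarize("events", 12, 1000000L, compressed = true))
    }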
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala
index 3258f3782d..f952fc07fd 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala
@@ -24,7 +24,7 @@ import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSQLContext
/** An `Aggregator` that adds up any numeric type returned by the given function. */
-class SumOf[I, N : Numeric](f: I => N) extends Aggregator[I, N, N] {
+class SumOf[I, N: Numeric](f: I => N) extends Aggregator[I, N, N] {
val numeric = implicitly[Numeric[N]]
override def zero: N = numeric.zero
@@ -113,7 +113,7 @@ class DatasetAggregatorSuite extends QueryTest with SharedSQLContext {
import testImplicits._
- def sum[I, N : Numeric : Encoder](f: I => N): TypedColumn[I, N] =
+ def sum[I, N: Numeric: Encoder](f: I => N): TypedColumn[I, N] =
new SumOf(f).toColumn
test("typed aggregation: TypedAggregator") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala
index 3a283a4e1f..848f1af655 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala
@@ -27,7 +27,7 @@ class DatasetCacheSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("persist and unpersist") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS().select(expr("_2 + 1").as[Int])
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS().select(expr("_2 + 1").as[Int])
val cached = ds.cache()
// count triggers the caching action. It should not throw.
cached.count()
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
index 53b5f45c2d..a3ed2e0616 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
@@ -30,7 +30,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("toDS") {
- val data = Seq(("a", 1) , ("b", 2), ("c", 3))
+ val data = Seq(("a", 1), ("b", 2), ("c", 3))
checkAnswer(
data.toDS(),
data: _*)
@@ -87,7 +87,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("as case class / collect") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDF("a", "b").as[ClassData]
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDF("a", "b").as[ClassData]
checkAnswer(
ds,
ClassData("a", 1), ClassData("b", 2), ClassData("c", 3))
@@ -105,7 +105,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("map") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.map(v => (v._1, v._2 + 1)),
("a", 2), ("b", 3), ("c", 4))
@@ -124,23 +124,23 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("select") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(expr("_2 + 1").as[Int]),
2, 3, 4)
}
test("select 2") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
- expr("_2").as[Int]) : Dataset[(String, Int)],
+ expr("_2").as[Int]): Dataset[(String, Int)],
("a", 1), ("b", 2), ("c", 3))
}
test("select 2, primitive and tuple") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
@@ -149,7 +149,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("select 2, primitive and class") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
@@ -158,7 +158,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("select 2, primitive and class, fields reordered") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDecoding(
ds.select(
expr("_1").as[String],
@@ -167,28 +167,28 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("filter") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.filter(_._1 == "b"),
("b", 2))
}
test("foreach") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.accumulator(0)
ds.foreach(v => acc += v._2)
assert(acc.value == 6)
}
test("foreachPartition") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.accumulator(0)
ds.foreachPartition(_.foreach(v => acc += v._2))
assert(acc.value == 6)
}
test("reduce") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
assert(ds.reduce((a, b) => ("sum", a._2 + b._2)) == ("sum", 6))
}
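
The "select 2" test above also ascribes the whole select expression with `: Dataset[(String, Int)]`; the same no-space rule applies to type ascription as to parameter types. A plain-Scala sketch of ascription, independent of Spark:

    object TypeAscriptionExample {
      def main(args: Array[String]): Unit = {
        val pairs = Seq(("a", 1), ("b", 2), ("c", 3))

        // Ascribe an expression's type; the colon hugs the expression.
        val names = (pairs.map(_._1): Seq[String])

        // Widening via ascription works the same way.
        val anything = (names: Any)

        println(names)
        println(anything)
      }
    }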
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
index 4ab148065a..860e07c68c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
@@ -206,7 +206,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
StructType(
StructField("f1", IntegerType, true) ::
StructField("f2", IntegerType, true) :: Nil),
- StructType(StructField("f1", LongType, true) :: Nil) ,
+ StructType(StructField("f1", LongType, true) :: Nil),
StructType(
StructField("f1", LongType, true) ::
StructField("f2", IntegerType, true) :: Nil))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
index ab48e971b5..f2e0a868f4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
@@ -72,7 +72,7 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSQLContext {
/**
* Writes `data` to a Parquet file, reads it back and check file contents.
*/
- protected def checkParquetFile[T <: Product : ClassTag: TypeTag](data: Seq[T]): Unit = {
+ protected def checkParquetFile[T <: Product: ClassTag: TypeTag](data: Seq[T]): Unit = {
withParquetDataFrame(data)(r => checkAnswer(r, data.map(Row.fromTuple)))
}
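
Context bounds can also be chained, as in `[T <: Product: ClassTag: TypeTag]` above, with no space before either colon. A self-contained sketch using a hypothetical `describe` helper:

    import scala.reflect.ClassTag
    import scala.reflect.runtime.universe.{typeOf, TypeTag}

    object MultiBoundExample {
      // Both tags are summoned implicitly from the chained context bounds.
      def describe[T <: Product: ClassTag: TypeTag](data: Seq[T]): String = {
        val runtimeClass = implicitly[ClassTag[T]].runtimeClass.getSimpleName
        s"${data.size} rows of ${typeOf[T]} (runtime class $runtimeClass)"
      }

      def main(args: Array[String]): Unit =
        println(describe(Seq(("a", 1), ("b", 2))))
    }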
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index 1fa22e2933..984e3fbc05 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -46,7 +46,7 @@ class JDBCSuite extends SparkFunSuite
val testBytes = Array[Byte](99.toByte, 134.toByte, 135.toByte, 200.toByte, 205.toByte)
val testH2Dialect = new JdbcDialect {
- override def canHandle(url: String) : Boolean = url.startsWith("jdbc:h2")
+ override def canHandle(url: String): Boolean = url.startsWith("jdbc:h2")
override def getCatalystType(
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] =
Some(StringType)
@@ -489,7 +489,7 @@ class JDBCSuite extends SparkFunSuite
test("Aggregated dialects") {
val agg = new AggregatedDialect(List(new JdbcDialect {
- override def canHandle(url: String) : Boolean = url.startsWith("jdbc:h2:")
+ override def canHandle(url: String): Boolean = url.startsWith("jdbc:h2:")
override def getCatalystType(
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] =
if (sqlType % 2 == 0) {
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ReflectionUtils.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ReflectionUtils.scala
index 599294dfbb..d1d8a68f6d 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ReflectionUtils.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ReflectionUtils.scala
@@ -18,7 +18,7 @@
package org.apache.spark.sql.hive.thriftserver
private[hive] object ReflectionUtils {
- def setSuperField(obj : Object, fieldName: String, fieldValue: Object) {
+ def setSuperField(obj: Object, fieldName: String, fieldValue: Object) {
setAncestorField(obj, 1, fieldName, fieldValue)
}
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
index 03bc830df2..9f9efe33e1 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
@@ -325,7 +325,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
if (ret != 0) {
// For analysis exception, only the error is printed out to the console.
rc.getException() match {
- case e : AnalysisException =>
+ case e: AnalysisException =>
err.println(s"""Error in query: ${e.getMessage}""")
case _ => err.println(rc.getErrorMessage())
}
@@ -369,7 +369,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
if (counter != 0) {
responseMsg += s", Fetched $counter row(s)"
}
- console.printInfo(responseMsg , null)
+ console.printInfo(responseMsg, null)
// Destroy the driver to release all the locks.
driver.destroy()
} else {
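
The `case e: AnalysisException =>` fix above is the same rule applied to a typed pattern, which binds the matched value at its narrowed type. A minimal sketch with standard exceptions (not the CLI driver's actual error handling):

    object TypedPatternExample {
      def main(args: Array[String]): Unit = {
        def classify(t: Throwable): String = t match {
          // No space before the colon in `e: IllegalArgumentException`.
          case e: IllegalArgumentException => s"bad argument: ${e.getMessage}"
          case e: Exception                => s"other failure: ${e.getMessage}"
        }

        println(classify(new IllegalArgumentException("negative count")))
        println(classify(new RuntimeException("boom")))
      }
    }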
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
index 7a260e72eb..c9df3c4a82 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
@@ -657,8 +657,8 @@ private[hive] trait HiveInspectors {
case DecimalType() => PrimitiveObjectInspectorFactory.javaHiveDecimalObjectInspector
case StructType(fields) =>
ObjectInspectorFactory.getStandardStructObjectInspector(
- java.util.Arrays.asList(fields.map(f => f.name) : _*),
- java.util.Arrays.asList(fields.map(f => toInspector(f.dataType)) : _*))
+ java.util.Arrays.asList(fields.map(f => f.name): _*),
+ java.util.Arrays.asList(fields.map(f => toInspector(f.dataType)): _*))
}
/**
@@ -905,8 +905,8 @@ private[hive] trait HiveInspectors {
getListTypeInfo(elemType.toTypeInfo)
case StructType(fields) =>
getStructTypeInfo(
- java.util.Arrays.asList(fields.map(_.name) : _*),
- java.util.Arrays.asList(fields.map(_.dataType.toTypeInfo) : _*))
+ java.util.Arrays.asList(fields.map(_.name): _*),
+ java.util.Arrays.asList(fields.map(_.dataType.toTypeInfo): _*))
case MapType(keyType, valueType, _) =>
getMapTypeInfo(keyType.toTypeInfo, valueType.toTypeInfo)
case BinaryType => binaryTypeInfo
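
The `java.util.Arrays.asList(... : _*)` calls fixed above splice a Scala Seq into a Java varargs method; the adapter is written with no space before the colon. A minimal standalone sketch:

    object JavaVarargsSpliceExample {
      def main(args: Array[String]): Unit = {
        val fieldNames = Seq("name", "age", "email")

        // Arrays.asList is a Java varargs method; the Seq is spliced with `: _*`.
        val javaList: java.util.List[String] = java.util.Arrays.asList(fieldNames: _*)

        println(javaList)
      }
    }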
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
index 56cab1aee8..912cd41173 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
@@ -181,7 +181,7 @@ private[hive] case class HiveSimpleUDF(
val ret = FunctionRegistry.invoke(
method,
function,
- conversionHelper.convertIfNecessary(inputs : _*): _*)
+ conversionHelper.convertIfNecessary(inputs: _*): _*)
unwrap(ret, returnInspector)
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
index 3b867bbfa1..ad28345a66 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveInspectorSuite.scala
@@ -118,8 +118,8 @@ class HiveInspectorSuite extends SparkFunSuite with HiveInspectors {
case DecimalType() => PrimitiveObjectInspectorFactory.writableHiveDecimalObjectInspector
case StructType(fields) =>
ObjectInspectorFactory.getStandardStructObjectInspector(
- java.util.Arrays.asList(fields.map(f => f.name) : _*),
- java.util.Arrays.asList(fields.map(f => toWritableInspector(f.dataType)) : _*))
+ java.util.Arrays.asList(fields.map(f => f.name): _*),
+ java.util.Arrays.asList(fields.map(f => toWritableInspector(f.dataType)): _*))
}
def checkDataType(dt1: Seq[DataType], dt2: Seq[DataType]): Unit = {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index da7303c791..40e9c9362c 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -154,8 +154,8 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
}
val expected = List(
"p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=2"::Nil,
- "p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=3"::Nil ,
- "p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=1"::Nil ,
+ "p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=3"::Nil,
+ "p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=1"::Nil,
"p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=4"::Nil
)
assert(listFolders(tmpDir, List()).sortBy(_.toString()) === expected.sortBy(_.toString))