author | Kousuke Saruta <sarutak@oss.nttdata.co.jp> | 2016-01-12 00:51:00 -0800 |
---|---|---|
committer | Reynold Xin <rxin@databricks.com> | 2016-01-12 00:51:00 -0800 |
commit | 8cfa218f4f1b05f4d076ec15dd0a033ad3e4500d (patch) | |
tree | 31e99044d63b89311821df7a61e6f4882114677a /sql/catalyst | |
parent | 112abf9100f05be436e449817468c50174712c78 (diff) | |
download | spark-8cfa218f4f1b05f4d076ec15dd0a033ad3e4500d.tar.gz, spark-8cfa218f4f1b05f4d076ec15dd0a033ad3e4500d.tar.bz2, spark-8cfa218f4f1b05f4d076ec15dd0a033ad3e4500d.zip |
[SPARK-12692][BUILD][SQL] Scala style: Fix the style violation (Space before "," or ":")
Fix the style violations (space before `,` and `:`).
This PR is a follow-up to #10643.
Author: Kousuke Saruta <sarutak@oss.nttdata.co.jp>
Closes #10718 from sarutak/SPARK-12692-followup-sql.
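The rule being enforced is scalastyle's `whitespacebeforetoken` check (the name appears in the `scalastyle:off`/`scalastyle:on` guards in the diff below), which rejects whitespace immediately before `,` or `:`. A minimal sketch of the pattern being fixed — the signature is illustrative, not taken verbatim from the patch, and both versions compile, since the rule is purely stylistic:

```scala
import scala.reflect.runtime.universe.TypeTag

object StyleBefore {
  // Flagged: space before ':' in the context bound and the result type,
  // and space before ',' in the parameter list.
  def dataTypeFor[T : TypeTag](names: Seq[String] , idx: Int) : Unit = ()
}

object StyleAfter {
  // Fixed: ',' and ':' attach directly to the token that precedes them.
  def dataTypeFor[T: TypeTag](names: Seq[String], idx: Int): Unit = ()
}
```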
Diffstat (limited to 'sql/catalyst')
17 files changed, 31 insertions, 27 deletions
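Two patterns account for most of the hunks below: context bounds such as `[T: TypeTag]`, sketched above, and varargs splices such as `UTF8String.concat(inputs: _*)`, where `xs: _*` ascribes a sequence so it can be passed to a varargs parameter. A self-contained sketch of the splice, using a hypothetical `concat` helper rather than Spark's `UTF8String.concat`:

```scala
object SpliceExample {
  // A varargs parameter: callers may pass zero or more String arguments.
  def concat(parts: String*): String = parts.mkString

  def main(args: Array[String]): Unit = {
    val inputs = Seq("a", "b", "c")
    // ': _*' splices the Seq into the varargs parameter; per the style
    // rule, no space is left before the ':'.
    println(concat(inputs: _*)) // prints "abc"
  }
}
```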
```diff
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
index 79f723cf9b..23fea0e283 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
@@ -49,7 +49,7 @@ object ScalaReflection extends ScalaReflection {
    * Unlike `schemaFor`, this function doesn't do any massaging of types into the Spark SQL type
    * system. As a result, ObjectType will be returned for things like boxed Integers
    */
-  def dataTypeFor[T : TypeTag]: DataType = dataTypeFor(localTypeOf[T])
+  def dataTypeFor[T: TypeTag]: DataType = dataTypeFor(localTypeOf[T])
 
   private def dataTypeFor(tpe: `Type`): DataType = ScalaReflectionLock.synchronized {
     tpe match {
@@ -116,7 +116,7 @@ object ScalaReflection extends ScalaReflection {
    * from ordinal 0 (since there are no names to map to). The actual location can be moved by
    * calling resolve/bind with a new schema.
    */
-  def constructorFor[T : TypeTag]: Expression = {
+  def constructorFor[T: TypeTag]: Expression = {
     val tpe = localTypeOf[T]
     val clsName = getClassNameFromType(tpe)
     val walkedTypePath = s"""- root class: "${clsName}"""" :: Nil
@@ -386,7 +386,7 @@ object ScalaReflection extends ScalaReflection {
    *  * the element type of [[Array]] or [[Seq]]: `array element class: "abc.xyz.MyClass"`
    *  * the field of [[Product]]: `field (class: "abc.xyz.MyClass", name: "myField")`
    */
-  def extractorsFor[T : TypeTag](inputObject: Expression): CreateNamedStruct = {
+  def extractorsFor[T: TypeTag](inputObject: Expression): CreateNamedStruct = {
     val tpe = localTypeOf[T]
     val clsName = getClassNameFromType(tpe)
     val walkedTypePath = s"""- root class: "${clsName}"""" :: Nil
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
index 2a132d8b82..6ec408a673 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
@@ -203,7 +203,7 @@ object SqlParser extends AbstractSparkSQLParser with DataTypeParser {
     )
 
   protected lazy val ordering: Parser[Seq[SortOrder]] =
-    ( rep1sep(expression ~ direction.? , ",") ^^ {
+    ( rep1sep(expression ~ direction.?, ",") ^^ {
         case exps => exps.map(pair => SortOrder(pair._1, pair._2.getOrElse(Ascending)))
       }
     )
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 8a33af8207..d16880bc4a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -84,7 +84,7 @@ class Analyzer(
       ResolveAggregateFunctions ::
       DistinctAggregationRewriter(conf) ::
       HiveTypeCoercion.typeCoercionRules ++
-      extendedResolutionRules : _*),
+      extendedResolutionRules: _*),
     Batch("Nondeterministic", Once,
       PullOutNondeterministic),
     Batch("UDF", Once,
@@ -110,7 +110,7 @@ class Analyzer(
         // Taking into account the reasonableness and the implementation complexity,
         // here use the CTE definition first, check table name only and ignore database name
         // see https://github.com/apache/spark/pull/4929#discussion_r27186638 for more info
-        case u : UnresolvedRelation =>
+        case u: UnresolvedRelation =>
           val substituted = cteRelations.get(u.tableIdentifier.table).map { relation =>
             val withAlias = u.alias.map(Subquery(_, relation))
             withAlias.getOrElse(relation)
@@ -889,7 +889,7 @@ class Analyzer(
         _.transform {
           // Extracts children expressions of a WindowFunction (input parameters of
           // a WindowFunction).
-          case wf : WindowFunction =>
+          case wf: WindowFunction =>
             val newChildren = wf.children.map(extractExpr)
             wf.withNewChildren(newChildren)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
index 5c2aa3c06b..7c3d45b1e4 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/FunctionRegistry.scala
@@ -323,13 +323,13 @@ object FunctionRegistry {
     } else {
       // Otherwise, find an ctor method that matches the number of arguments, and use that.
       val params = Seq.fill(expressions.size)(classOf[Expression])
-      val f = Try(tag.runtimeClass.getDeclaredConstructor(params : _*)) match {
+      val f = Try(tag.runtimeClass.getDeclaredConstructor(params: _*)) match {
         case Success(e) =>
           e
         case Failure(e) =>
           throw new AnalysisException(s"Invalid number of arguments for function $name")
       }
-      Try(f.newInstance(expressions : _*).asInstanceOf[Expression]) match {
+      Try(f.newInstance(expressions: _*).asInstanceOf[Expression]) match {
         case Success(e) => e
         case Failure(e) => throw new AnalysisException(e.getMessage)
       }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
index dbcbd6854b..e326ea7827 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
@@ -529,7 +529,7 @@ object HiveTypeCoercion {
         if falseValues.contains(value) => And(IsNotNull(bool), Not(bool))
 
       case EqualTo(left @ BooleanType(), right @ NumericType()) =>
-        transform(left , right)
+        transform(left, right)
       case EqualTo(left @ NumericType(), right @ BooleanType()) =>
         transform(right, left)
       case EqualNullSafe(left @ BooleanType(), right @ NumericType()) =>
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala
index 5ac1984043..c4dbcb7b60 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/dsl/package.scala
@@ -61,9 +61,11 @@ package object dsl {
   trait ImplicitOperators {
     def expr: Expression
 
+    // scalastyle:off whitespacebeforetoken
     def unary_- : Expression = UnaryMinus(expr)
     def unary_! : Predicate = Not(expr)
     def unary_~ : Expression = BitwiseNot(expr)
+    // scalastyle:on whitespacebeforetoken
 
     def + (other: Expression): Expression = Add(expr, other)
     def - (other: Expression): Expression = Subtract(expr, other)
@@ -141,7 +143,7 @@ package object dsl {
     // Note that if we make ExpressionConversions an object rather than a trait, we can
     // then make this a value class to avoid the small penalty of runtime instantiation.
     def $(args: Any*): analysis.UnresolvedAttribute = {
-      analysis.UnresolvedAttribute(sc.s(args : _*))
+      analysis.UnresolvedAttribute(sc.s(args: _*))
     }
   }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala
index 05f746e72b..fa4c2d93ec 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoder.scala
@@ -44,7 +44,7 @@ import org.apache.spark.util.Utils
  * to the name `value`.
  */
 object ExpressionEncoder {
-  def apply[T : TypeTag](): ExpressionEncoder[T] = {
+  def apply[T: TypeTag](): ExpressionEncoder[T] = {
     // We convert the not-serializable TypeTag into StructType and ClassTag.
     val mirror = typeTag[T].mirror
     val cls = mirror.runtimeClass(typeTag[T].tpe)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala
index 9e283f5eb6..08ada1f38b 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala
@@ -27,7 +27,7 @@ package object encoders {
    * references from a specific schema.) This requirement allows us to preserve whether a given
    * object type is being bound by name or by ordinal when doing resolution.
    */
-  private[sql] def encoderFor[A : Encoder]: ExpressionEncoder[A] = implicitly[Encoder[A]] match {
+  private[sql] def encoderFor[A: Encoder]: ExpressionEncoder[A] = implicitly[Encoder[A]] match {
     case e: ExpressionEncoder[A] =>
       e.assertUnresolved()
       e
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
index d6219514b7..4ffbfa57e7 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
@@ -164,7 +164,7 @@ abstract class Expression extends TreeNode[Expression] {
    * Returns the hash for this expression. Expressions that compute the same result, even if
    * they differ cosmetically should return the same hash.
    */
-  def semanticHash() : Int = {
+  def semanticHash(): Int = {
     def computeHash(e: Seq[Any]): Int = {
       // See http://stackoverflow.com/questions/113511/hash-code-implementation
       var hash: Int = 17
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/stringExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/stringExpressions.scala
index 931f752b4d..bf41f85f79 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/stringExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/stringExpressions.scala
@@ -46,7 +46,7 @@ case class Concat(children: Seq[Expression]) extends Expression with ImplicitCas
 
   override def eval(input: InternalRow): Any = {
     val inputs = children.map(_.eval(input).asInstanceOf[UTF8String])
-    UTF8String.concat(inputs : _*)
+    UTF8String.concat(inputs: _*)
   }
 
   override protected def genCode(ctx: CodeGenContext, ev: GeneratedExpressionCode): String = {
@@ -99,7 +99,7 @@ case class ConcatWs(children: Seq[Expression])
         case null => Iterator(null.asInstanceOf[UTF8String])
       }
     }
-    UTF8String.concatWs(flatInputs.head, flatInputs.tail : _*)
+    UTF8String.concatWs(flatInputs.head, flatInputs.tail: _*)
   }
 
   override protected def genCode(ctx: CodeGenContext, ev: GeneratedExpressionCode): String = {
@@ -990,7 +990,7 @@ case class FormatNumber(x: Expression, d: Expression)
 
     def typeHelper(p: String): String = {
       x.dataType match {
-        case _ : DecimalType => s"""$p.toJavaBigDecimal()"""
+        case _: DecimalType => s"""$p.toJavaBigDecimal()"""
         case _ => s"$p"
       }
     }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala
index 64957db6b4..5489051e95 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicOperators.scala
@@ -496,7 +496,7 @@ case class MapPartitions[T, U](
 
 /** Factory for constructing new `AppendColumn` nodes. */
 object AppendColumns {
-  def apply[T, U : Encoder](
+  def apply[T, U: Encoder](
       func: T => U,
       tEncoder: ExpressionEncoder[T],
       child: LogicalPlan): AppendColumns[T, U] = {
@@ -522,7 +522,7 @@ case class AppendColumns[T, U](
 
 /** Factory for constructing new `MapGroups` nodes. */
 object MapGroups {
-  def apply[K, T, U : Encoder](
+  def apply[K, T, U: Encoder](
       func: (K, Iterator[T]) => TraversableOnce[U],
       kEncoder: ExpressionEncoder[K],
       tEncoder: ExpressionEncoder[T],
@@ -557,7 +557,7 @@ case class MapGroups[K, T, U](
 
 /** Factory for constructing new `CoGroup` nodes. */
 object CoGroup {
-  def apply[Key, Left, Right, Result : Encoder](
+  def apply[Key, Left, Right, Result: Encoder](
       func: (Key, Iterator[Left], Iterator[Right]) => TraversableOnce[Result],
       keyEnc: ExpressionEncoder[Key],
       leftEnc: ExpressionEncoder[Left],
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala
index 9fefc5656a..e4417e0955 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/NumberConverter.scala
@@ -122,7 +122,7 @@ object NumberConverter {
    * unsigned, otherwise it is signed.
    * NB: This logic is borrowed from org.apache.hadoop.hive.ql.ud.UDFConv
    */
-  def convert(n: Array[Byte] , fromBase: Int, toBase: Int ): UTF8String = {
+  def convert(n: Array[Byte], fromBase: Int, toBase: Int ): UTF8String = {
     if (fromBase < Character.MIN_RADIX || fromBase > Character.MAX_RADIX
         || Math.abs(toBase) < Character.MIN_RADIX || Math.abs(toBase) > Character.MAX_RADIX) {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala
index 520e344361..be7573b95d 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/ArrayType.scala
@@ -90,7 +90,7 @@ case class ArrayType(elementType: DataType, containsNull: Boolean) extends DataT
   private[sql] lazy val interpretedOrdering: Ordering[ArrayData] = new Ordering[ArrayData] {
     private[this] val elementOrdering: Ordering[Any] = elementType match {
       case dt: AtomicType => dt.ordering.asInstanceOf[Ordering[Any]]
-      case a : ArrayType => a.interpretedOrdering.asInstanceOf[Ordering[Any]]
+      case a: ArrayType => a.interpretedOrdering.asInstanceOf[Ordering[Any]]
       case s: StructType => s.interpretedOrdering.asInstanceOf[Ordering[Any]]
       case other =>
         throw new IllegalArgumentException(s"Type $other does not support ordered operations")
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala
index 38ce1604b1..864b47a2a0 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/Decimal.scala
@@ -310,6 +310,7 @@ final class Decimal extends Ordered[Decimal] with Serializable {
 
   def remainder(that: Decimal): Decimal = this % that
 
+  // scalastyle:off whitespacebeforetoken
   def unary_- : Decimal = {
     if (decimalVal.ne(null)) {
       Decimal(-decimalVal, precision, scale)
@@ -317,6 +318,7 @@ final class Decimal extends Ordered[Decimal] with Serializable {
       Decimal(-longVal, precision, scale)
     }
   }
+  // scalastyle:on whitespacebeforetoken
 
   def abs: Decimal = if (this.compare(Decimal.ZERO) < 0) this.unary_- else this
 
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/EncoderErrorMessageSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/EncoderErrorMessageSuite.scala
index 8c766ef829..a1c4a861c6 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/EncoderErrorMessageSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/EncoderErrorMessageSuite.scala
@@ -98,5 +98,5 @@ class EncoderErrorMessageSuite extends SparkFunSuite {
         s"""array element class: "${clsName[NonEncodable]}""""))
   }
 
-  private def clsName[T : ClassTag]: String = implicitly[ClassTag[T]].runtimeClass.getName
+  private def clsName[T: ClassTag]: String = implicitly[ClassTag[T]].runtimeClass.getName
 }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoderSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoderSuite.scala
index 88c558d80a..67f4dc98be 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoderSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/ExpressionEncoderSuite.scala
@@ -80,7 +80,7 @@ class JavaSerializable(val value: Int) extends Serializable {
 class ExpressionEncoderSuite extends SparkFunSuite {
   OuterScopes.outerScopes.put(getClass.getName, this)
 
-  implicit def encoder[T : TypeTag]: ExpressionEncoder[T] = ExpressionEncoder()
+  implicit def encoder[T: TypeTag]: ExpressionEncoder[T] = ExpressionEncoder()
 
   // test flat encoders
   encodeDecodeTest(false, "primitive boolean")
@@ -145,7 +145,7 @@ class ExpressionEncoderSuite extends SparkFunSuite {
     encoderFor(Encoders.javaSerialization[JavaSerializable]))
 
   // test product encoders
-  private def productTest[T <: Product : ExpressionEncoder](input: T): Unit = {
+  private def productTest[T <: Product: ExpressionEncoder](input: T): Unit = {
     encodeDecodeTest(input, input.getClass.getSimpleName)
   }
 
@@ -286,7 +286,7 @@ class ExpressionEncoderSuite extends SparkFunSuite {
     }
   }
 
-  private def encodeDecodeTest[T : ExpressionEncoder](
+  private def encodeDecodeTest[T: ExpressionEncoder](
       input: T,
       testName: String): Unit = {
     test(s"encode/decode for $testName: $input") {
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala
index 000a3b7ecb..6932f185b9 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/BooleanSimplificationSuite.scala
@@ -80,7 +80,7 @@ class BooleanSimplificationSuite extends PlanTest with PredicateHelper {
     checkCondition(('a < 2 || 'a > 3 || 'b > 5) && 'a < 2, 'a < 2)
 
-    checkCondition('a < 2 && ('a < 2 || 'a > 3 || 'b > 5) , 'a < 2)
+    checkCondition('a < 2 && ('a < 2 || 'a > 3 || 'b > 5), 'a < 2)
 
     checkCondition(('a < 2 || 'b > 3) && ('a < 2 || 'c > 5), 'a < 2 || ('b > 3 && 'c > 5))
```
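The two `scalastyle:off whitespacebeforetoken` guards (in `dsl/package.scala` and `Decimal.scala`) mark the one place the rule cannot apply: methods whose names end in an operator character. In `def unary_- : Decimal`, the space before `:` is mandatory, because without it the colon would be lexed as part of the identifier (`unary_-:`). A minimal sketch with a hypothetical `Meters` class, not from the patch:

```scala
object UnaryMinusExample {
  final case class Meters(value: Double) {
    // The space before ':' is required here: 'unary_-:' would otherwise be
    // read as a single identifier ending in the operator characters '-:'.
    def unary_- : Meters = Meters(-value)
  }

  def main(args: Array[String]): Unit = {
    // Prefix '-' on a value desugars to a call to its unary_- method.
    println(-Meters(3.5)) // prints Meters(-3.5)
  }
}
```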