author     Kousuke Saruta <sarutak@oss.nttdata.co.jp>  2016-01-12 00:51:00 -0800
committer  Reynold Xin <rxin@databricks.com>  2016-01-12 00:51:00 -0800
commit     8cfa218f4f1b05f4d076ec15dd0a033ad3e4500d (patch)
tree       31e99044d63b89311821df7a61e6f4882114677a /sql/core
parent     112abf9100f05be436e449817468c50174712c78 (diff)
[SPARK-12692][BUILD][SQL] Scala style: Fix the style violation (Space before "," or ":")
Fix the style violation (space before "," and ":"). This PR is a follow-up to #10643.

Author: Kousuke Saruta <sarutak@oss.nttdata.co.jp>

Closes #10718 from sarutak/SPARK-12692-followup-sql.
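For context, the scalastyle rule in question rejects whitespace immediately before "," and ":". A minimal illustrative sketch of the pattern fixed throughout this patch (the object and method names below are hypothetical, not taken from the Spark sources):

    // Flagged by the rule:  def pair[T : Ordering](a: T , b: T) : Seq[T] = ...
    // Accepted form, matching the changes in this patch:
    object StyleExample {
      def pair[T: Ordering](a: T, b: T): Seq[T] = Seq(a, b)
      def asList(xs: Seq[Int]): java.util.List[Int] = java.util.Arrays.asList(xs: _*)
    }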
Diffstat (limited to 'sql/core')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/Column.scala | 4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala | 36
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameNaFunctions.scala | 8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala | 6
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala | 18
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala | 10
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala | 8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala | 12
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala | 10
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/api/r/SQLUtils.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/Queryable.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TypedAggregateExpression.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProduct.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/stat/FrequentItems.scala | 4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala | 8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/functions.scala | 6
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/jdbc/AggregatedDialect.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala | 10
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala | 7
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala | 4
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala | 26
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala | 2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala | 4
30 files changed, 106 insertions, 101 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
index e8c61d6e01..a434d03332 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
@@ -152,7 +152,7 @@ class Column(protected[sql] val expr: Expression) extends Logging {
* results into the correct JVM types.
* @since 1.6.0
*/
- def as[U : Encoder]: TypedColumn[Any, U] = new TypedColumn[Any, U](expr, encoderFor[U])
+ def as[U: Encoder]: TypedColumn[Any, U] = new TypedColumn[Any, U](expr, encoderFor[U])
/**
* Extracts a value or values from a complex type.
@@ -171,6 +171,7 @@ class Column(protected[sql] val expr: Expression) extends Logging {
UnresolvedExtractValue(expr, lit(extraction).expr)
}
+ // scalastyle:off whitespacebeforetoken
/**
* Unary minus, i.e. negate the expression.
* {{{
@@ -202,6 +203,7 @@ class Column(protected[sql] val expr: Expression) extends Logging {
* @since 1.3.0
*/
def unary_! : Column = withExpr { Not(expr) }
+ // scalastyle:on whitespacebeforetoken
/**
* Equality test.
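Note on the scalastyle:off/on whitespacebeforetoken markers added around Column.unary_- and Column.unary_! above: the space before ":" is kept there because Scala requires it when an identifier ends in an operator character; without the space, the ":" would be lexed as part of the identifier itself. A minimal sketch under that assumption (the Flag class is hypothetical, not from the patch):

    object TokenExample {
      class Flag(value: Boolean) {
        // The space before ":" is mandatory here: `def unary_!: Flag` would lex
        // `unary_!:` as one identifier instead of declaring the unary-! operator.
        def unary_! : Flag = new Flag(!value)
      }
    }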
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index 60d2f05b86..fac8950aee 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -204,7 +204,7 @@ class DataFrame private[sql](
* @since 1.6.0
*/
@Experimental
- def as[U : Encoder]: Dataset[U] = new Dataset[U](sqlContext, logicalPlan)
+ def as[U: Encoder]: Dataset[U] = new Dataset[U](sqlContext, logicalPlan)
/**
* Returns a new [[DataFrame]] with columns renamed. This can be quite convenient in conversion
@@ -227,7 +227,7 @@ class DataFrame private[sql](
val newCols = logicalPlan.output.zip(colNames).map { case (oldAttribute, newName) =>
Column(oldAttribute).as(newName)
}
- select(newCols : _*)
+ select(newCols: _*)
}
/**
@@ -579,7 +579,7 @@ class DataFrame private[sql](
*/
@scala.annotation.varargs
def sortWithinPartitions(sortCol: String, sortCols: String*): DataFrame = {
- sortWithinPartitions((sortCol +: sortCols).map(Column(_)) : _*)
+ sortWithinPartitions((sortCol +: sortCols).map(Column(_)): _*)
}
/**
@@ -608,7 +608,7 @@ class DataFrame private[sql](
*/
@scala.annotation.varargs
def sort(sortCol: String, sortCols: String*): DataFrame = {
- sort((sortCol +: sortCols).map(apply) : _*)
+ sort((sortCol +: sortCols).map(apply): _*)
}
/**
@@ -631,7 +631,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
@scala.annotation.varargs
- def orderBy(sortCol: String, sortCols: String*): DataFrame = sort(sortCol, sortCols : _*)
+ def orderBy(sortCol: String, sortCols: String*): DataFrame = sort(sortCol, sortCols: _*)
/**
* Returns a new [[DataFrame]] sorted by the given expressions.
@@ -640,7 +640,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
@scala.annotation.varargs
- def orderBy(sortExprs: Column*): DataFrame = sort(sortExprs : _*)
+ def orderBy(sortExprs: Column*): DataFrame = sort(sortExprs: _*)
/**
* Selects column based on the column name and return it as a [[Column]].
@@ -720,7 +720,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
@scala.annotation.varargs
- def select(col: String, cols: String*): DataFrame = select((col +: cols).map(Column(_)) : _*)
+ def select(col: String, cols: String*): DataFrame = select((col +: cols).map(Column(_)): _*)
/**
* Selects a set of SQL expressions. This is a variant of `select` that accepts
@@ -948,7 +948,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
def agg(aggExpr: (String, String), aggExprs: (String, String)*): DataFrame = {
- groupBy().agg(aggExpr, aggExprs : _*)
+ groupBy().agg(aggExpr, aggExprs: _*)
}
/**
@@ -986,7 +986,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
@scala.annotation.varargs
- def agg(expr: Column, exprs: Column*): DataFrame = groupBy().agg(expr, exprs : _*)
+ def agg(expr: Column, exprs: Column*): DataFrame = groupBy().agg(expr, exprs: _*)
/**
* Returns a new [[DataFrame]] by taking the first `n` rows. The difference between this function
@@ -1118,7 +1118,7 @@ class DataFrame private[sql](
* @group dfops
* @since 1.3.0
*/
- def explode[A <: Product : TypeTag](input: Column*)(f: Row => TraversableOnce[A]): DataFrame = {
+ def explode[A <: Product: TypeTag](input: Column*)(f: Row => TraversableOnce[A]): DataFrame = {
val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType]
val elementTypes = schema.toAttributes.map {
@@ -1147,7 +1147,7 @@ class DataFrame private[sql](
* @group dfops
* @since 1.3.0
*/
- def explode[A, B : TypeTag](inputColumn: String, outputColumn: String)(f: A => TraversableOnce[B])
+ def explode[A, B: TypeTag](inputColumn: String, outputColumn: String)(f: A => TraversableOnce[B])
: DataFrame = {
val dataType = ScalaReflection.schemaFor[B].dataType
val attributes = AttributeReference(outputColumn, dataType)() :: Nil
@@ -1186,7 +1186,7 @@ class DataFrame private[sql](
Column(field)
}
}
- select(columns : _*)
+ select(columns: _*)
} else {
select(Column("*"), col.as(colName))
}
@@ -1207,7 +1207,7 @@ class DataFrame private[sql](
Column(field)
}
}
- select(columns : _*)
+ select(columns: _*)
} else {
select(Column("*"), col.as(colName, metadata))
}
@@ -1231,7 +1231,7 @@ class DataFrame private[sql](
Column(col)
}
}
- select(columns : _*)
+ select(columns: _*)
} else {
this
}
@@ -1244,7 +1244,7 @@ class DataFrame private[sql](
* @since 1.4.0
*/
def drop(colName: String): DataFrame = {
- drop(Seq(colName) : _*)
+ drop(Seq(colName): _*)
}
/**
@@ -1283,7 +1283,7 @@ class DataFrame private[sql](
val colsAfterDrop = attrs.filter { attr =>
attr != expression
}.map(attr => Column(attr))
- select(colsAfterDrop : _*)
+ select(colsAfterDrop: _*)
}
/**
@@ -1479,7 +1479,7 @@ class DataFrame private[sql](
* @group action
* @since 1.6.0
*/
- def takeAsList(n: Int): java.util.List[Row] = java.util.Arrays.asList(take(n) : _*)
+ def takeAsList(n: Int): java.util.List[Row] = java.util.Arrays.asList(take(n): _*)
/**
* Returns an array that contains all of [[Row]]s in this [[DataFrame]].
@@ -1505,7 +1505,7 @@ class DataFrame private[sql](
*/
def collectAsList(): java.util.List[Row] = withCallback("collectAsList", this) { _ =>
withNewExecutionId {
- java.util.Arrays.asList(rdd.collect() : _*)
+ java.util.Arrays.asList(rdd.collect(): _*)
}
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala
index 3b30337f1f..4441a634be 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala
@@ -33,5 +33,5 @@ case class DataFrameHolder private[sql](private val df: DataFrame) {
// `rdd.toDF("1")` as invoking this toDF and then apply on the returned DataFrame.
def toDF(): DataFrame = df
- def toDF(colNames: String*): DataFrame = df.toDF(colNames : _*)
+ def toDF(colNames: String*): DataFrame = df.toDF(colNames: _*)
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameNaFunctions.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameNaFunctions.scala
index f7be5f6b37..43500b09e0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameNaFunctions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameNaFunctions.scala
@@ -164,7 +164,7 @@ final class DataFrameNaFunctions private[sql](df: DataFrame) {
df.col(f.name)
}
}
- df.select(projections : _*)
+ df.select(projections: _*)
}
/**
@@ -191,7 +191,7 @@ final class DataFrameNaFunctions private[sql](df: DataFrame) {
df.col(f.name)
}
}
- df.select(projections : _*)
+ df.select(projections: _*)
}
/**
@@ -364,7 +364,7 @@ final class DataFrameNaFunctions private[sql](df: DataFrame) {
df.col(f.name)
}
}
- df.select(projections : _*)
+ df.select(projections: _*)
}
private def fill0(values: Seq[(String, Any)]): DataFrame = {
@@ -395,7 +395,7 @@ final class DataFrameNaFunctions private[sql](df: DataFrame) {
}
}.getOrElse(df.col(f.name))
}
- df.select(projections : _*)
+ df.select(projections: _*)
}
/**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index d948e48942..1ed451d5a8 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -203,7 +203,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
predicates: Array[String],
connectionProperties: Properties): DataFrame = {
val parts: Array[Partition] = predicates.zipWithIndex.map { case (part, i) =>
- JDBCPartition(part, i) : Partition
+ JDBCPartition(part, i): Partition
}
jdbc(url, table, parts, connectionProperties)
}
@@ -262,7 +262,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
*
* @since 1.6.0
*/
- def json(paths: String*): DataFrame = format("json").load(paths : _*)
+ def json(paths: String*): DataFrame = format("json").load(paths: _*)
/**
* Loads an `JavaRDD[String]` storing JSON objects (one object per record) and
@@ -355,7 +355,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
* @since 1.6.0
*/
@scala.annotation.varargs
- def text(paths: String*): DataFrame = format("text").load(paths : _*)
+ def text(paths: String*): DataFrame = format("text").load(paths: _*)
///////////////////////////////////////////////////////////////////////////////////////
// Builder pattern config options
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index 42f01e9359..9ffb5b94b2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -131,7 +131,7 @@ class Dataset[T] private[sql](
* along with `alias` or `as` to rearrange or rename as required.
* @since 1.6.0
*/
- def as[U : Encoder]: Dataset[U] = {
+ def as[U: Encoder]: Dataset[U] = {
new Dataset(sqlContext, queryExecution, encoderFor[U])
}
@@ -318,7 +318,7 @@ class Dataset[T] private[sql](
* Returns a new [[Dataset]] that contains the result of applying `func` to each element.
* @since 1.6.0
*/
- def map[U : Encoder](func: T => U): Dataset[U] = mapPartitions(_.map(func))
+ def map[U: Encoder](func: T => U): Dataset[U] = mapPartitions(_.map(func))
/**
* (Java-specific)
@@ -333,7 +333,7 @@ class Dataset[T] private[sql](
* Returns a new [[Dataset]] that contains the result of applying `func` to each partition.
* @since 1.6.0
*/
- def mapPartitions[U : Encoder](func: Iterator[T] => Iterator[U]): Dataset[U] = {
+ def mapPartitions[U: Encoder](func: Iterator[T] => Iterator[U]): Dataset[U] = {
new Dataset[U](
sqlContext,
MapPartitions[T, U](
@@ -360,7 +360,7 @@ class Dataset[T] private[sql](
* and then flattening the results.
* @since 1.6.0
*/
- def flatMap[U : Encoder](func: T => TraversableOnce[U]): Dataset[U] =
+ def flatMap[U: Encoder](func: T => TraversableOnce[U]): Dataset[U] =
mapPartitions(_.flatMap(func))
/**
@@ -432,7 +432,7 @@ class Dataset[T] private[sql](
* Returns a [[GroupedDataset]] where the data is grouped by the given key `func`.
* @since 1.6.0
*/
- def groupBy[K : Encoder](func: T => K): GroupedDataset[K, T] = {
+ def groupBy[K: Encoder](func: T => K): GroupedDataset[K, T] = {
val inputPlan = logicalPlan
val withGroupingKey = AppendColumns(func, resolvedTEncoder, inputPlan)
val executed = sqlContext.executePlan(withGroupingKey)
@@ -566,14 +566,14 @@ class Dataset[T] private[sql](
* Returns a new [[Dataset]] by sampling a fraction of records.
* @since 1.6.0
*/
- def sample(withReplacement: Boolean, fraction: Double, seed: Long) : Dataset[T] =
+ def sample(withReplacement: Boolean, fraction: Double, seed: Long): Dataset[T] =
withPlan(Sample(0.0, fraction, withReplacement, seed, _))
/**
* Returns a new [[Dataset]] by sampling a fraction of records, using a random seed.
* @since 1.6.0
*/
- def sample(withReplacement: Boolean, fraction: Double) : Dataset[T] = {
+ def sample(withReplacement: Boolean, fraction: Double): Dataset[T] = {
sample(withReplacement, fraction, Utils.random.nextLong)
}
@@ -731,7 +731,7 @@ class Dataset[T] private[sql](
* a very large `num` can crash the driver process with OutOfMemoryError.
* @since 1.6.0
*/
- def takeAsList(num: Int): java.util.List[T] = java.util.Arrays.asList(take(num) : _*)
+ def takeAsList(num: Int): java.util.List[T] = java.util.Arrays.asList(take(num): _*)
/**
* Persist this [[Dataset]] with the default storage level (`MEMORY_AND_DISK`).
@@ -786,7 +786,7 @@ class Dataset[T] private[sql](
private[sql] def withPlan(f: LogicalPlan => LogicalPlan): Dataset[T] =
new Dataset[T](sqlContext, sqlContext.executePlan(f(logicalPlan)), tEncoder)
- private[sql] def withPlan[R : Encoder](
+ private[sql] def withPlan[R: Encoder](
other: Dataset[_])(
f: (LogicalPlan, LogicalPlan) => LogicalPlan): Dataset[R] =
new Dataset[R](sqlContext, f(logicalPlan, other.logicalPlan))
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala b/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
index c74ef2c035..f5cbf013bc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
@@ -229,7 +229,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def mean(colNames: String*): DataFrame = {
- aggregateNumericColumns(colNames : _*)(Average)
+ aggregateNumericColumns(colNames: _*)(Average)
}
/**
@@ -241,7 +241,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def max(colNames: String*): DataFrame = {
- aggregateNumericColumns(colNames : _*)(Max)
+ aggregateNumericColumns(colNames: _*)(Max)
}
/**
@@ -253,7 +253,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def avg(colNames: String*): DataFrame = {
- aggregateNumericColumns(colNames : _*)(Average)
+ aggregateNumericColumns(colNames: _*)(Average)
}
/**
@@ -265,7 +265,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def min(colNames: String*): DataFrame = {
- aggregateNumericColumns(colNames : _*)(Min)
+ aggregateNumericColumns(colNames: _*)(Min)
}
/**
@@ -277,7 +277,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def sum(colNames: String*): DataFrame = {
- aggregateNumericColumns(colNames : _*)(Sum)
+ aggregateNumericColumns(colNames: _*)(Sum)
}
/**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala
index a819ddceb1..12179367fa 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/GroupedDataset.scala
@@ -73,7 +73,7 @@ class GroupedDataset[K, V] private[sql](
*
* @since 1.6.0
*/
- def keyAs[L : Encoder]: GroupedDataset[L, V] =
+ def keyAs[L: Encoder]: GroupedDataset[L, V] =
new GroupedDataset(
encoderFor[L],
unresolvedVEncoder,
@@ -110,7 +110,7 @@ class GroupedDataset[K, V] private[sql](
*
* @since 1.6.0
*/
- def flatMapGroups[U : Encoder](f: (K, Iterator[V]) => TraversableOnce[U]): Dataset[U] = {
+ def flatMapGroups[U: Encoder](f: (K, Iterator[V]) => TraversableOnce[U]): Dataset[U] = {
new Dataset[U](
sqlContext,
MapGroups(
@@ -158,7 +158,7 @@ class GroupedDataset[K, V] private[sql](
*
* @since 1.6.0
*/
- def mapGroups[U : Encoder](f: (K, Iterator[V]) => U): Dataset[U] = {
+ def mapGroups[U: Encoder](f: (K, Iterator[V]) => U): Dataset[U] = {
val func = (key: K, it: Iterator[V]) => Iterator(f(key, it))
flatMapGroups(func)
}
@@ -302,7 +302,7 @@ class GroupedDataset[K, V] private[sql](
*
* @since 1.6.0
*/
- def cogroup[U, R : Encoder](
+ def cogroup[U, R: Encoder](
other: GroupedDataset[K, U])(
f: (K, Iterator[V], Iterator[U]) => TraversableOnce[R]): Dataset[R] = {
new Dataset[R](
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index e827427c19..61c74f8340 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -409,7 +409,7 @@ class SQLContext private[sql](
* @since 1.3.0
*/
@Experimental
- def createDataFrame[A <: Product : TypeTag](rdd: RDD[A]): DataFrame = {
+ def createDataFrame[A <: Product: TypeTag](rdd: RDD[A]): DataFrame = {
SQLContext.setActive(self)
val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType]
val attributeSeq = schema.toAttributes
@@ -425,7 +425,7 @@ class SQLContext private[sql](
* @since 1.3.0
*/
@Experimental
- def createDataFrame[A <: Product : TypeTag](data: Seq[A]): DataFrame = {
+ def createDataFrame[A <: Product: TypeTag](data: Seq[A]): DataFrame = {
SQLContext.setActive(self)
val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType]
val attributeSeq = schema.toAttributes
@@ -498,7 +498,7 @@ class SQLContext private[sql](
}
- def createDataset[T : Encoder](data: Seq[T]): Dataset[T] = {
+ def createDataset[T: Encoder](data: Seq[T]): Dataset[T] = {
val enc = encoderFor[T]
val attributes = enc.schema.toAttributes
val encoded = data.map(d => enc.toRow(d).copy())
@@ -507,7 +507,7 @@ class SQLContext private[sql](
new Dataset[T](this, plan)
}
- def createDataset[T : Encoder](data: RDD[T]): Dataset[T] = {
+ def createDataset[T: Encoder](data: RDD[T]): Dataset[T] = {
val enc = encoderFor[T]
val attributes = enc.schema.toAttributes
val encoded = data.map(d => enc.toRow(d))
@@ -516,7 +516,7 @@ class SQLContext private[sql](
new Dataset[T](this, plan)
}
- def createDataset[T : Encoder](data: java.util.List[T]): Dataset[T] = {
+ def createDataset[T: Encoder](data: java.util.List[T]): Dataset[T] = {
createDataset(data.asScala)
}
@@ -945,7 +945,7 @@ class SQLContext private[sql](
}
}
- // Register a succesfully instantiatd context to the singleton. This should be at the end of
+ // Register a successfully instantiated context to the singleton. This should be at the end of
// the class definition so that the singleton is updated only if there is no exception in the
// construction of the instance.
sparkContext.addSparkListener(new SparkListener {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
index ab414799f1..a7f7997df1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
@@ -37,7 +37,7 @@ abstract class SQLImplicits {
protected def _sqlContext: SQLContext
/** @since 1.6.0 */
- implicit def newProductEncoder[T <: Product : TypeTag]: Encoder[T] = ExpressionEncoder()
+ implicit def newProductEncoder[T <: Product: TypeTag]: Encoder[T] = ExpressionEncoder()
/** @since 1.6.0 */
implicit def newIntEncoder: Encoder[Int] = ExpressionEncoder()
@@ -67,7 +67,7 @@ abstract class SQLImplicits {
* Creates a [[Dataset]] from an RDD.
* @since 1.6.0
*/
- implicit def rddToDatasetHolder[T : Encoder](rdd: RDD[T]): DatasetHolder[T] = {
+ implicit def rddToDatasetHolder[T: Encoder](rdd: RDD[T]): DatasetHolder[T] = {
DatasetHolder(_sqlContext.createDataset(rdd))
}
@@ -75,7 +75,7 @@ abstract class SQLImplicits {
* Creates a [[Dataset]] from a local Seq.
* @since 1.6.0
*/
- implicit def localSeqToDatasetHolder[T : Encoder](s: Seq[T]): DatasetHolder[T] = {
+ implicit def localSeqToDatasetHolder[T: Encoder](s: Seq[T]): DatasetHolder[T] = {
DatasetHolder(_sqlContext.createDataset(s))
}
@@ -89,7 +89,7 @@ abstract class SQLImplicits {
* Creates a DataFrame from an RDD of Product (e.g. case classes, tuples).
* @since 1.3.0
*/
- implicit def rddToDataFrameHolder[A <: Product : TypeTag](rdd: RDD[A]): DataFrameHolder = {
+ implicit def rddToDataFrameHolder[A <: Product: TypeTag](rdd: RDD[A]): DataFrameHolder = {
DataFrameHolder(_sqlContext.createDataFrame(rdd))
}
@@ -97,7 +97,7 @@ abstract class SQLImplicits {
* Creates a DataFrame from a local Seq of Product.
* @since 1.3.0
*/
- implicit def localSeqToDataFrameHolder[A <: Product : TypeTag](data: Seq[A]): DataFrameHolder =
+ implicit def localSeqToDataFrameHolder[A <: Product: TypeTag](data: Seq[A]): DataFrameHolder =
{
DataFrameHolder(_sqlContext.createDataFrame(data))
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/api/r/SQLUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/api/r/SQLUtils.scala
index d912aeb70d..a8e6a40169 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/api/r/SQLUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/api/r/SQLUtils.scala
@@ -39,7 +39,7 @@ private[r] object SQLUtils {
new JavaSparkContext(sqlCtx.sparkContext)
}
- def createStructType(fields : Seq[StructField]): StructType = {
+ def createStructType(fields: Seq[StructField]): StructType = {
StructType(fields)
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
index 6b10057707..058d147c7d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Exchange.scala
@@ -223,7 +223,7 @@ case class Exchange(
new ShuffledRowRDD(shuffleDependency, specifiedPartitionStartIndices)
}
- protected override def doExecute(): RDD[InternalRow] = attachTree(this , "execute") {
+ protected override def doExecute(): RDD[InternalRow] = attachTree(this, "execute") {
coordinator match {
case Some(exchangeCoordinator) =>
val shuffleRDD = exchangeCoordinator.postShuffleRDD(this)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/Queryable.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/Queryable.scala
index 38263af0f7..bb55161477 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/Queryable.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/Queryable.scala
@@ -71,7 +71,7 @@ private[sql] trait Queryable {
private[sql] def formatString (
rows: Seq[Seq[String]],
numRows: Int,
- hasMoreData : Boolean,
+ hasMoreData: Boolean,
truncate: Boolean = true): String = {
val sb = new StringBuilder
val numCols = schema.fieldNames.length
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TypedAggregateExpression.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TypedAggregateExpression.scala
index 1df38f7ff5..b5ac530444 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TypedAggregateExpression.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TypedAggregateExpression.scala
@@ -29,7 +29,7 @@ import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.types._
object TypedAggregateExpression {
- def apply[A, B : Encoder, C : Encoder](
+ def apply[A, B: Encoder, C: Encoder](
aggregator: Aggregator[A, B, C]): TypedAggregateExpression = {
new TypedAggregateExpression(
aggregator.asInstanceOf[Aggregator[Any, Any, Any]],
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala
index d45d2db62f..d5e0d80076 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala
@@ -256,7 +256,7 @@ private[spark] class SqlNewHadoopRDD[V: ClassTag](
val infos = c.newGetLocationInfo.invoke(split).asInstanceOf[Array[AnyRef]]
Some(HadoopRDD.convertSplitLocationInfo(infos))
} catch {
- case e : Exception =>
+ case e: Exception =>
logDebug("Failed to use InputSplit#getLocationInfo.", e)
None
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
index fb97a03df6..c4b125e9d5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
@@ -557,7 +557,7 @@ private[parquet] object CatalystSchemaConverter {
}
}
- private def computeMinBytesForPrecision(precision : Int) : Int = {
+ private def computeMinBytesForPrecision(precision: Int): Int = {
var numBytes = 1
while (math.pow(2.0, 8 * numBytes - 1) < math.pow(10.0, precision)) {
numBytes += 1
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProduct.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProduct.scala
index 93d32e1fb9..a567457dba 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProduct.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/CartesianProduct.scala
@@ -34,7 +34,7 @@ import org.apache.spark.util.collection.unsafe.sort.UnsafeExternalSorter
* materialize the right RDD (in case of the right RDD is nondeterministic).
*/
private[spark]
-class UnsafeCartesianRDD(left : RDD[UnsafeRow], right : RDD[UnsafeRow], numFieldsOfRight: Int)
+class UnsafeCartesianRDD(left: RDD[UnsafeRow], right: RDD[UnsafeRow], numFieldsOfRight: Int)
extends CartesianRDD[UnsafeRow, UnsafeRow](left.sparkContext, left, right) {
override def compute(split: Partition, context: TaskContext): Iterator[(UnsafeRow, UnsafeRow)] = {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala
index 52735c9d7f..8c68d9ee0a 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/metric/SQLMetrics.scala
@@ -64,7 +64,7 @@ private[sql] trait SQLMetricValue[T] extends Serializable {
/**
* A wrapper of Long to avoid boxing and unboxing when using Accumulator
*/
-private[sql] class LongSQLMetricValue(private var _value : Long) extends SQLMetricValue[Long] {
+private[sql] class LongSQLMetricValue(private var _value: Long) extends SQLMetricValue[Long] {
def add(incr: Long): LongSQLMetricValue = {
_value += incr
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/FrequentItems.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/FrequentItems.scala
index a191759813..a4cb54e2bf 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/FrequentItems.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/stat/FrequentItems.scala
@@ -94,7 +94,7 @@ private[sql] object FrequentItems extends Logging {
(name, originalSchema.fields(index).dataType)
}.toArray
- val freqItems = df.select(cols.map(Column(_)) : _*).rdd.aggregate(countMaps)(
+ val freqItems = df.select(cols.map(Column(_)): _*).rdd.aggregate(countMaps)(
seqOp = (counts, row) => {
var i = 0
while (i < numCols) {
@@ -115,7 +115,7 @@ private[sql] object FrequentItems extends Logging {
}
)
val justItems = freqItems.map(m => m.baseMap.keys.toArray)
- val resultRow = Row(justItems : _*)
+ val resultRow = Row(justItems: _*)
// append frequent Items to the column name for easy debugging
val outputCols = colInfo.map { v =>
StructField(v._1 + "_freqItems", ArrayType(v._2, false))
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala b/sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala
index e9b60841fc..05a9f377b9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/expressions/Window.scala
@@ -44,7 +44,7 @@ object Window {
*/
@scala.annotation.varargs
def partitionBy(colName: String, colNames: String*): WindowSpec = {
- spec.partitionBy(colName, colNames : _*)
+ spec.partitionBy(colName, colNames: _*)
}
/**
@@ -53,7 +53,7 @@ object Window {
*/
@scala.annotation.varargs
def partitionBy(cols: Column*): WindowSpec = {
- spec.partitionBy(cols : _*)
+ spec.partitionBy(cols: _*)
}
/**
@@ -62,7 +62,7 @@ object Window {
*/
@scala.annotation.varargs
def orderBy(colName: String, colNames: String*): WindowSpec = {
- spec.orderBy(colName, colNames : _*)
+ spec.orderBy(colName, colNames: _*)
}
/**
@@ -71,7 +71,7 @@ object Window {
*/
@scala.annotation.varargs
def orderBy(cols: Column*): WindowSpec = {
- spec.orderBy(cols : _*)
+ spec.orderBy(cols: _*)
}
private def spec: WindowSpec = {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 592d79df31..1ac62883a6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -306,7 +306,7 @@ object functions extends LegacyFunctions {
*/
@scala.annotation.varargs
def countDistinct(columnName: String, columnNames: String*): Column =
- countDistinct(Column(columnName), columnNames.map(Column.apply) : _*)
+ countDistinct(Column(columnName), columnNames.map(Column.apply): _*)
/**
* Aggregate function: returns the first value in a group.
@@ -768,7 +768,7 @@ object functions extends LegacyFunctions {
*/
@scala.annotation.varargs
def array(colName: String, colNames: String*): Column = {
- array((colName +: colNames).map(col) : _*)
+ array((colName +: colNames).map(col): _*)
}
/**
@@ -977,7 +977,7 @@ object functions extends LegacyFunctions {
*/
@scala.annotation.varargs
def struct(colName: String, colNames: String*): Column = {
- struct((colName +: colNames).map(col) : _*)
+ struct((colName +: colNames).map(col): _*)
}
/**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/AggregatedDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/AggregatedDialect.scala
index 467d8d62d1..d2c31d6e04 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/AggregatedDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/AggregatedDialect.scala
@@ -30,7 +30,7 @@ private class AggregatedDialect(dialects: List[JdbcDialect]) extends JdbcDialect
require(dialects.nonEmpty)
- override def canHandle(url : String): Boolean =
+ override def canHandle(url: String): Boolean =
dialects.map(_.canHandle(url)).reduce(_ && _)
override def getCatalystType(
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
index ca2d909e2c..8d58321d48 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
@@ -31,7 +31,7 @@ import org.apache.spark.sql.types._
* send a null value to the database.
*/
@DeveloperApi
-case class JdbcType(databaseTypeDefinition : String, jdbcNullType : Int)
+case class JdbcType(databaseTypeDefinition: String, jdbcNullType: Int)
/**
* :: DeveloperApi ::
@@ -60,7 +60,7 @@ abstract class JdbcDialect extends Serializable {
* @return True if the dialect can be applied on the given jdbc url.
* @throws NullPointerException if the url is null.
*/
- def canHandle(url : String): Boolean
+ def canHandle(url: String): Boolean
/**
* Get the custom datatype mapping for the given jdbc meta information.
@@ -130,7 +130,7 @@ object JdbcDialects {
*
* @param dialect The new dialect.
*/
- def registerDialect(dialect: JdbcDialect) : Unit = {
+ def registerDialect(dialect: JdbcDialect): Unit = {
dialects = dialect :: dialects.filterNot(_ == dialect)
}
@@ -139,7 +139,7 @@ object JdbcDialects {
*
* @param dialect The jdbc dialect.
*/
- def unregisterDialect(dialect : JdbcDialect) : Unit = {
+ def unregisterDialect(dialect: JdbcDialect): Unit = {
dialects = dialects.filterNot(_ == dialect)
}
@@ -169,5 +169,5 @@ object JdbcDialects {
* NOOP dialect object, always returning the neutral element.
*/
private object NoopDialect extends JdbcDialect {
- override def canHandle(url : String): Boolean = true
+ override def canHandle(url: String): Boolean = true
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
index e1717049f3..faae54e605 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
@@ -23,10 +23,13 @@ import org.apache.spark.sql.types.{BooleanType, DataType, LongType, MetadataBuil
private case object MySQLDialect extends JdbcDialect {
- override def canHandle(url : String): Boolean = url.startsWith("jdbc:mysql")
+ override def canHandle(url: String): Boolean = url.startsWith("jdbc:mysql")
override def getCatalystType(
- sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
+ sqlType: Int,
+ typeName: String,
+ size: Int,
+ md: MetadataBuilder): Option[DataType] = {
if (sqlType == Types.VARBINARY && typeName.equals("BIT") && size != 1) {
// This could instead be a BinaryType if we'd rather return bit-vectors of up to 64 bits as
// byte arrays instead of longs.
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala
index 3258f3782d..f952fc07fd 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetAggregatorSuite.scala
@@ -24,7 +24,7 @@ import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSQLContext
/** An `Aggregator` that adds up any numeric type returned by the given function. */
-class SumOf[I, N : Numeric](f: I => N) extends Aggregator[I, N, N] {
+class SumOf[I, N: Numeric](f: I => N) extends Aggregator[I, N, N] {
val numeric = implicitly[Numeric[N]]
override def zero: N = numeric.zero
@@ -113,7 +113,7 @@ class DatasetAggregatorSuite extends QueryTest with SharedSQLContext {
import testImplicits._
- def sum[I, N : Numeric : Encoder](f: I => N): TypedColumn[I, N] =
+ def sum[I, N: Numeric: Encoder](f: I => N): TypedColumn[I, N] =
new SumOf(f).toColumn
test("typed aggregation: TypedAggregator") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala
index 3a283a4e1f..848f1af655 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala
@@ -27,7 +27,7 @@ class DatasetCacheSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("persist and unpersist") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS().select(expr("_2 + 1").as[Int])
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS().select(expr("_2 + 1").as[Int])
val cached = ds.cache()
// count triggers the caching action. It should not throw.
cached.count()
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
index 53b5f45c2d..a3ed2e0616 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
@@ -30,7 +30,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("toDS") {
- val data = Seq(("a", 1) , ("b", 2), ("c", 3))
+ val data = Seq(("a", 1), ("b", 2), ("c", 3))
checkAnswer(
data.toDS(),
data: _*)
@@ -87,7 +87,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("as case class / collect") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDF("a", "b").as[ClassData]
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDF("a", "b").as[ClassData]
checkAnswer(
ds,
ClassData("a", 1), ClassData("b", 2), ClassData("c", 3))
@@ -105,7 +105,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("map") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.map(v => (v._1, v._2 + 1)),
("a", 2), ("b", 3), ("c", 4))
@@ -124,23 +124,23 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("select") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(expr("_2 + 1").as[Int]),
2, 3, 4)
}
test("select 2") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
- expr("_2").as[Int]) : Dataset[(String, Int)],
+ expr("_2").as[Int]): Dataset[(String, Int)],
("a", 1), ("b", 2), ("c", 3))
}
test("select 2, primitive and tuple") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
@@ -149,7 +149,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("select 2, primitive and class") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
@@ -158,7 +158,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("select 2, primitive and class, fields reordered") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDecoding(
ds.select(
expr("_1").as[String],
@@ -167,28 +167,28 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("filter") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.filter(_._1 == "b"),
("b", 2))
}
test("foreach") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.accumulator(0)
ds.foreach(v => acc += v._2)
assert(acc.value == 6)
}
test("foreachPartition") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.accumulator(0)
ds.foreachPartition(_.foreach(v => acc += v._2))
assert(acc.value == 6)
}
test("reduce") {
- val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
+ val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
assert(ds.reduce((a, b) => ("sum", a._2 + b._2)) == ("sum", 6))
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
index 4ab148065a..860e07c68c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
@@ -206,7 +206,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
StructType(
StructField("f1", IntegerType, true) ::
StructField("f2", IntegerType, true) :: Nil),
- StructType(StructField("f1", LongType, true) :: Nil) ,
+ StructType(StructField("f1", LongType, true) :: Nil),
StructType(
StructField("f1", LongType, true) ::
StructField("f2", IntegerType, true) :: Nil))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
index ab48e971b5..f2e0a868f4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
@@ -72,7 +72,7 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSQLContext {
/**
* Writes `data` to a Parquet file, reads it back and check file contents.
*/
- protected def checkParquetFile[T <: Product : ClassTag: TypeTag](data: Seq[T]): Unit = {
+ protected def checkParquetFile[T <: Product: ClassTag: TypeTag](data: Seq[T]): Unit = {
withParquetDataFrame(data)(r => checkAnswer(r, data.map(Row.fromTuple)))
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index 1fa22e2933..984e3fbc05 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -46,7 +46,7 @@ class JDBCSuite extends SparkFunSuite
val testBytes = Array[Byte](99.toByte, 134.toByte, 135.toByte, 200.toByte, 205.toByte)
val testH2Dialect = new JdbcDialect {
- override def canHandle(url: String) : Boolean = url.startsWith("jdbc:h2")
+ override def canHandle(url: String): Boolean = url.startsWith("jdbc:h2")
override def getCatalystType(
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] =
Some(StringType)
@@ -489,7 +489,7 @@ class JDBCSuite extends SparkFunSuite
test("Aggregated dialects") {
val agg = new AggregatedDialect(List(new JdbcDialect {
- override def canHandle(url: String) : Boolean = url.startsWith("jdbc:h2:")
+ override def canHandle(url: String): Boolean = url.startsWith("jdbc:h2:")
override def getCatalystType(
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] =
if (sqlType % 2 == 0) {