diff options
Diffstat (limited to 'sql/core/src')
5 files changed, 5 insertions, 5 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala index 339e61e572..24f61992d4 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala @@ -1147,7 +1147,7 @@ class DataFrame private[sql]( * columns of the input row are implicitly joined with each value that is output by the function. * * {{{ - * df.explode("words", "word"){words: String => words.split(" ")} + * df.explode("words", "word") {words: String => words.split(" ")} * }}} * @group dfops * @since 1.3.0 diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala index dd1fbcf3c8..daddf6e0c5 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala @@ -605,7 +605,7 @@ class Dataset[T] private[sql]( * duplicate items. As such, it is analogous to `UNION ALL` in SQL. * @since 1.6.0 */ - def union(other: Dataset[T]): Dataset[T] = withPlan[T](other){ (left, right) => + def union(other: Dataset[T]): Dataset[T] = withPlan[T](other) { (left, right) => // This breaks caching, but it's usually ok because it addresses a very specific use case: // using union to union many files or partitions. 
CombineUnions(Union(left, right)) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala index 032ba61d9d..41cff07472 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/CompressionCodecs.scala @@ -57,7 +57,7 @@ private[datasources] object CompressionCodecs { * `codec` should be a full class path */ def setCodecConfiguration(conf: Configuration, codec: String): Unit = { - if (codec != null){ + if (codec != null) { conf.set("mapreduce.output.fileoutputformat.compress", "true") conf.set("mapreduce.output.fileoutputformat.compress.type", CompressionType.BLOCK.toString) conf.set("mapreduce.output.fileoutputformat.compress.codec", codec) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala index 49915adf6c..9d3cd9bb14 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala @@ -114,7 +114,7 @@ private[sql] class ExecutionPage(parent: SQLTab) extends WebUIPage("execution") {metadata} </div> {planVisualizationResources} - <script>$(function(){{ renderPlanViz(); }})</script> + <script>$(function() {{ renderPlanViz(); }})</script> </div> } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala index 34e914cb1e..b7834d76cc 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala @@ 
-145,7 +145,7 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSQLContext { withTempPath { dir => val data = makeDecimalRDD(DecimalType(precision, scale)) data.write.parquet(dir.getCanonicalPath) - readParquetFile(dir.getCanonicalPath){ df => { + readParquetFile(dir.getCanonicalPath) { df => { checkAnswer(df, data.collect().toSeq) }} }