about summary refs log tree commit diff
path: root/examples/src/main/scala
diff options
context:
space:
mode:
author Dongjoon Hyun <dongjoon@apache.org> 2016-05-05 14:37:50 -0700
committer Andrew Or <andrew@databricks.com> 2016-05-05 14:37:50 -0700
commit 2c170dd3d731bd848d62265431795e1c141d75d7 (patch)
tree d81ec5e4a6adfda683d7882680d50d2261b06818 /examples/src/main/scala
parent bb9991dec5dd631b22a05e2e1b83b9082a845e8f (diff)
download spark-2c170dd3d731bd848d62265431795e1c141d75d7.tar.gz
spark-2c170dd3d731bd848d62265431795e1c141d75d7.tar.bz2
spark-2c170dd3d731bd848d62265431795e1c141d75d7.zip
[SPARK-15134][EXAMPLE] Indent SparkSession builder patterns and update binary_classification_metrics_example.py
## What changes were proposed in this pull request? This issue addresses the comments in SPARK-15031 and also fix java-linter errors. - Use multiline format in SparkSession builder patterns. - Update `binary_classification_metrics_example.py` to use `SparkSession`. - Fix Java Linter errors (in SPARK-13745, SPARK-15031, and so far) ## How was this patch tested? After passing the Jenkins tests and run `dev/lint-java` manually. Author: Dongjoon Hyun <dongjoon@apache.org> Closes #12911 from dongjoon-hyun/SPARK-15134.
Diffstat (limited to 'examples/src/main/scala')
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/AFTSurvivalRegressionExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/ALSExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/BinarizerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/BucketizerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/ChiSqSelectorExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/CountVectorizerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/DCTExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala4
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeRegressionExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/ElementwiseProductExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/EstimatorTransformerParamExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/IndexToStringExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionWithElasticNetExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionSummaryExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/MaxAbsScalerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/MinMaxScalerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/MultilayerPerceptronClassifierExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/NGramExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/NormalizerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/OneHotEncoderExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/PCAExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/PipelineExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/PolynomialExpansionExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/RFormulaExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/SQLTransformerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/StandardScalerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/StopWordsRemoverExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/StringIndexerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/TfIdfExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/TokenizerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/VectorAssemblerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/VectorIndexerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/VectorSlicerExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/ml/Word2VecExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala4
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/mllib/RankingMetricsExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/mllib/RegressionMetricsExample.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala5
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala5
53 files changed, 210 insertions, 53 deletions
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/AFTSurvivalRegressionExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/AFTSurvivalRegressionExample.scala
index 3795af8309..2b224d50a0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/AFTSurvivalRegressionExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/AFTSurvivalRegressionExample.scala
@@ -30,7 +30,10 @@ import org.apache.spark.sql.SparkSession
object AFTSurvivalRegressionExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("AFTSurvivalRegressionExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("AFTSurvivalRegressionExample")
+ .getOrCreate()
// $example on$
val training = spark.createDataFrame(Seq(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/ALSExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/ALSExample.scala
index 41750ca779..7c1cfe2937 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/ALSExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/ALSExample.scala
@@ -42,7 +42,10 @@ object ALSExample {
// $example off$
def main(args: Array[String]) {
- val spark = SparkSession.builder.appName("ALSExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("ALSExample")
+ .getOrCreate()
import spark.implicits._
// $example on$
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/BinarizerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/BinarizerExample.scala
index 93c153f923..82bc14789b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/BinarizerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/BinarizerExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.{DataFrame, SparkSession}
object BinarizerExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("BinarizerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("BinarizerExample")
+ .getOrCreate()
// $example on$
val data = Array((0, 0.1), (1, 0.8), (2, 0.2))
val dataFrame: DataFrame = spark.createDataFrame(data).toDF("label", "feature")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/BucketizerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/BucketizerExample.scala
index 779ad33dbd..38cce34bb5 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/BucketizerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/BucketizerExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object BucketizerExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("BucketizerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("BucketizerExample")
+ .getOrCreate()
// $example on$
val splits = Array(Double.NegativeInfinity, -0.5, 0.0, 0.5, Double.PositiveInfinity)
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/ChiSqSelectorExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/ChiSqSelectorExample.scala
index 84ca1f0b56..80f50cd355 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/ChiSqSelectorExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/ChiSqSelectorExample.scala
@@ -26,7 +26,10 @@ import org.apache.spark.sql.SparkSession
object ChiSqSelectorExample {
def main(args: Array[String]) {
- val spark = SparkSession.builder.appName("ChiSqSelectorExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("ChiSqSelectorExample")
+ .getOrCreate()
import spark.implicits._
// $example on$
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/CountVectorizerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/CountVectorizerExample.scala
index 9ab43a48bf..51aa5179fa 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/CountVectorizerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/CountVectorizerExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object CountVectorizerExample {
def main(args: Array[String]) {
- val spark = SparkSession.builder.appName("CounterVectorizerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("CounterVectorizerExample")
+ .getOrCreate()
// $example on$
val df = spark.createDataFrame(Seq(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DCTExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DCTExample.scala
index b415333c71..5a888b15eb 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DCTExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DCTExample.scala
@@ -26,7 +26,10 @@ import org.apache.spark.sql.SparkSession
object DCTExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("DCTExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("DCTExample")
+ .getOrCreate()
// $example on$
val data = Seq(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
index 2f892f8d72..6cb81cde6f 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
@@ -61,7 +61,10 @@ object DataFrameExample {
}
def run(params: Params) {
- val spark = SparkSession.builder.appName(s"DataFrameExample with $params").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName(s"DataFrameExample with $params")
+ .getOrCreate()
// Load input data
println(s"Loading LIBSVM file with UDT from ${params.input}.")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala
index a0a2e1fb33..7f6c8de967 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeClassificationExample.scala
@@ -29,7 +29,10 @@ import org.apache.spark.sql.SparkSession
object DecisionTreeClassificationExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("DecisionTreeClassificationExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("DecisionTreeClassificationExample")
+ .getOrCreate()
// $example on$
// Load the data stored in LIBSVM format as a DataFrame.
val data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
index cea1d801aa..eadb02ab0d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
@@ -167,7 +167,9 @@ object DecisionTreeExample {
testInput: String,
algo: String,
fracTest: Double): (DataFrame, DataFrame) = {
- val spark = SparkSession.builder.getOrCreate()
+ val spark = SparkSession
+ .builder
+ .getOrCreate()
// Load training data
val origExamples: DataFrame = loadData(spark, input, dataFormat)
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeRegressionExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeRegressionExample.scala
index 26b52d0489..799070ef47 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeRegressionExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeRegressionExample.scala
@@ -29,7 +29,10 @@ import org.apache.spark.sql.SparkSession
object DecisionTreeRegressionExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("DecisionTreeRegressionExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("DecisionTreeRegressionExample")
+ .getOrCreate()
// $example on$
// Load the data stored in LIBSVM format as a DataFrame.
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala
index 2aa1ab1ec8..a522d2127e 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DeveloperApiExample.scala
@@ -37,7 +37,10 @@ import org.apache.spark.sql.{Dataset, Row, SparkSession}
object DeveloperApiExample {
def main(args: Array[String]) {
- val spark = SparkSession.builder.appName("DeveloperApiExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("DeveloperApiExample")
+ .getOrCreate()
import spark.implicits._
// Prepare training data.
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/ElementwiseProductExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/ElementwiseProductExample.scala
index f289c28df9..b99b76e58c 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/ElementwiseProductExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/ElementwiseProductExample.scala
@@ -26,7 +26,10 @@ import org.apache.spark.sql.SparkSession
object ElementwiseProductExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("ElementwiseProductExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("ElementwiseProductExample")
+ .getOrCreate()
// $example on$
// Create some vector data; also works for sparse vectors
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/EstimatorTransformerParamExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/EstimatorTransformerParamExample.scala
index 91076ccbc1..972241e769 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/EstimatorTransformerParamExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/EstimatorTransformerParamExample.scala
@@ -29,7 +29,10 @@ import org.apache.spark.sql.SparkSession
object EstimatorTransformerParamExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("EstimatorTransformerParamExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("EstimatorTransformerParamExample")
+ .getOrCreate()
// $example on$
// Prepare training data from a list of (label, features) tuples.
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala
index 412c54db7d..b6a8baba2d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeClassifierExample.scala
@@ -28,7 +28,10 @@ import org.apache.spark.sql.SparkSession
object GradientBoostedTreeClassifierExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("GradientBoostedTreeClassifierExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("GradientBoostedTreeClassifierExample")
+ .getOrCreate()
// $example on$
// Load and parse the data file, converting it to a DataFrame.
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala
index fd43553cc6..62285b83cb 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/GradientBoostedTreeRegressorExample.scala
@@ -28,7 +28,10 @@ import org.apache.spark.sql.SparkSession
object GradientBoostedTreeRegressorExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("GradientBoostedTreeRegressorExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("GradientBoostedTreeRegressorExample")
+ .getOrCreate()
// $example on$
// Load and parse the data file, converting it to a DataFrame.
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/IndexToStringExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/IndexToStringExample.scala
index d873618726..950733831c 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/IndexToStringExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/IndexToStringExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object IndexToStringExample {
def main(args: Array[String]) {
- val spark = SparkSession.builder.appName("IndexToStringExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("IndexToStringExample")
+ .getOrCreate()
// $example on$
val df = spark.createDataFrame(Seq(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala
index d2573fad35..2abd588c6f 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala
@@ -36,7 +36,10 @@ object KMeansExample {
def main(args: Array[String]): Unit = {
// Creates a Spark context and a SQL context
- val spark = SparkSession.builder.appName(s"${this.getClass.getSimpleName}").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName(s"${this.getClass.getSimpleName}")
+ .getOrCreate()
// $example on$
// Crates a DataFrame
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala
index c23adee1a3..c2920f6a5d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala
@@ -40,7 +40,10 @@ object LDAExample {
val input = "data/mllib/sample_lda_data.txt"
// Creates a Spark context and a SQL context
- val spark = SparkSession.builder.appName(s"${this.getClass.getSimpleName}").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName(s"${this.getClass.getSimpleName}")
+ .getOrCreate()
// $example on$
// Loads data
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionWithElasticNetExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionWithElasticNetExample.scala
index cb6e2492f5..94cf286623 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionWithElasticNetExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionWithElasticNetExample.scala
@@ -26,7 +26,10 @@ import org.apache.spark.sql.SparkSession
object LinearRegressionWithElasticNetExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("LinearRegressionWithElasticNetExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("LinearRegressionWithElasticNetExample")
+ .getOrCreate()
// $example on$
// Load training data
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionSummaryExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionSummaryExample.scala
index 50670d7b38..cd8775c942 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionSummaryExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionSummaryExample.scala
@@ -27,7 +27,10 @@ import org.apache.spark.sql.functions.max
object LogisticRegressionSummaryExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("LogisticRegressionSummaryExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("LogisticRegressionSummaryExample")
+ .getOrCreate()
import spark.implicits._
// Load training data
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/MaxAbsScalerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/MaxAbsScalerExample.scala
index 896d8fadbe..572adce657 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/MaxAbsScalerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/MaxAbsScalerExample.scala
@@ -24,7 +24,10 @@ import org.apache.spark.sql.SparkSession
object MaxAbsScalerExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("MaxAbsScalerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("MaxAbsScalerExample")
+ .getOrCreate()
// $example on$
val dataFrame = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/MinMaxScalerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/MinMaxScalerExample.scala
index bcdca0fa04..d728019a62 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/MinMaxScalerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/MinMaxScalerExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object MinMaxScalerExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("MinMaxScalerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("MinMaxScalerExample")
+ .getOrCreate()
// $example on$
val dataFrame = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/MultilayerPerceptronClassifierExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/MultilayerPerceptronClassifierExample.scala
index a11fe1b4b2..0e780fb7d3 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/MultilayerPerceptronClassifierExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/MultilayerPerceptronClassifierExample.scala
@@ -30,7 +30,10 @@ import org.apache.spark.sql.SparkSession
object MultilayerPerceptronClassifierExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("MultilayerPerceptronClassifierExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("MultilayerPerceptronClassifierExample")
+ .getOrCreate()
// $example on$
// Load the data stored in LIBSVM format as a DataFrame.
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/NGramExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/NGramExample.scala
index 1b71a39890..e0b52e7a36 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/NGramExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/NGramExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object NGramExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("NGramExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("NGramExample")
+ .getOrCreate()
// $example on$
val wordDataFrame = spark.createDataFrame(Seq(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala
index 8d54555cd3..90cdebfcb0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala
@@ -26,7 +26,10 @@ import org.apache.spark.sql.SparkSession
object NaiveBayesExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("NaiveBayesExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("NaiveBayesExample")
+ .getOrCreate()
// $example on$
// Load the data stored in LIBSVM format as a DataFrame.
val data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/NormalizerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/NormalizerExample.scala
index 4622d69ef9..75ba33a7e7 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/NormalizerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/NormalizerExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object NormalizerExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("NormalizerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("NormalizerExample")
+ .getOrCreate()
// $example on$
val dataFrame = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/OneHotEncoderExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/OneHotEncoderExample.scala
index 338436100c..4aa649b133 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/OneHotEncoderExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/OneHotEncoderExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object OneHotEncoderExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("OneHotEncoderExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("OneHotEncoderExample")
+ .getOrCreate()
// $example on$
val df = spark.createDataFrame(Seq(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
index e2351c682d..fc73ae07ff 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/OneVsRestExample.scala
@@ -109,7 +109,10 @@ object OneVsRestExample {
}
private def run(params: Params) {
- val spark = SparkSession.builder.appName(s"OneVsRestExample with $params").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName(s"OneVsRestExample with $params")
+ .getOrCreate()
// $example on$
val inputData = spark.read.format("libsvm").load(params.input)
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/PCAExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/PCAExample.scala
index 14394d5624..7927323b42 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/PCAExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/PCAExample.scala
@@ -26,7 +26,10 @@ import org.apache.spark.sql.SparkSession
object PCAExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("PCAExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("PCAExample")
+ .getOrCreate()
// $example on$
val data = Array(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/PipelineExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/PipelineExample.scala
index 61b34aebd9..e5e916ac16 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/PipelineExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/PipelineExample.scala
@@ -30,7 +30,10 @@ import org.apache.spark.sql.SparkSession
object PipelineExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("PipelineExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("PipelineExample")
+ .getOrCreate()
// $example on$
// Prepare training documents from a list of (id, text, label) tuples.
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/PolynomialExpansionExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/PolynomialExpansionExample.scala
index 4d8c672a55..94b17a3cd7 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/PolynomialExpansionExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/PolynomialExpansionExample.scala
@@ -26,7 +26,10 @@ import org.apache.spark.sql.SparkSession
object PolynomialExpansionExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("PolynomialExpansionExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("PolynomialExpansionExample")
+ .getOrCreate()
// $example on$
val data = Array(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala
index 0839c609f1..1a16515594 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/QuantileDiscretizerExample.scala
@@ -24,7 +24,10 @@ import org.apache.spark.sql.SparkSession
object QuantileDiscretizerExample {
def main(args: Array[String]) {
- val spark = SparkSession.builder.appName("QuantileDiscretizerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("QuantileDiscretizerExample")
+ .getOrCreate()
import spark.implicits._
// $example on$
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/RFormulaExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/RFormulaExample.scala
index 699b621db9..9ea4920146 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/RFormulaExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/RFormulaExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object RFormulaExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("RFormulaExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("RFormulaExample")
+ .getOrCreate()
// $example on$
val dataset = spark.createDataFrame(Seq(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala
index 4192a9c737..ae0bd945d8 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestClassifierExample.scala
@@ -28,7 +28,10 @@ import org.apache.spark.sql.SparkSession
object RandomForestClassifierExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("RandomForestClassifierExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("RandomForestClassifierExample")
+ .getOrCreate()
// $example on$
// Load and parse the data file, converting it to a DataFrame.
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala
index 5632f0419a..96dc2f05be 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestRegressorExample.scala
@@ -28,7 +28,10 @@ import org.apache.spark.sql.SparkSession
object RandomForestRegressorExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("RandomForestRegressorExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("RandomForestRegressorExample")
+ .getOrCreate()
// $example on$
// Load and parse the data file, converting it to a DataFrame.
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/SQLTransformerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/SQLTransformerExample.scala
index f03b29ba32..bb4587b82c 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/SQLTransformerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/SQLTransformerExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object SQLTransformerExample {
def main(args: Array[String]) {
- val spark = SparkSession.builder.appName("SQLTransformerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("SQLTransformerExample")
+ .getOrCreate()
// $example on$
val df = spark.createDataFrame(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala
index dff7719507..3547dd95bd 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/SimpleParamsExample.scala
@@ -34,7 +34,10 @@ import org.apache.spark.sql.{Row, SparkSession}
object SimpleParamsExample {
def main(args: Array[String]) {
- val spark = SparkSession.builder.appName("SimpleParamsExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("SimpleParamsExample")
+ .getOrCreate()
import spark.implicits._
// Prepare training data.
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala b/examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala
index 05199007f0..c78ff2378b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/SimpleTextClassificationPipeline.scala
@@ -42,7 +42,10 @@ case class Document(id: Long, text: String)
object SimpleTextClassificationPipeline {
def main(args: Array[String]) {
- val spark = SparkSession.builder.appName("SimpleTextClassificationPipeline").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("SimpleTextClassificationPipeline")
+ .getOrCreate()
import spark.implicits._
// Prepare training documents, which are labeled.
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/StandardScalerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/StandardScalerExample.scala
index 55f777c6e2..4d668e8ab9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/StandardScalerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/StandardScalerExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object StandardScalerExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("StandardScalerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("StandardScalerExample")
+ .getOrCreate()
// $example on$
val dataFrame = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/StopWordsRemoverExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/StopWordsRemoverExample.scala
index 85e79c8cb3..fb1a43e962 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/StopWordsRemoverExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/StopWordsRemoverExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object StopWordsRemoverExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("StopWordsRemoverExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("StopWordsRemoverExample")
+ .getOrCreate()
// $example on$
val remover = new StopWordsRemover()
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/StringIndexerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/StringIndexerExample.scala
index e01a768da9..63f273e87a 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/StringIndexerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/StringIndexerExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object StringIndexerExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("StringIndexerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("StringIndexerExample")
+ .getOrCreate()
// $example on$
val df = spark.createDataFrame(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/TfIdfExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/TfIdfExample.scala
index 910ef62a26..33b5daec59 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/TfIdfExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/TfIdfExample.scala
@@ -26,7 +26,10 @@ import org.apache.spark.sql.SparkSession
object TfIdfExample {
def main(args: Array[String]) {
- val spark = SparkSession.builder.appName("TfIdfExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("TfIdfExample")
+ .getOrCreate()
// $example on$
val sentenceData = spark.createDataFrame(Seq(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/TokenizerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/TokenizerExample.scala
index 4f0c47b3c8..1c70dc700b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/TokenizerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/TokenizerExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object TokenizerExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("TokenizerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("TokenizerExample")
+ .getOrCreate()
// $example on$
val sentenceDataFrame = spark.createDataFrame(Seq(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/VectorAssemblerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/VectorAssemblerExample.scala
index 56b7263b19..8e382ccc48 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/VectorAssemblerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/VectorAssemblerExample.scala
@@ -26,7 +26,10 @@ import org.apache.spark.sql.SparkSession
object VectorAssemblerExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("VectorAssemblerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("VectorAssemblerExample")
+ .getOrCreate()
// $example on$
val dataset = spark.createDataFrame(
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/VectorIndexerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/VectorIndexerExample.scala
index 214ad91634..afa761aee0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/VectorIndexerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/VectorIndexerExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object VectorIndexerExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("VectorIndexerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("VectorIndexerExample")
+ .getOrCreate()
// $example on$
val data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/VectorSlicerExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/VectorSlicerExample.scala
index 716bf023a8..b1a3997f48 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/VectorSlicerExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/VectorSlicerExample.scala
@@ -31,7 +31,10 @@ import org.apache.spark.sql.SparkSession
object VectorSlicerExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("VectorSlicerExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("VectorSlicerExample")
+ .getOrCreate()
// $example on$
val data = Arrays.asList(Row(Vectors.dense(-2.0, 2.3, 0.0)))
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/Word2VecExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/Word2VecExample.scala
index 292b6d9f77..9ac5623607 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/Word2VecExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/Word2VecExample.scala
@@ -25,7 +25,10 @@ import org.apache.spark.sql.SparkSession
object Word2VecExample {
def main(args: Array[String]) {
- val spark = SparkSession.builder.appName("Word2Vec example").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("Word2Vec example")
+ .getOrCreate()
// $example on$
// Input data: Each row is a bag of words from a sentence or document.
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
index c2bf1548b5..7651aade49 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
@@ -189,7 +189,9 @@ object LDAExample {
vocabSize: Int,
stopwordFile: String): (RDD[(Long, Vector)], Array[String], Long) = {
- val spark = SparkSession.builder.getOrCreate()
+ val spark = SparkSession
+ .builder
+ .getOrCreate()
import spark.implicits._
// Get dataset of document texts
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/RankingMetricsExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/RankingMetricsExample.scala
index cd4f0bb0de..781a934df6 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/RankingMetricsExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/RankingMetricsExample.scala
@@ -26,7 +26,10 @@ import org.apache.spark.sql.SparkSession
object RankingMetricsExample {
def main(args: Array[String]) {
- val spark = SparkSession.builder.appName("RankingMetricsExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("RankingMetricsExample")
+ .getOrCreate()
import spark.implicits._
// $example on$
// Read in the ratings data
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/RegressionMetricsExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/RegressionMetricsExample.scala
index 22c47a694d..abeaaa00b5 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/RegressionMetricsExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/RegressionMetricsExample.scala
@@ -27,7 +27,10 @@ import org.apache.spark.sql.SparkSession
object RegressionMetricsExample {
def main(args: Array[String]): Unit = {
- val spark = SparkSession.builder.appName("RegressionMetricsExample").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("RegressionMetricsExample")
+ .getOrCreate()
// $example on$
// Load the data
val data = spark
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
index b4118b16e2..94c378ae4b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
@@ -26,7 +26,10 @@ case class Record(key: Int, value: String)
object RDDRelation {
def main(args: Array[String]) {
- val spark = SparkSession.builder.appName("RDDRelation").getOrCreate()
+ val spark = SparkSession
+ .builder
+ .appName("RDDRelation")
+ .getOrCreate()
// Importing the SparkSession gives access to all the SQL functions and implicit conversions.
import spark.implicits._
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
index 2f0fe704f7..9aba4a05a8 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/SqlNetworkWordCount.scala
@@ -93,7 +93,10 @@ object SparkSessionSingleton {
def getInstance(sparkConf: SparkConf): SparkSession = {
if (instance == null) {
- instance = SparkSession.builder.config(sparkConf).getOrCreate()
+ instance = SparkSession
+ .builder
+ .config(sparkConf)
+ .getOrCreate()
}
instance
}