author    Zheng RuiFeng <ruifengz@foxmail.com>    2016-05-11 22:45:30 -0700
committer Andrew Or <andrew@databricks.com>       2016-05-11 22:45:30 -0700
commit    9e266d07a444fd465fe178cdd5c4894cd09cbda3 (patch)
tree      e6ddfe9c9f6f6e8c5bc8c4b91b44c5fc5a948fac /examples/src/main/java
parent    ba5487c061168627b27af2fa9610d53791fcc90d (diff)
[SPARK-15031][SPARK-15134][EXAMPLE][DOC] Use SparkSession and update indent in examples
## What changes were proposed in this pull request?

1. Use `SparkSession` in the Java examples, per [SPARK-15031](https://issues.apache.org/jira/browse/SPARK-15031).
2. Update the indentation for `SparkContext`, per [SPARK-15134](https://issues.apache.org/jira/browse/SPARK-15134).
3. Also remove some duplicate spaces in comments and add missing trailing periods.

## How was this patch tested?

Manual tests.

Author: Zheng RuiFeng <ruifengz@foxmail.com>

Closes #13050 from zhengruifeng/use_sparksession.
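Below is a minimal, standalone sketch (not part of the patch) of the multi-line `SparkSession.builder()` style this change applies across the Java examples. The class and app name `BuilderStyleSketch` are illustrative only.

```java
import org.apache.spark.sql.SparkSession;

// Illustrative only: shows the chained-builder indentation the updated examples use,
// with one builder call per line instead of a single long line.
public class BuilderStyleSketch {
  public static void main(String[] args) {
    SparkSession spark = SparkSession
      .builder()
      .appName("BuilderStyleSketch")  // hypothetical app name, not taken from the diff
      .getOrCreate();

    // ... example-specific logic would go here ...

    spark.stop();
  }
}
```

Each example touched by this diff follows the same pattern with its own application name.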
Diffstat (limited to 'examples/src/main/java')
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeClassificationExample.java         | 14
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeRegressionExample.java             | 12
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java                       |  6
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaEstimatorTransformerParamExample.java          |  4
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeClassifierExample.java      |  6
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeRegressorExample.java       | 12
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaLinearRegressionWithElasticNetExample.java     | 12
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaLogisticRegressionSummaryExample.java          |  4
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaLogisticRegressionWithElasticNetExample.java   |  4
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaModelSelectionViaCrossValidationExample.java   |  4
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaModelSelectionViaTrainValidationSplitExample.java |  4
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaMultilayerPerceptronClassifierExample.java     |  4
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java                |  4
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestClassifierExample.java             |  4
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestRegressorExample.java              |  6
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaSimpleParamsExample.java                       |  8
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaSimpleTextClassificationPipeline.java          |  4
17 files changed, 70 insertions(+), 42 deletions(-)
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeClassificationExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeClassificationExample.java
index 733bc4181c..bdb76f004f 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeClassificationExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeClassificationExample.java
@@ -32,7 +32,9 @@ import org.apache.spark.sql.SparkSession;
public class JavaDecisionTreeClassificationExample {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaDecisionTreeClassificationExample").getOrCreate();
+ .builder()
+ .appName("JavaDecisionTreeClassificationExample")
+ .getOrCreate();
// $example on$
// Load the data stored in LIBSVM format as a DataFrame.
@@ -52,10 +54,10 @@ public class JavaDecisionTreeClassificationExample {
VectorIndexerModel featureIndexer = new VectorIndexer()
.setInputCol("features")
.setOutputCol("indexedFeatures")
- .setMaxCategories(4) // features with > 4 distinct values are treated as continuous
+ .setMaxCategories(4) // features with > 4 distinct values are treated as continuous.
.fit(data);
- // Split the data into training and test sets (30% held out for testing)
+ // Split the data into training and test sets (30% held out for testing).
Dataset<Row>[] splits = data.randomSplit(new double[]{0.7, 0.3});
Dataset<Row> trainingData = splits[0];
Dataset<Row> testData = splits[1];
@@ -71,11 +73,11 @@ public class JavaDecisionTreeClassificationExample {
.setOutputCol("predictedLabel")
.setLabels(labelIndexer.labels());
- // Chain indexers and tree in a Pipeline
+ // Chain indexers and tree in a Pipeline.
Pipeline pipeline = new Pipeline()
.setStages(new PipelineStage[]{labelIndexer, featureIndexer, dt, labelConverter});
- // Train model.  This also runs the indexers.
+ // Train model. This also runs the indexers.
PipelineModel model = pipeline.fit(trainingData);
// Make predictions.
@@ -84,7 +86,7 @@ public class JavaDecisionTreeClassificationExample {
// Select example rows to display.
predictions.select("predictedLabel", "label", "features").show(5);
- // Select (prediction, true label) and compute test error
+ // Select (prediction, true label) and compute test error.
MulticlassClassificationEvaluator evaluator = new MulticlassClassificationEvaluator()
.setLabelCol("indexedLabel")
.setPredictionCol("prediction")
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeRegressionExample.java
index bd6dc3edd3..cffb7139ed 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeRegressionExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeRegressionExample.java
@@ -33,7 +33,9 @@ import org.apache.spark.sql.SparkSession;
public class JavaDecisionTreeRegressionExample {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaDecisionTreeRegressionExample").getOrCreate();
+ .builder()
+ .appName("JavaDecisionTreeRegressionExample")
+ .getOrCreate();
// $example on$
// Load the data stored in LIBSVM format as a DataFrame.
Dataset<Row> data = spark.read().format("libsvm")
@@ -47,7 +49,7 @@ public class JavaDecisionTreeRegressionExample {
.setMaxCategories(4)
.fit(data);
- // Split the data into training and test sets (30% held out for testing)
+ // Split the data into training and test sets (30% held out for testing).
Dataset<Row>[] splits = data.randomSplit(new double[]{0.7, 0.3});
Dataset<Row> trainingData = splits[0];
Dataset<Row> testData = splits[1];
@@ -56,11 +58,11 @@ public class JavaDecisionTreeRegressionExample {
DecisionTreeRegressor dt = new DecisionTreeRegressor()
.setFeaturesCol("indexedFeatures");
- // Chain indexer and tree in a Pipeline
+ // Chain indexer and tree in a Pipeline.
Pipeline pipeline = new Pipeline()
.setStages(new PipelineStage[]{featureIndexer, dt});
- // Train model.  This also runs the indexer.
+ // Train model. This also runs the indexer.
PipelineModel model = pipeline.fit(trainingData);
// Make predictions.
@@ -69,7 +71,7 @@ public class JavaDecisionTreeRegressionExample {
// Select example rows to display.
predictions.select("label", "features").show(5);
- // Select (prediction, true label) and compute test error
+ // Select (prediction, true label) and compute test error.
RegressionEvaluator evaluator = new RegressionEvaluator()
.setLabelCol("label")
.setPredictionCol("prediction")
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
index 49bad0afc0..3265c4d7ec 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
@@ -62,7 +62,7 @@ public class JavaDeveloperApiExample {
new LabeledPoint(1.0, Vectors.dense(0.0, 1.2, -0.5)));
Dataset<Row> training = spark.createDataFrame(localTraining, LabeledPoint.class);
- // Create a LogisticRegression instance.  This instance is an Estimator.
+ // Create a LogisticRegression instance. This instance is an Estimator.
MyJavaLogisticRegression lr = new MyJavaLogisticRegression();
// Print out the parameters, documentation, and any default values.
System.out.println("MyJavaLogisticRegression parameters:\n" + lr.explainParams() + "\n");
@@ -70,7 +70,7 @@ public class JavaDeveloperApiExample {
// We may set parameters using setter methods.
lr.setMaxIter(10);
- // Learn a LogisticRegression model.  This uses the parameters stored in lr.
+ // Learn a LogisticRegression model. This uses the parameters stored in lr.
MyJavaLogisticRegressionModel model = lr.fit(training);
// Prepare test data.
@@ -214,7 +214,7 @@ class MyJavaLogisticRegressionModel
}
/**
- * Number of classes the label can take.  2 indicates binary classification.
+ * Number of classes the label can take. 2 indicates binary classification.
*/
public int numClasses() { return 2; }
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaEstimatorTransformerParamExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaEstimatorTransformerParamExample.java
index 5ba8e6cf44..889f5785df 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaEstimatorTransformerParamExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaEstimatorTransformerParamExample.java
@@ -38,7 +38,9 @@ import org.apache.spark.sql.SparkSession;
public class JavaEstimatorTransformerParamExample {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaEstimatorTransformerParamExample").getOrCreate();
+ .builder()
+ .appName("JavaEstimatorTransformerParamExample")
+ .getOrCreate();
// $example on$
// Prepare training data.
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeClassifierExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeClassifierExample.java
index baacd796a0..5c2e03eda9 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeClassifierExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeClassifierExample.java
@@ -75,11 +75,11 @@ public class JavaGradientBoostedTreeClassifierExample {
.setOutputCol("predictedLabel")
.setLabels(labelIndexer.labels());
- // Chain indexers and GBT in a Pipeline
+ // Chain indexers and GBT in a Pipeline.
Pipeline pipeline = new Pipeline()
.setStages(new PipelineStage[] {labelIndexer, featureIndexer, gbt, labelConverter});
- // Train model.  This also runs the indexers.
+ // Train model. This also runs the indexers.
PipelineModel model = pipeline.fit(trainingData);
// Make predictions.
@@ -88,7 +88,7 @@ public class JavaGradientBoostedTreeClassifierExample {
// Select example rows to display.
predictions.select("predictedLabel", "label", "features").show(5);
- // Select (prediction, true label) and compute test error
+ // Select (prediction, true label) and compute test error.
MulticlassClassificationEvaluator evaluator = new MulticlassClassificationEvaluator()
.setLabelCol("indexedLabel")
.setPredictionCol("prediction")
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeRegressorExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeRegressorExample.java
index 6d3f21fdaf..769b5c3e85 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeRegressorExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaGradientBoostedTreeRegressorExample.java
@@ -34,7 +34,9 @@ import org.apache.spark.sql.SparkSession;
public class JavaGradientBoostedTreeRegressorExample {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaGradientBoostedTreeRegressorExample").getOrCreate();
+ .builder()
+ .appName("JavaGradientBoostedTreeRegressorExample")
+ .getOrCreate();
// $example on$
// Load and parse the data file, converting it to a DataFrame.
@@ -48,7 +50,7 @@ public class JavaGradientBoostedTreeRegressorExample {
.setMaxCategories(4)
.fit(data);
- // Split the data into training and test sets (30% held out for testing)
+ // Split the data into training and test sets (30% held out for testing).
Dataset<Row>[] splits = data.randomSplit(new double[] {0.7, 0.3});
Dataset<Row> trainingData = splits[0];
Dataset<Row> testData = splits[1];
@@ -59,10 +61,10 @@ public class JavaGradientBoostedTreeRegressorExample {
.setFeaturesCol("indexedFeatures")
.setMaxIter(10);
- // Chain indexer and GBT in a Pipeline
+ // Chain indexer and GBT in a Pipeline.
Pipeline pipeline = new Pipeline().setStages(new PipelineStage[] {featureIndexer, gbt});
- // Train model.  This also runs the indexer.
+ // Train model. This also runs the indexer.
PipelineModel model = pipeline.fit(trainingData);
// Make predictions.
@@ -71,7 +73,7 @@ public class JavaGradientBoostedTreeRegressorExample {
// Select example rows to display.
predictions.select("prediction", "label", "features").show(5);
- // Select (prediction, true label) and compute test error
+ // Select (prediction, true label) and compute test error.
RegressionEvaluator evaluator = new RegressionEvaluator()
.setLabelCol("label")
.setPredictionCol("prediction")
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaLinearRegressionWithElasticNetExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaLinearRegressionWithElasticNetExample.java
index b6ea1fed25..dcd209e28e 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaLinearRegressionWithElasticNetExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaLinearRegressionWithElasticNetExample.java
@@ -30,10 +30,12 @@ import org.apache.spark.sql.SparkSession;
public class JavaLinearRegressionWithElasticNetExample {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaLinearRegressionWithElasticNetExample").getOrCreate();
+ .builder()
+ .appName("JavaLinearRegressionWithElasticNetExample")
+ .getOrCreate();
// $example on$
- // Load training data
+ // Load training data.
Dataset<Row> training = spark.read().format("libsvm")
.load("data/mllib/sample_linear_regression_data.txt");
@@ -42,14 +44,14 @@ public class JavaLinearRegressionWithElasticNetExample {
.setRegParam(0.3)
.setElasticNetParam(0.8);
- // Fit the model
+ // Fit the model.
LinearRegressionModel lrModel = lr.fit(training);
- // Print the coefficients and intercept for linear regression
+ // Print the coefficients and intercept for linear regression.
System.out.println("Coefficients: "
+ lrModel.coefficients() + " Intercept: " + lrModel.intercept());
- // Summarize the model over the training set and print out some metrics
+ // Summarize the model over the training set and print out some metrics.
LinearRegressionTrainingSummary trainingSummary = lrModel.summary();
System.out.println("numIterations: " + trainingSummary.totalIterations());
System.out.println("objectiveHistory: " + Vectors.dense(trainingSummary.objectiveHistory()));
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaLogisticRegressionSummaryExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaLogisticRegressionSummaryExample.java
index fd040aead4..dee56799d8 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaLogisticRegressionSummaryExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaLogisticRegressionSummaryExample.java
@@ -31,7 +31,9 @@ import org.apache.spark.sql.functions;
public class JavaLogisticRegressionSummaryExample {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaLogisticRegressionSummaryExample").getOrCreate();
+ .builder()
+ .appName("JavaLogisticRegressionSummaryExample")
+ .getOrCreate();
// Load training data
Dataset<Row> training = spark.read().format("libsvm")
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaLogisticRegressionWithElasticNetExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaLogisticRegressionWithElasticNetExample.java
index f00c7a05cd..6101c79fb0 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaLogisticRegressionWithElasticNetExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaLogisticRegressionWithElasticNetExample.java
@@ -28,7 +28,9 @@ import org.apache.spark.sql.SparkSession;
public class JavaLogisticRegressionWithElasticNetExample {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaLogisticRegressionWithElasticNetExample").getOrCreate();
+ .builder()
+ .appName("JavaLogisticRegressionWithElasticNetExample")
+ .getOrCreate();
// $example on$
// Load training data
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaModelSelectionViaCrossValidationExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaModelSelectionViaCrossValidationExample.java
index a4ec4f5815..975c65edc0 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaModelSelectionViaCrossValidationExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaModelSelectionViaCrossValidationExample.java
@@ -43,7 +43,9 @@ import org.apache.spark.sql.SparkSession;
public class JavaModelSelectionViaCrossValidationExample {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaModelSelectionViaCrossValidationExample").getOrCreate();
+ .builder()
+ .appName("JavaModelSelectionViaCrossValidationExample")
+ .getOrCreate();
// $example on$
// Prepare training documents, which are labeled.
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaModelSelectionViaTrainValidationSplitExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaModelSelectionViaTrainValidationSplitExample.java
index 63a0ad1cb8..0f96293f03 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaModelSelectionViaTrainValidationSplitExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaModelSelectionViaTrainValidationSplitExample.java
@@ -43,7 +43,9 @@ import org.apache.spark.sql.SparkSession;
public class JavaModelSelectionViaTrainValidationSplitExample {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaModelSelectionViaTrainValidationSplitExample").getOrCreate();
+ .builder()
+ .appName("JavaModelSelectionViaTrainValidationSplitExample")
+ .getOrCreate();
// $example on$
Dataset<Row> data = spark.read().format("libsvm")
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaMultilayerPerceptronClassifierExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaMultilayerPerceptronClassifierExample.java
index d547a2a64b..c7d03d8593 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaMultilayerPerceptronClassifierExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaMultilayerPerceptronClassifierExample.java
@@ -33,7 +33,9 @@ public class JavaMultilayerPerceptronClassifierExample {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaMultilayerPerceptronClassifierExample").getOrCreate();
+ .builder()
+ .appName("JavaMultilayerPerceptronClassifierExample")
+ .getOrCreate();
// $example on$
// Load training data
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java
index 94e3fafcab..16f58a852d 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaQuantileDiscretizerExample.java
@@ -35,7 +35,9 @@ import org.apache.spark.sql.types.StructType;
public class JavaQuantileDiscretizerExample {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaQuantileDiscretizerExample").getOrCreate();
+ .builder()
+ .appName("JavaQuantileDiscretizerExample")
+ .getOrCreate();
// $example on$
List<Row> data = Arrays.asList(
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestClassifierExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestClassifierExample.java
index 21e783a968..14af2fbbbb 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestClassifierExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestClassifierExample.java
@@ -33,7 +33,9 @@ import org.apache.spark.sql.SparkSession;
public class JavaRandomForestClassifierExample {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaRandomForestClassifierExample").getOrCreate();
+ .builder()
+ .appName("JavaRandomForestClassifierExample")
+ .getOrCreate();
// $example on$
// Load and parse the data file, converting it to a DataFrame.
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestRegressorExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestRegressorExample.java
index ece184a878..a7078453de 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestRegressorExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaRandomForestRegressorExample.java
@@ -34,7 +34,9 @@ import org.apache.spark.sql.SparkSession;
public class JavaRandomForestRegressorExample {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaRandomForestRegressorExample").getOrCreate();
+ .builder()
+ .appName("JavaRandomForestRegressorExample")
+ .getOrCreate();
// $example on$
// Load and parse the data file, converting it to a DataFrame.
@@ -62,7 +64,7 @@ public class JavaRandomForestRegressorExample {
Pipeline pipeline = new Pipeline()
.setStages(new PipelineStage[] {featureIndexer, rf});
- // Train model.  This also runs the indexer.
+ // Train model. This also runs the indexer.
PipelineModel model = pipeline.fit(trainingData);
// Make predictions.
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaSimpleParamsExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaSimpleParamsExample.java
index 0787079ba4..ff1eb07dc6 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaSimpleParamsExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaSimpleParamsExample.java
@@ -46,7 +46,7 @@ public class JavaSimpleParamsExample {
.getOrCreate();
// Prepare training data.
- // We use LabeledPoint, which is a JavaBean.  Spark SQL can convert RDDs of JavaBeans
+ // We use LabeledPoint, which is a JavaBean. Spark SQL can convert RDDs of JavaBeans
// into DataFrames, where it uses the bean metadata to infer the schema.
List<LabeledPoint> localTraining = Lists.newArrayList(
new LabeledPoint(1.0, Vectors.dense(0.0, 1.1, 0.1)),
@@ -56,7 +56,7 @@ public class JavaSimpleParamsExample {
Dataset<Row> training =
spark.createDataFrame(localTraining, LabeledPoint.class);
- // Create a LogisticRegression instance.  This instance is an Estimator.
+ // Create a LogisticRegression instance. This instance is an Estimator.
LogisticRegression lr = new LogisticRegression();
// Print out the parameters, documentation, and any default values.
System.out.println("LogisticRegression parameters:\n" + lr.explainParams() + "\n");
@@ -65,7 +65,7 @@ public class JavaSimpleParamsExample {
lr.setMaxIter(10)
.setRegParam(0.01);
- // Learn a LogisticRegression model.  This uses the parameters stored in lr.
+ // Learn a LogisticRegression model. This uses the parameters stored in lr.
LogisticRegressionModel model1 = lr.fit(training);
// Since model1 is a Model (i.e., a Transformer produced by an Estimator),
// we can view the parameters it used during fit().
@@ -82,7 +82,7 @@ public class JavaSimpleParamsExample {
// One can also combine ParamMaps.
ParamMap paramMap2 = new ParamMap();
- paramMap2.put(lr.probabilityCol().w("myProbability")); // Change output column name
+ paramMap2.put(lr.probabilityCol().w("myProbability")); // Change output column name.
ParamMap paramMapCombined = paramMap.$plus$plus(paramMap2);
// Now learn a new model using the paramMapCombined parameters.
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaSimpleTextClassificationPipeline.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaSimpleTextClassificationPipeline.java
index 9516ce1f4f..7c24c46d2e 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaSimpleTextClassificationPipeline.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaSimpleTextClassificationPipeline.java
@@ -43,7 +43,9 @@ public class JavaSimpleTextClassificationPipeline {
public static void main(String[] args) {
SparkSession spark = SparkSession
- .builder().appName("JavaSimpleTextClassificationPipeline").getOrCreate();
+ .builder()
+ .appName("JavaSimpleTextClassificationPipeline")
+ .getOrCreate();
// Prepare training documents, which are labeled.
List<LabeledDocument> localTraining = Lists.newArrayList(