Diffstat (limited to 'docs/ml-ensembles.md')
-rw-r--r--  docs/ml-ensembles.md  10
1 file changed, 5 insertions, 5 deletions
diff --git a/docs/ml-ensembles.md b/docs/ml-ensembles.md
index 58f566c9b4..ce15f5e646 100644
--- a/docs/ml-ensembles.md
+++ b/docs/ml-ensembles.md
@@ -195,7 +195,7 @@ import org.apache.spark.ml.feature.*;
import org.apache.spark.sql.DataFrame;
// Load and parse the data file, converting it to a DataFrame.
-DataFrame data = sqlContext.read.format("libsvm")
+DataFrame data = sqlContext.read().format("libsvm")
.load("data/mllib/sample_libsvm_data.txt");
// Index labels, adding metadata to the label column.
@@ -384,7 +384,7 @@ import org.apache.spark.ml.regression.RandomForestRegressor;
import org.apache.spark.sql.DataFrame;
// Load and parse the data file, converting it to a DataFrame.
-DataFrame data = sqlContext.read.format("libsvm")
+DataFrame data = sqlContext.read().format("libsvm")
.load("data/mllib/sample_libsvm_data.txt");
// Automatically identify categorical features, and index them.
@@ -640,7 +640,7 @@ import org.apache.spark.ml.feature.*;
import org.apache.spark.sql.DataFrame;
// Load and parse the data file, converting it to a DataFrame.
-DataFrame data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt");
+DataFrame data = sqlContext.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt");
// Index labels, adding metadata to the label column.
// Fit on whole dataset to include all labels in index.
@@ -830,7 +830,7 @@ import org.apache.spark.ml.regression.GBTRegressor;
import org.apache.spark.sql.DataFrame;
// Load and parse the data file, converting it to a DataFrame.
-DataFrame data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt");
+DataFrame data = sqlContext.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt");
// Automatically identify categorical features, and index them.
// Set maxCategories so features with > 4 distinct values are treated as continuous.
@@ -1000,7 +1000,7 @@ SparkConf conf = new SparkConf().setAppName("JavaOneVsRestExample");
JavaSparkContext jsc = new JavaSparkContext(conf);
SQLContext jsql = new SQLContext(jsc);
-DataFrame dataFrame = sqlContext.read.format("libsvm")
+DataFrame dataFrame = sqlContext.read().format("libsvm")
.load("data/mllib/sample_multiclass_classification_data.txt");
DataFrame[] splits = dataFrame.randomSplit(new double[] {0.7, 0.3}, 12345);
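Every hunk above makes the same correction: in the Java API, SQLContext.read() is a method that returns a DataFrameReader, so the parentheses are required; the parameterless sqlContext.read form only compiles in Scala. The sketch below shows the corrected loading pattern in context; it assumes the Spark 1.6-era APIs used in this doc (SQLContext, DataFrame), and the class name LibsvmLoadExample and the show(5) calls are illustrative, not part of the patched examples.

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.SQLContext;

public class LibsvmLoadExample {
  public static void main(String[] args) {
    // Standard Spark 1.x setup: SparkConf -> JavaSparkContext -> SQLContext.
    SparkConf conf = new SparkConf().setAppName("LibsvmLoadExample");
    JavaSparkContext jsc = new JavaSparkContext(conf);
    SQLContext sqlContext = new SQLContext(jsc);

    // read() must be invoked as a method in Java; it returns a DataFrameReader.
    DataFrame data = sqlContext.read().format("libsvm")
      .load("data/mllib/sample_libsvm_data.txt");

    // Split into training and test sets, as the ensemble examples do.
    DataFrame[] splits = data.randomSplit(new double[] {0.7, 0.3}, 12345);
    DataFrame training = splits[0];
    DataFrame test = splits[1];

    training.show(5);
    test.show(5);

    jsc.stop();
  }
}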