-rw-r--r--  docs/ml-clustering.md                                                   | 30 -
-rw-r--r--  docs/ml-guide.md                                                        |  3 +-
-rw-r--r--  docs/mllib-guide.md                                                     |  1 -
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaLDAExample.java | 94 -
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala   | 77 -
5 files changed, 1 insertion(+), 204 deletions(-)
diff --git a/docs/ml-clustering.md b/docs/ml-clustering.md
deleted file mode 100644
index 1743ef43a6..0000000000
--- a/docs/ml-clustering.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-layout: global
-title: Clustering - ML
-displayTitle: <a href="ml-guide.html">ML</a> - Clustering
----
-
-In this section, we introduce the pipeline API for [clustering in mllib](mllib-clustering.html).
-
-## Latent Dirichlet allocation (LDA)
-
-`LDA` is implemented as an `Estimator` that supports both `EMLDAOptimizer` and `OnlineLDAOptimizer`,
-and generates a `LDAModel` as the base models. Expert users may cast a `LDAModel` generated by
-`EMLDAOptimizer` to a `DistributedLDAModel` if needed.
-
-<div class="codetabs">
-
-Refer to the [Scala API docs](api/scala/index.html#org.apache.spark.ml.clustering.LDA) for more details.
-
-<div data-lang="scala" markdown="1">
-{% include_example scala/org/apache/spark/examples/ml/LDAExample.scala %}
-</div>
-
-<div data-lang="java" markdown="1">
-
-Refer to the [Java API docs](api/java/org/apache/spark/ml/clustering/LDA.html) for more details.
-
-{% include_example java/org/apache/spark/examples/ml/JavaLDAExample.java %}
-</div>
-
-</div>
\ No newline at end of file
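
The guide text removed above notes that `LDA` is an `Estimator` whose EM-trained `LDAModel` can be cast to `DistributedLDAModel` by expert users. A minimal Scala sketch of that usage, assuming a DataFrame `dataset` with a vector-valued "features" column (the variable names are illustrative, not part of the removed docs):

    import org.apache.spark.ml.clustering.{DistributedLDAModel, LDA}

    // Train with the EM optimizer so the fitted model is distributed.
    val lda = new LDA()
      .setK(10)
      .setMaxIter(10)
      .setOptimizer("em")
    val model = lda.fit(dataset) // dataset: DataFrame with a "features" vector column

    // Expert-only cast described in the removed doc text:
    // EM-trained models are DistributedLDAModel instances.
    val distributedModel = model.asInstanceOf[DistributedLDAModel]
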
diff --git a/docs/ml-guide.md b/docs/ml-guide.md
index 6f35b30c3d..be18a05361 100644
--- a/docs/ml-guide.md
+++ b/docs/ml-guide.md
@@ -40,7 +40,6 @@ Also, some algorithms have additional capabilities in the `spark.ml` API; e.g.,
provide class probabilities, and linear models provide model summaries.
* [Feature extraction, transformation, and selection](ml-features.html)
-* [Clustering](ml-clustering.html)
* [Decision Trees for classification and regression](ml-decision-tree.html)
* [Ensembles](ml-ensembles.html)
* [Linear methods with elastic net regularization](ml-linear-methods.html)
@@ -951,4 +950,4 @@ model.transform(test)
{% endhighlight %}
</div>
-</div>
\ No newline at end of file
+</div>
diff --git a/docs/mllib-guide.md b/docs/mllib-guide.md
index 54e35fcbb1..91e50ccfec 100644
--- a/docs/mllib-guide.md
+++ b/docs/mllib-guide.md
@@ -69,7 +69,6 @@ We list major functionality from both below, with links to detailed guides.
concepts. It also contains sections on using algorithms within the Pipelines API, for example:
* [Feature extraction, transformation, and selection](ml-features.html)
-* [Clustering](ml-clustering.html)
* [Decision trees for classification and regression](ml-decision-tree.html)
* [Ensembles](ml-ensembles.html)
* [Linear methods with elastic net regularization](ml-linear-methods.html)
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaLDAExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaLDAExample.java
deleted file mode 100644
index b3a7d2eb29..0000000000
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaLDAExample.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.examples.ml;
-
-import java.util.regex.Pattern;
-
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.api.java.function.Function;
-import org.apache.spark.ml.clustering.LDA;
-import org.apache.spark.ml.clustering.LDAModel;
-import org.apache.spark.mllib.linalg.Vector;
-import org.apache.spark.mllib.linalg.VectorUDT;
-import org.apache.spark.mllib.linalg.Vectors;
-import org.apache.spark.sql.DataFrame;
-import org.apache.spark.sql.Row;
-import org.apache.spark.sql.SQLContext;
-import org.apache.spark.sql.catalyst.expressions.GenericRow;
-import org.apache.spark.sql.types.Metadata;
-import org.apache.spark.sql.types.StructField;
-import org.apache.spark.sql.types.StructType;
-
-/**
- * An example demonstrating LDA
- * Run with
- * <pre>
- * bin/run-example ml.JavaLDAExample
- * </pre>
- */
-public class JavaLDAExample {
-
- private static class ParseVector implements Function<String, Row> {
- private static final Pattern separator = Pattern.compile(" ");
-
- @Override
- public Row call(String line) {
- String[] tok = separator.split(line);
- double[] point = new double[tok.length];
- for (int i = 0; i < tok.length; ++i) {
- point[i] = Double.parseDouble(tok[i]);
- }
- Vector[] points = {Vectors.dense(point)};
- return new GenericRow(points);
- }
- }
-
- public static void main(String[] args) {
-
- String inputFile = "data/mllib/sample_lda_data.txt";
-
- // Parses the arguments
- SparkConf conf = new SparkConf().setAppName("JavaLDAExample");
- JavaSparkContext jsc = new JavaSparkContext(conf);
- SQLContext sqlContext = new SQLContext(jsc);
-
- // Loads data
- JavaRDD<Row> points = jsc.textFile(inputFile).map(new ParseVector());
- StructField[] fields = {new StructField("features", new VectorUDT(), false, Metadata.empty())};
- StructType schema = new StructType(fields);
- DataFrame dataset = sqlContext.createDataFrame(points, schema);
-
- // Trains a LDA model
- LDA lda = new LDA()
- .setK(10)
- .setMaxIter(10);
- LDAModel model = lda.fit(dataset);
-
- System.out.println(model.logLikelihood(dataset));
- System.out.println(model.logPerplexity(dataset));
-
- // Shows the result
- DataFrame topics = model.describeTopics(3);
- topics.show(false);
- model.transform(dataset).show(false);
-
- jsc.stop();
- }
-}
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala
deleted file mode 100644
index 419ce3d87a..0000000000
--- a/examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.examples.ml
-
-// scalastyle:off println
-import org.apache.spark.{SparkContext, SparkConf}
-import org.apache.spark.mllib.linalg.{VectorUDT, Vectors}
-// $example on$
-import org.apache.spark.ml.clustering.LDA
-import org.apache.spark.sql.{Row, SQLContext}
-import org.apache.spark.sql.types.{StructField, StructType}
-// $example off$
-
-/**
- * An example demonstrating a LDA of ML pipeline.
- * Run with
- * {{{
- * bin/run-example ml.LDAExample
- * }}}
- */
-object LDAExample {
-
- final val FEATURES_COL = "features"
-
- def main(args: Array[String]): Unit = {
-
- val input = "data/mllib/sample_lda_data.txt"
- // Creates a Spark context and a SQL context
- val conf = new SparkConf().setAppName(s"${this.getClass.getSimpleName}")
- val sc = new SparkContext(conf)
- val sqlContext = new SQLContext(sc)
-
- // $example on$
- // Loads data
- val rowRDD = sc.textFile(input).filter(_.nonEmpty)
- .map(_.split(" ").map(_.toDouble)).map(Vectors.dense).map(Row(_))
- val schema = StructType(Array(StructField(FEATURES_COL, new VectorUDT, false)))
- val dataset = sqlContext.createDataFrame(rowRDD, schema)
-
- // Trains a LDA model
- val lda = new LDA()
- .setK(10)
- .setMaxIter(10)
- .setFeaturesCol(FEATURES_COL)
- val model = lda.fit(dataset)
- val transformed = model.transform(dataset)
-
- val ll = model.logLikelihood(dataset)
- val lp = model.logPerplexity(dataset)
-
- // describeTopics
- val topics = model.describeTopics(3)
-
- // Shows the result
- topics.show(false)
- transformed.show(false)
-
- // $example off$
- sc.stop()
- }
-}
-// scalastyle:on println
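
A closing note on the deleted Scala example: `model.describeTopics(3)` returns a DataFrame with `topic`, `termIndices`, and `termWeights` columns, so beyond `topics.show(false)` the rows can be read back directly. A sketch, assuming `model` is the fitted `LDAModel` from the example above (collected array columns arrive as `Seq`):

    import org.apache.spark.sql.Row

    // Each row describes one topic: its index, top term indices, and weights.
    model.describeTopics(3).collect().foreach {
      case Row(topic: Int, termIndices: Seq[_], termWeights: Seq[_]) =>
        println(s"topic=$topic terms=${termIndices.mkString(",")} " +
          s"weights=${termWeights.mkString(",")}")
    }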