3 files changed, 23 insertions(+), 5 deletions(-)
diff --git a/docs/mllib-clustering.md b/docs/mllib-clustering.md
index 3aad4149f9..d72dc20a5a 100644
--- a/docs/mllib-clustering.md
+++ b/docs/mllib-clustering.md
@@ -447,7 +447,7 @@ It supports different inference algorithms via `setOptimizer` function. EMLDAOpt
 on the likelihood function and yields comprehensive results, while OnlineLDAOptimizer uses iterative mini-batch sampling for [online variational inference](https://www.cs.princeton.edu/~blei/papers/HoffmanBleiBach2010b.pdf) and is generally memory friendly.
 After fitting on the documents, LDA provides:
 
 * Topics: Inferred topics, each of which is a probability distribution over terms (words).
-* Topic distributions for documents: For each document in the training set, LDA gives a probability distribution over topics. (EM only)
+* Topic distributions for documents: For each non-empty document in the training set, LDA gives a probability distribution over topics. Note that no topic distribution is created for empty documents. (EM only)
 
 LDA takes the following parameters:
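A minimal Scala sketch of the behavior documented above (not part of this patch; the SparkContext `sc` and the corpus values are assumed for illustration): with the default EM optimizer, topicDistributions covers only the non-empty documents.

    import org.apache.spark.mllib.clustering.{DistributedLDAModel, LDA}
    import org.apache.spark.mllib.linalg.Vectors

    val corpus = sc.parallelize(Seq(
      0L -> Vectors.dense(1, 3, 0, 2, 8),
      1L -> Vectors.dense(0, 0, 0, 0, 0), // empty doc: no distribution is produced
      2L -> Vectors.dense(2, 3, 12, 3, 1)))
    // The default optimizer is EM, so run() yields a DistributedLDAModel.
    val model = new LDA().setK(2).run(corpus).asInstanceOf[DistributedLDAModel]
    model.topicDistributions.count() // 2, not 3: the empty document is skipped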
diff --git a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
index 581c033f08..b48f190f59 100644
--- a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
+++ b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
@@ -28,12 +28,13 @@ import static org.junit.Assert.assertArrayEquals;
 import org.junit.Before;
 import org.junit.Test;
 
+import org.apache.spark.api.java.function.Function;
 import org.apache.spark.api.java.JavaPairRDD;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.mllib.linalg.Matrix;
 import org.apache.spark.mllib.linalg.Vector;
-
+import org.apache.spark.mllib.linalg.Vectors;
 
 public class JavaLDASuite implements Serializable {
   private transient JavaSparkContext sc;
@@ -110,7 +111,15 @@ public class JavaLDASuite implements Serializable {
 
     // Check: topic distributions
     JavaPairRDD<Long, Vector> topicDistributions = model.javaTopicDistributions();
-    assertEquals(topicDistributions.count(), corpus.count());
+    // SPARK-5562: topicDistributions returns distributions only for the non-empty
+    // documents, so compare against nonEmptyCorpus instead of corpus.
+    JavaPairRDD<Long, Vector> nonEmptyCorpus = corpus.filter(
+      new Function<Tuple2<Long, Vector>, Boolean>() {
+        public Boolean call(Tuple2<Long, Vector> tuple2) {
+          return Vectors.norm(tuple2._2(), 1.0) != 0.0;
+        }
+      });
+    assertEquals(topicDistributions.count(), nonEmptyCorpus.count());
   }
 
   @Test
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala
index 406affa255..03a8a2538b 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala
@@ -99,9 +99,13 @@ class LDASuite extends SparkFunSuite with MLlibTestSparkContext {
 
     // Check: per-doc topic distributions
     val topicDistributions = model.topicDistributions.collect()
+
     // Ensure all documents are covered.
-    assert(topicDistributions.length === tinyCorpus.length)
-    assert(tinyCorpus.map(_._1).toSet === topicDistributions.map(_._1).toSet)
+    // SPARK-5562: topicDistributions returns distributions only for the non-empty
+    // documents, so compare against nonEmptyTinyCorpus instead of tinyCorpus.
+    val nonEmptyTinyCorpus = getNonEmptyDoc(tinyCorpus)
+    assert(topicDistributions.length === nonEmptyTinyCorpus.length)
+    assert(nonEmptyTinyCorpus.map(_._1).toSet === topicDistributions.map(_._1).toSet)
     // Ensure we have proper distributions
     topicDistributions.foreach { case (docId, topicDistribution) =>
       assert(topicDistribution.size === tinyK)
@@ -232,12 +236,17 @@ private[clustering] object LDASuite {
   }
 
   def tinyCorpus: Array[(Long, Vector)] = Array(
+    Vectors.dense(0, 0, 0, 0, 0), // empty doc
     Vectors.dense(1, 3, 0, 2, 8),
     Vectors.dense(0, 2, 1, 0, 4),
     Vectors.dense(2, 3, 12, 3, 1),
+    Vectors.dense(0, 0, 0, 0, 0), // empty doc
     Vectors.dense(0, 3, 1, 9, 8),
     Vectors.dense(1, 1, 4, 2, 6)
   ).zipWithIndex.map { case (wordCounts, docId) => (docId.toLong, wordCounts) }
   assert(tinyCorpus.forall(_._2.size == tinyVocabSize)) // sanity check for test data
 
+  def getNonEmptyDoc(corpus: Array[(Long, Vector)]): Array[(Long, Vector)] = corpus.filter {
+    case (_, wc: Vector) => Vectors.norm(wc, p = 1.0) != 0.0
+  }
 }
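Both suites use the same emptiness test: a document is empty when the L1 norm of its term-count vector is zero, i.e. every term count is zero. A sketch of that check lifted into a reusable RDD helper (the name nonEmptyDocs is hypothetical, not part of this patch):

    import org.apache.spark.mllib.linalg.{Vector, Vectors}
    import org.apache.spark.rdd.RDD

    // Hypothetical helper mirroring the tests' filter: keep only documents
    // whose term-count vector has a nonzero L1 norm.
    def nonEmptyDocs(corpus: RDD[(Long, Vector)]): RDD[(Long, Vector)] =
      corpus.filter { case (_, wordCounts) => Vectors.norm(wordCounts, p = 1.0) != 0.0 }

Callers that need to line model.topicDistributions up with their input can filter the corpus through such a helper first, since the two then cover the same document ids.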