aboutsummaryrefslogtreecommitdiff
path: root/mllib/src/test
diff options
context:
space:
mode:
Diffstat (limited to 'mllib/src/test')
-rw-r--r--mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java24
-rw-r--r--mllib/src/test/java/org/apache/spark/mllib/stat/JavaStatisticsSuite.java22
-rw-r--r--mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala13
3 files changed, 59 insertions, 0 deletions
diff --git a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
index d272a42c85..427be9430d 100644
--- a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
+++ b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java
@@ -124,6 +124,10 @@ public class JavaLDASuite implements Serializable {
}
});
assertEquals(topicDistributions.count(), nonEmptyCorpus.count());
+
+ // Check: javaTopTopicsPerDocument
+ JavaRDD<scala.Tuple3<java.lang.Long, int[], java.lang.Double[]>> topTopics =
+ model.javaTopTopicsPerDocument(3);
}
@Test
@@ -160,11 +164,31 @@ public class JavaLDASuite implements Serializable {
assertEquals(roundedLocalTopicSummary.length, k);
}
+ @Test
+ public void localLdaMethods() {
+ JavaRDD<Tuple2<Long, Vector>> docs = sc.parallelize(toyData, 2);
+ JavaPairRDD<Long, Vector> pairedDocs = JavaPairRDD.fromJavaRDD(docs);
+
+ // check: topicDistributions
+ assertEquals(toyModel.topicDistributions(pairedDocs).count(), pairedDocs.count());
+
+ // check: logPerplexity
+ double logPerplexity = toyModel.logPerplexity(pairedDocs);
+
+ // check: logLikelihood
+ ArrayList<Tuple2<Long, Vector>> docsSingleWord = new ArrayList<Tuple2<Long, Vector>>();
+ docsSingleWord.add(new Tuple2<Long, Vector>(Long.valueOf(0), Vectors.dense(1.0, 0.0, 0.0)));
+ JavaPairRDD<Long, Vector> single = JavaPairRDD.fromJavaRDD(sc.parallelize(docsSingleWord));
+ double logLikelihood = toyModel.logLikelihood(single);
+ }
+
private static int tinyK = LDASuite$.MODULE$.tinyK();
private static int tinyVocabSize = LDASuite$.MODULE$.tinyVocabSize();
private static Matrix tinyTopics = LDASuite$.MODULE$.tinyTopics();
private static Tuple2<int[], double[]>[] tinyTopicDescription =
LDASuite$.MODULE$.tinyTopicDescription();
private JavaPairRDD<Long, Vector> corpus;
+ private LocalLDAModel toyModel = LDASuite$.MODULE$.toyModel();
+ private ArrayList<Tuple2<Long, Vector>> toyData = LDASuite$.MODULE$.javaToyData();
}
diff --git a/mllib/src/test/java/org/apache/spark/mllib/stat/JavaStatisticsSuite.java b/mllib/src/test/java/org/apache/spark/mllib/stat/JavaStatisticsSuite.java
index 62f7f26b7c..eb4e369862 100644
--- a/mllib/src/test/java/org/apache/spark/mllib/stat/JavaStatisticsSuite.java
+++ b/mllib/src/test/java/org/apache/spark/mllib/stat/JavaStatisticsSuite.java
@@ -27,7 +27,12 @@ import org.junit.Test;
import static org.junit.Assert.assertEquals;
import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaDoubleRDD;
import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.mllib.linalg.Vectors;
+import org.apache.spark.mllib.regression.LabeledPoint;
+import org.apache.spark.mllib.stat.test.ChiSqTestResult;
+import org.apache.spark.mllib.stat.test.KolmogorovSmirnovTestResult;
public class JavaStatisticsSuite implements Serializable {
private transient JavaSparkContext sc;
@@ -53,4 +58,21 @@ public class JavaStatisticsSuite implements Serializable {
// Check default method
assertEquals(corr1, corr2);
}
+
+ @Test
+ public void kolmogorovSmirnovTest() {
+ JavaDoubleRDD data = sc.parallelizeDoubles(Lists.newArrayList(0.2, 1.0, -1.0, 2.0));
+ KolmogorovSmirnovTestResult testResult1 = Statistics.kolmogorovSmirnovTest(data, "norm");
+ KolmogorovSmirnovTestResult testResult2 = Statistics.kolmogorovSmirnovTest(
+ data, "norm", 0.0, 1.0);
+ }
+
+ @Test
+ public void chiSqTest() {
+ JavaRDD<LabeledPoint> data = sc.parallelize(Lists.newArrayList(
+ new LabeledPoint(0.0, Vectors.dense(0.1, 2.3)),
+ new LabeledPoint(1.0, Vectors.dense(1.5, 5.1)),
+ new LabeledPoint(0.0, Vectors.dense(2.4, 8.1))));
+ ChiSqTestResult[] testResults = Statistics.chiSqTest(data);
+ }
}
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala
index ce6a8eb8e8..926185e90b 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/clustering/LDASuite.scala
@@ -17,6 +17,8 @@
package org.apache.spark.mllib.clustering
+import java.util.{ArrayList => JArrayList}
+
import breeze.linalg.{DenseMatrix => BDM, argtopk, max, argmax}
import org.apache.spark.SparkFunSuite
@@ -575,6 +577,17 @@ private[clustering] object LDASuite {
Vectors.sparse(6, Array(4, 5), Array(1, 1))
).zipWithIndex.map { case (wordCounts, docId) => (docId.toLong, wordCounts) }
+ /** Used in the Java Test Suite */
+ def javaToyData: JArrayList[(java.lang.Long, Vector)] = {
+ val javaData = new JArrayList[(java.lang.Long, Vector)]
+ var i = 0
+ while (i < toyData.size) {
+ javaData.add((toyData(i)._1, toyData(i)._2))
+ i += 1
+ }
+ javaData
+ }
+
def toyModel: LocalLDAModel = {
val k = 2
val vocabSize = 6