author    Yu ISHIKAWA <yuu.ishikawa@gmail.com>  2015-11-06 22:56:29 -0800
committer Davies Liu <davies.liu@gmail.com>     2015-11-06 22:56:29 -0800
commit    2ff0e79a8647cca5c9c57f613a07e739ac4f677e (patch)
tree      8f79d410c855c4099f572b4b1eba1fd858e83aff /python/pyspark/mllib
parent    7f741905b06ed6d3dfbff6db41a3355dab71aa3c (diff)
[SPARK-8467] [MLLIB] [PYSPARK] Add LDAModel.describeTopics() in Python
Could jkbradley and davies review it?

- Create a wrapper class `LDAModelWrapper` for `LDAModel`, because we can't handle the return value of `describeTopics` in Scala from PySpark directly: `Array[(Array[Int], Array[Double])]` is too complicated to convert.
- Add `loadLDAModel` in `PythonMLlibAPI`, since `LDAModel` in Scala is an abstract class and we need to call `load` of `DistributedLDAModel`.

[[SPARK-8467] Add LDAModel.describeTopics() in Python - ASF JIRA](https://issues.apache.org/jira/browse/SPARK-8467)

Author: Yu ISHIKAWA <yuu.ishikawa@gmail.com>

Closes #8643 from yu-iskw/SPARK-8467-2.
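For context, a minimal usage sketch of the Python API this patch exposes; it is not part of the commit, assumes a live `SparkContext` bound to `sc`, and reuses the toy corpus from the doctest in the diff below (the save path is illustrative):

```python
# Sketch only (not from this commit): exercising the API the patch adds.
# Assumes a running SparkContext `sc`; corpus mirrors the doctest below.
from pyspark.mllib.clustering import LDA, LDAModel
from pyspark.mllib.linalg import Vectors

corpus = sc.parallelize([
    [1, Vectors.dense([0.0, 1.0])],    # (document id, term-count vector)
    [2, Vectors.sparse(2, {0: 1.0})],
])
model = LDA.train(corpus, k=2, seed=1)

# New in this patch: per topic, a pair of parallel lists
# (term indices, term weights), optionally truncated per topic.
model.describeTopics()
model.describeTopics(maxTermsPerTopic=1)

# Also touched by this patch: save/load now go through JavaSaveable
# and the new loadLDAModel plumbing in PythonMLlibAPI.
model.save(sc, "/tmp/lda-model")       # illustrative path
same_model = LDAModel.load(sc, "/tmp/lda-model")
```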
Diffstat (limited to 'python/pyspark/mllib')
-rw-r--r--  python/pyspark/mllib/clustering.py | 33
1 file changed, 18 insertions(+), 15 deletions(-)
diff --git a/python/pyspark/mllib/clustering.py b/python/pyspark/mllib/clustering.py
index 8629aa5a17..12081f8c69 100644
--- a/python/pyspark/mllib/clustering.py
+++ b/python/pyspark/mllib/clustering.py
@@ -671,7 +671,7 @@ class StreamingKMeans(object):
return dstream.mapValues(lambda x: self._model.predict(x))
-class LDAModel(JavaModelWrapper):
+class LDAModel(JavaModelWrapper, JavaSaveable, Loader):
""" A clustering model derived from the LDA method.
@@ -691,9 +691,14 @@ class LDAModel(JavaModelWrapper):
... [2, SparseVector(2, {0: 1.0})],
... ]
>>> rdd = sc.parallelize(data)
- >>> model = LDA.train(rdd, k=2)
+ >>> model = LDA.train(rdd, k=2, seed=1)
>>> model.vocabSize()
2
+ >>> model.describeTopics()
+ [([1, 0], [0.5..., 0.49...]), ([0, 1], [0.5..., 0.49...])]
+ >>> model.describeTopics(1)
+ [([1], [0.5...]), ([0], [0.5...])]
+
>>> topics = model.topicsMatrix()
>>> topics_expect = array([[0.5, 0.5], [0.5, 0.5]])
>>> assert_almost_equal(topics, topics_expect, 1)
@@ -724,18 +729,17 @@ class LDAModel(JavaModelWrapper):
"""Vocabulary size (number of terms or terms in the vocabulary)"""
return self.call("vocabSize")
- @since('1.5.0')
- def save(self, sc, path):
- """Save the LDAModel on to disk.
+ @since('1.6.0')
+ def describeTopics(self, maxTermsPerTopic=None):
+ """Return the topics described by weighted terms.
- :param sc: SparkContext
- :param path: str, path to where the model needs to be stored.
+ WARNING: If vocabSize and k are large, this can return a large object!
"""
- if not isinstance(sc, SparkContext):
- raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
- if not isinstance(path, basestring):
- raise TypeError("path should be a basestring, got type %s" % type(path))
- self._java_model.save(sc._jsc.sc(), path)
+ if maxTermsPerTopic is None:
+ topics = self.call("describeTopics")
+ else:
+ topics = self.call("describeTopics", maxTermsPerTopic)
+ return topics
@classmethod
@since('1.5.0')
@@ -749,9 +753,8 @@ class LDAModel(JavaModelWrapper):
raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
if not isinstance(path, basestring):
raise TypeError("path should be a basestring, got type %s" % type(path))
- java_model = sc._jvm.org.apache.spark.mllib.clustering.DistributedLDAModel.load(
- sc._jsc.sc(), path)
- return cls(java_model)
+ model = callMLlibFunc("loadLDAModel", sc, path)
+ return LDAModel(model)
class LDA(object):
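As a consumption note (not from the commit): `describeTopics()` returns, for each topic, a pair of parallel lists of term indices and term weights, as the doctest above shows. A minimal sketch of mapping those indices back to words, assuming the `model` from the sketch above and a hypothetical `vocab` list — MLlib returns only integer indices, so the index-to-word mapping must come from your own vectorization step:

```python
# Sketch: pretty-print describeTopics() output as (term, weight) pairs.
# `vocab` is hypothetical; it would be built during your own preprocessing.
vocab = ["spark", "mllib"]

for i, (term_indices, term_weights) in enumerate(model.describeTopics()):
    pairs = [(vocab[t], w) for t, w in zip(term_indices, term_weights)]
    print("topic %d: %s" % (i, pairs))
```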