#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from pyspark import since, keyword_only
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel
from pyspark.ml.param.shared import *
from pyspark.mllib.common import inherit_doc

__all__ = ['BisectingKMeans', 'BisectingKMeansModel',
           'KMeans', 'KMeansModel',
           'GaussianMixture', 'GaussianMixtureModel',
           'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel']


class GaussianMixtureModel(JavaModel, JavaMLWritable, JavaMLReadable):
    """
    .. note:: Experimental

    Model fitted by GaussianMixture.

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def weights(self):
        """
        Weight for each Gaussian distribution in the mixture.
        This is a multinomial probability distribution over the k Gaussians,
        where weights[i] is the weight for Gaussian i, and weights sum to 1.
        """
        return self._call_java("weights")

    @property
    @since("2.0.0")
    def gaussiansDF(self):
        """
        Retrieve Gaussian distributions as a DataFrame.
        Each row represents a Gaussian distribution.
        The DataFrame has two columns: mean (Vector) and cov (Matrix).
        """
        return self._call_java("gaussiansDF")


@inherit_doc
class GaussianMixture(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
                      HasProbabilityCol, JavaMLWritable, JavaMLReadable):
    """
    .. note:: Experimental

    GaussianMixture clustering.

    >>> from pyspark.mllib.linalg import Vectors

    >>> data = [(Vectors.dense([-0.1, -0.05]),),
    ...         (Vectors.dense([-0.01, -0.1]),),
    ...         (Vectors.dense([0.9, 0.8]),),
    ...         (Vectors.dense([0.75, 0.935]),),
    ...         (Vectors.dense([-0.83, -0.68]),),
    ...         (Vectors.dense([-0.91, -0.76]),)]
    >>> df = sqlContext.createDataFrame(data, ["features"])
    >>> gm = GaussianMixture(k=3, tol=0.0001,
    ...                      maxIter=10, seed=10)
    >>> model = gm.fit(df)
    >>> weights = model.weights
    >>> len(weights)
    3
    >>> model.gaussiansDF.show()
    +--------------------+--------------------+
    |                mean|                 cov|
    +--------------------+--------------------+
    |[-0.0550000000000...|0.002025000000000...|
    |[0.82499999999999...|0.005625000000000...|
    |[-0.87,-0.7200000...|0.001600000000000...|
    +--------------------+--------------------+
    ...
    >>> transformed = model.transform(df).select("features", "prediction")
    >>> rows = transformed.collect()
    >>> rows[4].prediction == rows[5].prediction
    True
    >>> rows[2].prediction == rows[3].prediction
    True
    >>> gmm_path = temp_path + "/gmm"
    >>> gm.save(gmm_path)
    >>> gm2 = GaussianMixture.load(gmm_path)
    >>> gm2.getK()
    3
    >>> model_path = temp_path + "/gmm_model"
    >>> model.save(model_path)
    >>> model2 = GaussianMixtureModel.load(model_path)
    >>> model2.weights == model.weights
    True
    >>> model2.gaussiansDF.show()
    +--------------------+--------------------+
    |                mean|                 cov|
    +--------------------+--------------------+
    |[-0.0550000000000...|0.002025000000000...|
    |[0.82499999999999...|0.005625000000000...|
    |[-0.87,-0.7200000...|0.001600000000000...|
    +--------------------+--------------------+
    ...

    .. versionadded:: 2.0.0
    """

    k = Param(Params._dummy(), "k", "number of clusters to create",
              typeConverter=TypeConverters.toInt)

    @keyword_only
    def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
                 probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
        """
        __init__(self, featuresCol="features", predictionCol="prediction", k=2, \
                 probabilityCol="probability", tol=0.01, maxIter=100, seed=None)
        """
        super(GaussianMixture, self).__init__()
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
                                            self.uid)
        self._setDefault(k=2, tol=0.01, maxIter=100)
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    def _create_model(self, java_model):
        return GaussianMixtureModel(java_model)

    @keyword_only
    @since("2.0.0")
    def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
                  probabilityCol="probability", tol=0.01, maxIter=100, seed=None):
        """
        setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
                  probabilityCol="probability", tol=0.01, maxIter=100, seed=None)

        Sets params for GaussianMixture.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    @since("2.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("2.0.0")
    def getK(self):
        """
        Gets the value of `k` or its default value.
        """
        return self.getOrDefault(self.k)


class KMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by KMeans.

    .. versionadded:: 1.5.0
    """

    @since("1.5.0")
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy arrays."""
        return [c.toArray() for c in self._call_java("clusterCenters")]

    @since("2.0.0")
    def computeCost(self, dataset):
        """
        Return the K-means cost (sum of squared distances of points to their nearest center)
        for this model on the given data.
        """
        return self._call_java("computeCost", dataset)


@inherit_doc
class KMeans(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed,
             JavaMLWritable, JavaMLReadable):
    """
    K-means clustering with a k-means++ like initialization mode
    (the k-means|| algorithm by Bahmani et al).

    >>> from pyspark.mllib.linalg import Vectors
    >>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
    ...         (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
    >>> df = sqlContext.createDataFrame(data, ["features"])
    >>> kmeans = KMeans(k=2, seed=1)
    >>> model = kmeans.fit(df)
    >>> centers = model.clusterCenters()
    >>> len(centers)
    2
    >>> model.computeCost(df)
    2.000...
    >>> transformed = model.transform(df).select("features", "prediction")
    >>> rows = transformed.collect()
    >>> rows[0].prediction == rows[1].prediction
    True
    >>> rows[2].prediction == rows[3].prediction
    True
    >>> kmeans_path = temp_path + "/kmeans"
    >>> kmeans.save(kmeans_path)
    >>> kmeans2 = KMeans.load(kmeans_path)
    >>> kmeans2.getK()
    2
    >>> model_path = temp_path + "/kmeans_model"
    >>> model.save(model_path)
    >>> model2 = KMeansModel.load(model_path)
    >>> model.clusterCenters()[0] == model2.clusterCenters()[0]
    array([ True,  True], dtype=bool)
    >>> model.clusterCenters()[1] == model2.clusterCenters()[1]
    array([ True,  True], dtype=bool)

    .. versionadded:: 1.5.0
    """

    k = Param(Params._dummy(), "k", "number of clusters to create",
              typeConverter=TypeConverters.toInt)
    initMode = Param(Params._dummy(), "initMode",
                     "the initialization algorithm. This can be either \"random\" to " +
                     "choose random points as initial cluster centers, or \"k-means||\" " +
                     "to use a parallel variant of k-means++",
                     typeConverter=TypeConverters.toString)
    initSteps = Param(Params._dummy(), "initSteps", "steps for k-means initialization mode",
                      typeConverter=TypeConverters.toInt)

    @keyword_only
    def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
                 initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None):
        """
        __init__(self, featuresCol="features", predictionCol="prediction", k=2, \
                 initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None)
        """
        super(KMeans, self).__init__()
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
        self._setDefault(k=2, initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20)
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    def _create_model(self, java_model):
        return KMeansModel(java_model)

    @keyword_only
    @since("1.5.0")
    def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
                  initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None):
        """
        setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
                  initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None)

        Sets params for KMeans.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    @since("1.5.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("1.5.0")
    def getK(self):
        """
        Gets the value of `k` or its default value.
        """
        return self.getOrDefault(self.k)

    @since("1.5.0")
    def setInitMode(self, value):
        """
        Sets the value of :py:attr:`initMode`.
        """
        return self._set(initMode=value)

    @since("1.5.0")
    def getInitMode(self):
        """
        Gets the value of `initMode` or its default value.
        """
        return self.getOrDefault(self.initMode)

    @since("1.5.0")
    def setInitSteps(self, value):
        """
        Sets the value of :py:attr:`initSteps`.
        """
        return self._set(initSteps=value)

    @since("1.5.0")
    def getInitSteps(self):
        """
        Gets the value of `initSteps` or its default value.
        """
        return self.getOrDefault(self.initSteps)


class BisectingKMeansModel(JavaModel, JavaMLWritable, JavaMLReadable):
    """
    .. note:: Experimental

    Model fitted by BisectingKMeans.

    .. versionadded:: 2.0.0
    """

    @since("2.0.0")
    def clusterCenters(self):
        """Get the cluster centers, represented as a list of NumPy arrays."""
        return [c.toArray() for c in self._call_java("clusterCenters")]

    @since("2.0.0")
    def computeCost(self, dataset):
        """
        Computes the sum of squared distances between the input points
        and their corresponding cluster centers.
        """
        return self._call_java("computeCost", dataset)


@inherit_doc
class BisectingKMeans(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasSeed,
                      JavaMLWritable, JavaMLReadable):
    """
    .. note:: Experimental

    A bisecting k-means algorithm based on the paper "A comparison of document clustering
    techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
    The algorithm starts from a single cluster that contains all points.
    Iteratively it finds divisible clusters on the bottom level and bisects each of them using
    k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
    The bisecting steps of clusters on the same level are grouped together to increase parallelism.
    If bisecting all divisible clusters on the bottom level would result in more than `k` leaf
    clusters, larger clusters get higher priority.

    >>> from pyspark.mllib.linalg import Vectors
    >>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
    ...         (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
    >>> df = sqlContext.createDataFrame(data, ["features"])
    >>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
    >>> model = bkm.fit(df)
    >>> centers = model.clusterCenters()
    >>> len(centers)
    2
    >>> model.computeCost(df)
    2.000...
    >>> transformed = model.transform(df).select("features", "prediction")
    >>> rows = transformed.collect()
    >>> rows[0].prediction == rows[1].prediction
    True
    >>> rows[2].prediction == rows[3].prediction
    True
    >>> bkm_path = temp_path + "/bkm"
    >>> bkm.save(bkm_path)
    >>> bkm2 = BisectingKMeans.load(bkm_path)
    >>> bkm2.getK()
    2
    >>> model_path = temp_path + "/bkm_model"
    >>> model.save(model_path)
    >>> model2 = BisectingKMeansModel.load(model_path)
    >>> model.clusterCenters()[0] == model2.clusterCenters()[0]
    array([ True,  True], dtype=bool)
    >>> model.clusterCenters()[1] == model2.clusterCenters()[1]
    array([ True,  True], dtype=bool)

    .. versionadded:: 2.0.0
    """

    k = Param(Params._dummy(), "k", "number of clusters to create",
              typeConverter=TypeConverters.toInt)
    minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
                                    "the minimum number of points (if >= 1.0) " +
                                    "or the minimum proportion",
                                    typeConverter=TypeConverters.toFloat)

    @keyword_only
    def __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20,
                 seed=None, k=4, minDivisibleClusterSize=1.0):
        """
        __init__(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
                 seed=None, k=4, minDivisibleClusterSize=1.0)
        """
        super(BisectingKMeans, self).__init__()
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
                                            self.uid)
        self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.0.0")
    def setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20,
                  seed=None, k=4, minDivisibleClusterSize=1.0):
        """
        setParams(self, featuresCol="features", predictionCol="prediction", maxIter=20, \
                  seed=None, k=4, minDivisibleClusterSize=1.0)
        Sets params for BisectingKMeans.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    @since("2.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("2.0.0")
    def getK(self):
        """
        Gets the value of `k` or its default value.
        """
        return self.getOrDefault(self.k)

    @since("2.0.0")
    def setMinDivisibleClusterSize(self, value):
        """
        Sets the value of :py:attr:`minDivisibleClusterSize`.
        """
        return self._set(minDivisibleClusterSize=value)

    @since("2.0.0")
    def getMinDivisibleClusterSize(self):
        """
        Gets the value of `minDivisibleClusterSize` or its default value.
        """
        return self.getOrDefault(self.minDivisibleClusterSize)
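
    # An illustrative sketch: per the param doc, values >= 1.0 are read as a
    # minimum number of points and values < 1.0 as a minimum proportion of
    # points for a cluster to be divisible, e.g.
    #
    #     BisectingKMeans(k=4, minDivisibleClusterSize=10.0)  # at least 10 points
    #     BisectingKMeans(k=4, minDivisibleClusterSize=0.05)  # at least 5% of points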

    def _create_model(self, java_model):
        return BisectingKMeansModel(java_model)


@inherit_doc
class LDAModel(JavaModel):
    """
    .. note:: Experimental

    Latent Dirichlet Allocation (LDA) model.
    This abstraction permits different underlying representations,
    including local and distributed data structures.

    .. versionadded:: 2.0.0
    """

    @since("2.0.0")
    def isDistributed(self):
        """
        Indicates whether this instance is of type DistributedLDAModel
        """
        return self._call_java("isDistributed")

    @since("2.0.0")
    def vocabSize(self):
        """Vocabulary size (number of terms or words in the vocabulary)"""
        return self._call_java("vocabSize")

    @since("2.0.0")
    def topicsMatrix(self):
        """
        Inferred topics, where each topic is represented by a distribution over terms.
        This is a matrix of size vocabSize x k, where each column is a topic.
        No guarantees are given about the ordering of the topics.

        WARNING: If this model is actually a :py:class:`DistributedLDAModel` instance produced by
        the Expectation-Maximization ("em") `optimizer`, then this method could involve
        collecting a large amount of data to the driver (on the order of vocabSize x k).
        """
        return self._call_java("topicsMatrix")

    @since("2.0.0")
    def logLikelihood(self, dataset):
        """
        Calculates a lower bound on the log likelihood of the entire corpus.
        See Equation (16) in the Online LDA paper (Hoffman et al., 2010).

        WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
        :py:attr:`optimizer` is set to "em"), this involves collecting a large
        :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
        """
        return self._call_java("logLikelihood", dataset)

    @since("2.0.0")
    def logPerplexity(self, dataset):
        """
        Calculate an upper bound on perplexity.  (Lower is better.)
        See Equation (16) in the Online LDA paper (Hoffman et al., 2010).

        WARNING: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
        :py:attr:`optimizer` is set to "em"), this involves collecting a large
        :py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
        """
        return self._call_java("logPerplexity", dataset)

    @since("2.0.0")
    def describeTopics(self, maxTermsPerTopic=10):
        """
        Return the topics described by their top-weighted terms.
        """
        return self._call_java("describeTopics", maxTermsPerTopic)

    @since("2.0.0")
    def estimatedDocConcentration(self):
        """
        Value for :py:attr:`LDA.docConcentration` estimated from data.
        If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
        then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
        """
        return self._call_java("estimatedDocConcentration")


@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental

    Distributed model fitted by :py:class:`LDA`.
    This type of model is currently only produced by Expectation-Maximization (EM).

    This model stores the inferred topics, the full training dataset, and the topic distribution
    for each training document.

    .. versionadded:: 2.0.0
    """

    @since("2.0.0")
    def toLocal(self):
        """
        Convert this distributed model to a local representation.  This discards info about the
        training dataset.

        WARNING: This involves collecting a large :py:func:`topicsMatrix` to the driver.
        """
        return LocalLDAModel(self._call_java("toLocal"))

    @since("2.0.0")
    def trainingLogLikelihood(self):
        """
        Log likelihood of the observed tokens in the training set,
        given the current parameter estimates:
        log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)

        Notes:
          - This excludes the prior; for that, use :py:func:`logPrior`.
          - Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
            the hyperparameters.
          - This is computed from the topic distributions computed during training. If you call
            :py:func:`logLikelihood` on the same training dataset, the topic distributions
            will be computed again, possibly giving different results.
        """
        return self._call_java("trainingLogLikelihood")

    @since("2.0.0")
    def logPrior(self):
        """
        Log probability of the current parameter estimate:
        log P(topics, topic distributions for docs | alpha, eta)
        """
        return self._call_java("logPrior")

    @since("2.0.0")
    def getCheckpointFiles(self):
        """
        If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
        be saved checkpoint files.  This method is provided so that users can manage those files.

        Note that removing the checkpoints can cause failures if a partition is lost and is needed
        by certain :py:class:`DistributedLDAModel` methods.  Reference counting will clean up the
        checkpoints when this model and derivative data go out of scope.

        :return: List of checkpoint files from training
        """
        return self._call_java("getCheckpointFiles")


@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental

    Local (non-distributed) model fitted by :py:class:`LDA`.
    This model stores the inferred topics only; it does not store info about the training dataset.

    .. versionadded:: 2.0.0
    """
    pass


@inherit_doc
class LDA(JavaEstimator, HasFeaturesCol, HasMaxIter, HasSeed, HasCheckpointInterval,
          JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental

    Latent Dirichlet Allocation (LDA), a topic model designed for text documents.

    Terminology:

     - "term" = "word": an el
     - "token": instance of a term appearing in a document
     - "topic": multinomial distribution over terms representing some concept
     - "document": one piece of text, corresponding to one row in the input data

    Original LDA paper (journal version):
      Blei, Ng, and Jordan.  "Latent Dirichlet Allocation."  JMLR, 2003.

    Input data (featuresCol):
    LDA is given a collection of documents as input data, via the featuresCol parameter.
    Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
    count for the corresponding term (word) in the document.  Feature transformers such as
    :py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
    can be useful for converting text to word count vectors.
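
    For example (an illustrative sketch, assuming a DataFrame ``text_df`` with
    a string column "text")::

        from pyspark.ml.feature import Tokenizer, CountVectorizer
        tokens = Tokenizer(inputCol="text", outputCol="words").transform(text_df)
        cvModel = CountVectorizer(inputCol="words", outputCol="features").fit(tokens)
        corpus = cvModel.transform(tokens)  # count vectors for LDA's featuresCol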

    >>> from pyspark.mllib.linalg import Vectors, SparseVector
    >>> from pyspark.ml.clustering import LDA
    >>> df = sqlContext.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
    ...      [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
    >>> lda = LDA(k=2, seed=1, optimizer="em")
    >>> model = lda.fit(df)
    >>> model.isDistributed()
    True
    >>> localModel = model.toLocal()
    >>> localModel.isDistributed()
    False
    >>> model.vocabSize()
    2
    >>> model.describeTopics().show()
    +-----+-----------+--------------------+
    |topic|termIndices|         termWeights|
    +-----+-----------+--------------------+
    |    0|     [1, 0]|[0.50401530077160...|
    |    1|     [0, 1]|[0.50401530077160...|
    +-----+-----------+--------------------+
    ...
    >>> model.topicsMatrix()
    DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
    >>> lda_path = temp_path + "/lda"
    >>> lda.save(lda_path)
    >>> sameLDA = LDA.load(lda_path)
    >>> distributed_model_path = temp_path + "/lda_distributed_model"
    >>> model.save(distributed_model_path)
    >>> sameModel = DistributedLDAModel.load(distributed_model_path)
    >>> local_model_path = temp_path + "/lda_local_model"
    >>> localModel.save(local_model_path)
    >>> sameLocalModel = LocalLDAModel.load(local_model_path)

    .. versionadded:: 2.0.0
    """

    k = Param(Params._dummy(), "k", "number of topics (clusters) to infer",
              typeConverter=TypeConverters.toInt)
    optimizer = Param(Params._dummy(), "optimizer",
                      "Optimizer or inference algorithm used to estimate the LDA model.  "
                      "Supported: online, em", typeConverter=TypeConverters.toString)
    learningOffset = Param(Params._dummy(), "learningOffset",
                           "A (positive) learning parameter that downweights early iterations."
                           " Larger values make early iterations count less",
                           typeConverter=TypeConverters.toFloat)
    learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an"
                          "exponential decay rate. This should be between (0.5, 1.0] to "
                          "guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
    subsamplingRate = Param(Params._dummy(), "subsamplingRate",
                            "Fraction of the corpus to be sampled and used in each iteration "
                            "of mini-batch gradient descent, in range (0, 1].",
                            typeConverter=TypeConverters.toFloat)
    optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
                                     "Indicates whether the docConcentration (Dirichlet parameter "
                                     "for document-topic distribution) will be optimized during "
                                     "training.", typeConverter=TypeConverters.toBoolean)
    docConcentration = Param(Params._dummy(), "docConcentration",
                             "Concentration parameter (commonly named \"alpha\") for the "
                             "prior placed on documents' distributions over topics (\"theta\").",
                             typeConverter=TypeConverters.toListFloat)
    topicConcentration = Param(Params._dummy(), "topicConcentration",
                               "Concentration parameter (commonly named \"beta\" or \"eta\") for "
                               "the prior placed on topic' distributions over terms.",
                               typeConverter=TypeConverters.toFloat)
    topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
                                 "Output column with estimates of the topic mixture distribution "
                                 "for each document (often called \"theta\" in the literature). "
                                 "Returns a vector of zeros for an empty document.",
                                 typeConverter=TypeConverters.toString)
    keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
                               "(For EM optimizer) If using checkpointing, this indicates whether"
                               " to keep the last checkpoint. If false, then the checkpoint will be"
                               " deleted. Deleting the checkpoint can cause failures if a data"
                               " partition is lost, so set this bit with care.",
                               typeConverter=TypeConverters.toBoolean)

    @keyword_only
    def __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
                 k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                 subsamplingRate=0.05, optimizeDocConcentration=True,
                 docConcentration=None, topicConcentration=None,
                 topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
        """
        __init__(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
                  k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
                  subsamplingRate=0.05, optimizeDocConcentration=True,\
                  docConcentration=None, topicConcentration=None,\
                  topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
        """
        super(LDA, self).__init__()
        self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
        self._setDefault(maxIter=20, checkpointInterval=10,
                         k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                         subsamplingRate=0.05, optimizeDocConcentration=True,
                         topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    def _create_model(self, java_model):
        if self.getOptimizer() == "em":
            return DistributedLDAModel(java_model)
        else:
            return LocalLDAModel(java_model)

    @keyword_only
    @since("2.0.0")
    def setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
                  k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
                  subsamplingRate=0.05, optimizeDocConcentration=True,
                  docConcentration=None, topicConcentration=None,
                  topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
        """
        setParams(self, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
                  k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
                  subsamplingRate=0.05, optimizeDocConcentration=True,\
                  docConcentration=None, topicConcentration=None,\
                  topicDistributionCol="topicDistribution", keepLastCheckpoint=True):

        Sets params for LDA.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    @since("2.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.

        >>> algo = LDA().setK(10)
        >>> algo.getK()
        10
        """
        return self._set(k=value)

    @since("2.0.0")
    def getK(self):
        """
        Gets the value of :py:attr:`k` or its default value.
        """
        return self.getOrDefault(self.k)

    @since("2.0.0")
    def setOptimizer(self, value):
        """
        Sets the value of :py:attr:`optimizer`.
        Currently only supports 'em' and 'online'.

        >>> algo = LDA().setOptimizer("em")
        >>> algo.getOptimizer()
        'em'
        """
        return self._set(optimizer=value)

    @since("2.0.0")
    def getOptimizer(self):
        """
        Gets the value of :py:attr:`optimizer` or its default value.
        """
        return self.getOrDefault(self.optimizer)

    @since("2.0.0")
    def setLearningOffset(self, value):
        """
        Sets the value of :py:attr:`learningOffset`.

        >>> algo = LDA().setLearningOffset(100)
        >>> algo.getLearningOffset()
        100.0
        """
        return self._set(learningOffset=value)

    @since("2.0.0")
    def getLearningOffset(self):
        """
        Gets the value of :py:attr:`learningOffset` or its default value.
        """
        return self.getOrDefault(self.learningOffset)

    @since("2.0.0")
    def setLearningDecay(self, value):
        """
        Sets the value of :py:attr:`learningDecay`.

        >>> algo = LDA().setLearningDecay(0.1)
        >>> algo.getLearningDecay()
        0.1...
        """
        return self._set(learningDecay=value)

    @since("2.0.0")
    def getLearningDecay(self):
        """
        Gets the value of :py:attr:`learningDecay` or its default value.
        """
        return self.getOrDefault(self.learningDecay)

    @since("2.0.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.

        >>> algo = LDA().setSubsamplingRate(0.1)
        >>> algo.getSubsamplingRate()
        0.1...
        """
        return self._set(subsamplingRate=value)

    @since("2.0.0")
    def getSubsamplingRate(self):
        """
        Gets the value of :py:attr:`subsamplingRate` or its default value.
        """
        return self.getOrDefault(self.subsamplingRate)

    @since("2.0.0")
    def setOptimizeDocConcentration(self, value):
        """
        Sets the value of :py:attr:`optimizeDocConcentration`.

        >>> algo = LDA().setOptimizeDocConcentration(True)
        >>> algo.getOptimizeDocConcentration()
        True
        """
        return self._set(optimizeDocConcentration=value)

    @since("2.0.0")
    def getOptimizeDocConcentration(self):
        """
        Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
        """
        return self.getOrDefault(self.optimizeDocConcentration)

    @since("2.0.0")
    def setDocConcentration(self, value):
        """
        Sets the value of :py:attr:`docConcentration`.

        >>> algo = LDA().setDocConcentration([0.1, 0.2])
        >>> algo.getDocConcentration()
        [0.1..., 0.2...]
        """
        return self._set(docConcentration=value)

    @since("2.0.0")
    def getDocConcentration(self):
        """
        Gets the value of :py:attr:`docConcentration` or its default value.
        """
        return self.getOrDefault(self.docConcentration)

    @since("2.0.0")
    def setTopicConcentration(self, value):
        """
        Sets the value of :py:attr:`topicConcentration`.

        >>> algo = LDA().setTopicConcentration(0.5)
        >>> algo.getTopicConcentration()
        0.5...
        """
        return self._set(topicConcentration=value)

    @since("2.0.0")
    def getTopicConcentration(self):
        """
        Gets the value of :py:attr:`topicConcentration` or its default value.
        """
        return self.getOrDefault(self.topicConcentration)

    @since("2.0.0")
    def setTopicDistributionCol(self, value):
        """
        Sets the value of :py:attr:`topicDistributionCol`.

        >>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
        >>> algo.getTopicDistributionCol()
        'topicDistributionCol'
        """
        return self._set(topicDistributionCol=value)

    @since("2.0.0")
    def getTopicDistributionCol(self):
        """
        Gets the value of :py:attr:`topicDistributionCol` or its default value.
        """
        return self.getOrDefault(self.topicDistributionCol)

    @since("2.0.0")
    def setKeepLastCheckpoint(self, value):
        """
        Sets the value of :py:attr:`keepLastCheckpoint`.

        >>> algo = LDA().setKeepLastCheckpoint(False)
        >>> algo.getKeepLastCheckpoint()
        False
        """
        return self._set(keepLastCheckpoint=value)

    @since("2.0.0")
    def getKeepLastCheckpoint(self):
        """
        Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
        """
        return self.getOrDefault(self.keepLastCheckpoint)


if __name__ == "__main__":
    import doctest
    import sys
    import pyspark.ml.clustering
    from pyspark.context import SparkContext
    from pyspark.sql import SQLContext
    globs = pyspark.ml.clustering.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    sc = SparkContext("local[2]", "ml.clustering tests")
    sqlContext = SQLContext(sc)
    globs['sc'] = sc
    globs['sqlContext'] = sqlContext
    import tempfile
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        sc.stop()
    finally:
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        sys.exit(-1)