author    Felix Cheung <felixcheung_m@hotmail.com>  2017-04-04 22:32:46 -0700
committer Felix Cheung <felixcheung@apache.org>     2017-04-04 22:32:46 -0700
commit    c1b8b667506ed95c6c2808e7d3db8463435e73f6 (patch)
tree      459d4f24ac83a4add957250029c094df46017d37 /R
parent    b28bbffbadf7ebc4349666e8f17111f6fca18c9a (diff)
[SPARKR][DOC] update doc for fpgrowth
## What changes were proposed in this pull request?

minor update

zero323

Author: Felix Cheung <felixcheung_m@hotmail.com>

Closes #17526 from felixcheung/rfpgrowthfollowup.
Diffstat (limited to 'R')
-rw-r--r--  R/pkg/R/mllib_clustering.R | 6
-rw-r--r--  R/pkg/R/mllib_fpm.R        | 4
2 files changed, 5 insertions, 5 deletions
diff --git a/R/pkg/R/mllib_clustering.R b/R/pkg/R/mllib_clustering.R
index 0ebdb5a273..97c9fa1b45 100644
--- a/R/pkg/R/mllib_clustering.R
+++ b/R/pkg/R/mllib_clustering.R
@@ -498,11 +498,7 @@ setMethod("write.ml", signature(object = "KMeansModel", path = "character"),
#' @export
#' @examples
#' \dontrun{
-#' # nolint start
-#' # An example "path/to/file" can be
-#' # paste0(Sys.getenv("SPARK_HOME"), "/data/mllib/sample_lda_libsvm_data.txt")
-#' # nolint end
-#' text <- read.df("path/to/file", source = "libsvm")
+#' text <- read.df("data/mllib/sample_lda_libsvm_data.txt", source = "libsvm")
#' model <- spark.lda(data = text, optimizer = "em")
#'
#' # get a summary of the model
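For reference, a minimal SparkR sketch of the updated LDA example above, assuming a running SparkSession and that the sample file ships with the Spark distribution under SPARK_HOME (the absolute-path construction is an assumption, mirroring the note the diff removes):

library(SparkR)
sparkR.session()

# Load the LIBSVM-formatted sample data relative to the Spark home directory.
text <- read.df(file.path(Sys.getenv("SPARK_HOME"), "data/mllib/sample_lda_libsvm_data.txt"),
                source = "libsvm")

# Fit an LDA topic model with the EM optimizer and inspect it.
model <- spark.lda(data = text, optimizer = "em")
summary(model)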
diff --git a/R/pkg/R/mllib_fpm.R b/R/pkg/R/mllib_fpm.R
index 96251b2c7c..dfcb45a1b6 100644
--- a/R/pkg/R/mllib_fpm.R
+++ b/R/pkg/R/mllib_fpm.R
@@ -27,6 +27,10 @@ setClass("FPGrowthModel", slots = list(jobj = "jobj"))
#' FP-growth
#'
#' A parallel FP-growth algorithm to mine frequent itemsets.
+#' \code{spark.fpGrowth} fits an FP-growth model on a SparkDataFrame. Users can call
+#' \code{spark.freqItemsets} to get frequent itemsets, \code{spark.associationRules} to get
+#' association rules, \code{predict} to make predictions on new data based on generated association
+#' rules, and \code{write.ml}/\code{read.ml} to save/load fitted models.
#' For more details, see
#' \href{https://spark.apache.org/docs/latest/mllib-frequent-pattern-mining.html#fp-growth}{
#' FP-growth}.
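To make the documented workflow concrete, here is a minimal SparkR sketch exercising the functions named in the added roxygen text; the toy data, thresholds, and save path are illustrative assumptions, not part of the committed docs:

library(SparkR)
sparkR.session()

# Toy transaction data; spark.fpGrowth expects an array column of items.
df <- selectExpr(
  createDataFrame(data.frame(raw_items = c("1 2 5", "1 2 3 5", "1 2"))),
  "split(raw_items, ' ') as items")

# Fit the model (support/confidence thresholds chosen only for this example).
model <- spark.fpGrowth(df, minSupport = 0.5, minConfidence = 0.6)

showDF(spark.freqItemsets(model))       # frequent itemsets
showDF(spark.associationRules(model))   # association rules
showDF(predict(model, df))              # predictions from the generated rules

# Persist and reload the fitted model (hypothetical path).
write.ml(model, "/tmp/fpgrowth_model")
model2 <- read.ml("/tmp/fpgrowth_model")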