about summary refs log tree commit diff
path: root/mllib
diff options
context:
space:
mode:
authorYanbo Liang <ybliang8@gmail.com>2016-10-26 11:48:54 -0700
committerJoseph K. Bradley <joseph@databricks.com>2016-10-26 11:48:54 -0700
commitea3605e82545031a00235ee0f449e1e2418674e8 (patch)
treec26ab8a517d4210dcce914978b026bc936a24670 /mllib
parent7d10631c16b980adf1f55378c128436310daed65 (diff)
downloadspark-ea3605e82545031a00235ee0f449e1e2418674e8.tar.gz
spark-ea3605e82545031a00235ee0f449e1e2418674e8.tar.bz2
spark-ea3605e82545031a00235ee0f449e1e2418674e8.zip
[MINOR][ML] Refactor clustering summary.
## What changes were proposed in this pull request? Abstract ```ClusteringSummary``` from ```KMeansSummary```, ```GaussianMixtureSummary``` and ```BisectingSummary```, and eliminate duplicated pieces of code. ## How was this patch tested? Existing tests. Author: Yanbo Liang <ybliang8@gmail.com> Closes #15555 from yanboliang/clustering-summary.
Diffstat (limited to 'mllib')
-rw-r--r--mllib/src/main/scala/org/apache/spark/ml/clustering/BisectingKMeans.scala36
-rw-r--r--mllib/src/main/scala/org/apache/spark/ml/clustering/ClusteringSummary.scala54
-rw-r--r--mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala37
-rw-r--r--mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala36
4 files changed, 80 insertions, 83 deletions
diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/BisectingKMeans.scala b/mllib/src/main/scala/org/apache/spark/ml/clustering/BisectingKMeans.scala
index ef2d918ea3..2718dd93dc 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/clustering/BisectingKMeans.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/BisectingKMeans.scala
@@ -288,35 +288,15 @@ object BisectingKMeans extends DefaultParamsReadable[BisectingKMeans] {
* :: Experimental ::
* Summary of BisectingKMeans.
*
- * @param predictions [[DataFrame]] produced by [[BisectingKMeansModel.transform()]]
- * @param predictionCol Name for column of predicted clusters in `predictions`
- * @param featuresCol Name for column of features in `predictions`
- * @param k Number of clusters
+ * @param predictions [[DataFrame]] produced by [[BisectingKMeansModel.transform()]].
+ * @param predictionCol Name for column of predicted clusters in `predictions`.
+ * @param featuresCol Name for column of features in `predictions`.
+ * @param k Number of clusters.
*/
@Since("2.1.0")
@Experimental
class BisectingKMeansSummary private[clustering] (
- @Since("2.1.0") @transient val predictions: DataFrame,
- @Since("2.1.0") val predictionCol: String,
- @Since("2.1.0") val featuresCol: String,
- @Since("2.1.0") val k: Int) extends Serializable {
-
- /**
- * Cluster centers of the transformed data.
- */
- @Since("2.1.0")
- @transient lazy val cluster: DataFrame = predictions.select(predictionCol)
-
- /**
- * Size of (number of data points in) each cluster.
- */
- @Since("2.1.0")
- lazy val clusterSizes: Array[Long] = {
- val sizes = Array.fill[Long](k)(0)
- cluster.groupBy(predictionCol).count().select(predictionCol, "count").collect().foreach {
- case Row(cluster: Int, count: Long) => sizes(cluster) = count
- }
- sizes
- }
-
-}
+ predictions: DataFrame,
+ predictionCol: String,
+ featuresCol: String,
+ k: Int) extends ClusteringSummary(predictions, predictionCol, featuresCol, k)
diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/ClusteringSummary.scala b/mllib/src/main/scala/org/apache/spark/ml/clustering/ClusteringSummary.scala
new file mode 100644
index 0000000000..8b5f525194
--- /dev/null
+++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/ClusteringSummary.scala
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.ml.clustering
+
+import org.apache.spark.annotation.Experimental
+import org.apache.spark.sql.{DataFrame, Row}
+
+/**
+ * :: Experimental ::
+ * Summary of clustering algorithms.
+ *
+ * @param predictions [[DataFrame]] produced by model.transform().
+ * @param predictionCol Name for column of predicted clusters in `predictions`.
+ * @param featuresCol Name for column of features in `predictions`.
+ * @param k Number of clusters.
+ */
+@Experimental
+class ClusteringSummary private[clustering] (
+ @transient val predictions: DataFrame,
+ val predictionCol: String,
+ val featuresCol: String,
+ val k: Int) extends Serializable {
+
+ /**
+ * Cluster centers of the transformed data.
+ */
+ @transient lazy val cluster: DataFrame = predictions.select(predictionCol)
+
+ /**
+ * Size of (number of data points in) each cluster.
+ */
+ lazy val clusterSizes: Array[Long] = {
+ val sizes = Array.fill[Long](k)(0)
+ cluster.groupBy(predictionCol).count().select(predictionCol, "count").collect().foreach {
+ case Row(cluster: Int, count: Long) => sizes(cluster) = count
+ }
+ sizes
+ }
+}
diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala b/mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala
index 69f060ad77..e3cb92f4f1 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala
@@ -356,42 +356,25 @@ object GaussianMixture extends DefaultParamsReadable[GaussianMixture] {
* :: Experimental ::
* Summary of GaussianMixture.
*
- * @param predictions [[DataFrame]] produced by [[GaussianMixtureModel.transform()]]
- * @param predictionCol Name for column of predicted clusters in `predictions`
- * @param probabilityCol Name for column of predicted probability of each cluster in `predictions`
- * @param featuresCol Name for column of features in `predictions`
- * @param k Number of clusters
+ * @param predictions [[DataFrame]] produced by [[GaussianMixtureModel.transform()]].
+ * @param predictionCol Name for column of predicted clusters in `predictions`.
+ * @param probabilityCol Name for column of predicted probability of each cluster
+ * in `predictions`.
+ * @param featuresCol Name for column of features in `predictions`.
+ * @param k Number of clusters.
*/
@Since("2.0.0")
@Experimental
class GaussianMixtureSummary private[clustering] (
- @Since("2.0.0") @transient val predictions: DataFrame,
- @Since("2.0.0") val predictionCol: String,
+ predictions: DataFrame,
+ predictionCol: String,
@Since("2.0.0") val probabilityCol: String,
- @Since("2.0.0") val featuresCol: String,
- @Since("2.0.0") val k: Int) extends Serializable {
-
- /**
- * Cluster centers of the transformed data.
- */
- @Since("2.0.0")
- @transient lazy val cluster: DataFrame = predictions.select(predictionCol)
+ featuresCol: String,
+ k: Int) extends ClusteringSummary(predictions, predictionCol, featuresCol, k) {
/**
* Probability of each cluster.
*/
@Since("2.0.0")
@transient lazy val probability: DataFrame = predictions.select(probabilityCol)
-
- /**
- * Size of (number of data points in) each cluster.
- */
- @Since("2.0.0")
- lazy val clusterSizes: Array[Long] = {
- val sizes = Array.fill[Long](k)(0)
- cluster.groupBy(predictionCol).count().select(predictionCol, "count").collect().foreach {
- case Row(cluster: Int, count: Long) => sizes(cluster) = count
- }
- sizes
- }
}
diff --git a/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala b/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala
index 0d2405b500..05ed3223ae 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/clustering/KMeans.scala
@@ -346,35 +346,15 @@ object KMeans extends DefaultParamsReadable[KMeans] {
* :: Experimental ::
* Summary of KMeans.
*
- * @param predictions [[DataFrame]] produced by [[KMeansModel.transform()]]
- * @param predictionCol Name for column of predicted clusters in `predictions`
- * @param featuresCol Name for column of features in `predictions`
- * @param k Number of clusters
+ * @param predictions [[DataFrame]] produced by [[KMeansModel.transform()]].
+ * @param predictionCol Name for column of predicted clusters in `predictions`.
+ * @param featuresCol Name for column of features in `predictions`.
+ * @param k Number of clusters.
*/
@Since("2.0.0")
@Experimental
class KMeansSummary private[clustering] (
- @Since("2.0.0") @transient val predictions: DataFrame,
- @Since("2.0.0") val predictionCol: String,
- @Since("2.0.0") val featuresCol: String,
- @Since("2.0.0") val k: Int) extends Serializable {
-
- /**
- * Cluster centers of the transformed data.
- */
- @Since("2.0.0")
- @transient lazy val cluster: DataFrame = predictions.select(predictionCol)
-
- /**
- * Size of (number of data points in) each cluster.
- */
- @Since("2.0.0")
- lazy val clusterSizes: Array[Long] = {
- val sizes = Array.fill[Long](k)(0)
- cluster.groupBy(predictionCol).count().select(predictionCol, "count").collect().foreach {
- case Row(cluster: Int, count: Long) => sizes(cluster) = count
- }
- sizes
- }
-
-}
+ predictions: DataFrame,
+ predictionCol: String,
+ featuresCol: String,
+ k: Int) extends ClusteringSummary(predictions, predictionCol, featuresCol, k)