author    Yanbo Liang <ybliang8@gmail.com>           2016-12-02 12:16:57 -0800
committer Joseph K. Bradley <joseph@databricks.com>  2016-12-02 12:16:57 -0800
commit    a985dd8e99d2663a3cb4745c675fa2057aa67155 (patch)
tree      2655ae0d069ea9642b406618dc9401e60f94ba8a
parent    48778976e0566d9c93a8c900825def82c6b81fd6 (diff)
download  spark-a985dd8e99d2663a3cb4745c675fa2057aa67155.tar.gz
          spark-a985dd8e99d2663a3cb4745c675fa2057aa67155.tar.bz2
          spark-a985dd8e99d2663a3cb4745c675fa2057aa67155.zip
[SPARK-18291][SPARKR][ML] Revert "[SPARK-18291][SPARKR][ML] SparkR glm predict should output original label when family = binomial."
## What changes were proposed in this pull request?

It is better to fix this issue by providing a ```type``` option that lets users change the ```predict``` output schema, so they can get probabilities, log-space predictions, or the original labels. To avoid a breaking API change in 2.1, revert this change first; it will be added back once [SPARK-18618](https://issues.apache.org/jira/browse/SPARK-18618) is resolved.

## How was this patch tested?

Existing unit tests.

This reverts commit daa975f4bfa4f904697bf3365a4be9987032e490.

Author: Yanbo Liang <ybliang8@gmail.com>

Closes #16118 from yanboliang/spark-18291-revert.
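For context, a minimal SparkR sketch of the behavior this revert restores (assuming a running SparkR session; the data setup mirrors the removed test below). With the binomial-specific post-processing stages gone, ```predict``` on a ```family = binomial``` model returns the numeric response-scale prediction instead of the original character label:

```r
library(SparkR)
sparkR.session()

# Same setup as the removed test: keep two species so the label is binary.
training <- suppressWarnings(createDataFrame(iris))
binomialTraining <- training[training$Species %in% c("versicolor", "virginica"), ]

model <- spark.glm(binomialTraining, Species ~ Sepal_Length + Sepal_Width,
                   family = binomial(link = "logit"))
prediction <- predict(model, binomialTraining)

# After this revert the prediction column holds numeric response-scale values,
# not the original "versicolor"/"virginica" strings.
typeof(take(select(prediction, "prediction"), 1)$prediction)  # expected: "double"
```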
-rw-r--r--  R/pkg/inst/tests/testthat/test_mllib.R                                               20
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala  78
2 files changed, 12 insertions(+), 86 deletions(-)
diff --git a/R/pkg/inst/tests/testthat/test_mllib.R b/R/pkg/inst/tests/testthat/test_mllib.R
index 0553e704bd..dcfeeb4cd2 100644
--- a/R/pkg/inst/tests/testthat/test_mllib.R
+++ b/R/pkg/inst/tests/testthat/test_mllib.R
@@ -64,16 +64,6 @@ test_that("spark.glm and predict", {
rVals <- predict(glm(Sepal.Width ~ Sepal.Length + Species, data = iris), iris)
expect_true(all(abs(rVals - vals) < 1e-6), rVals - vals)
- # binomial family
- binomialTraining <- training[training$Species %in% c("versicolor", "virginica"), ]
- model <- spark.glm(binomialTraining, Species ~ Sepal_Length + Sepal_Width,
- family = binomial(link = "logit"))
- prediction <- predict(model, binomialTraining)
- expect_equal(typeof(take(select(prediction, "prediction"), 1)$prediction), "character")
- expected <- c("virginica", "virginica", "virginica", "versicolor", "virginica",
- "versicolor", "virginica", "versicolor", "virginica", "versicolor")
- expect_equal(as.list(take(select(prediction, "prediction"), 10))[[1]], expected)
-
# poisson family
model <- spark.glm(training, Sepal_Width ~ Sepal_Length + Species,
family = poisson(link = identity))
@@ -138,10 +128,10 @@ test_that("spark.glm summary", {
expect_equal(stats$aic, rStats$aic)
# Test spark.glm works with weighted dataset
- a1 <- c(0, 1, 2, 3, 4)
- a2 <- c(5, 2, 1, 3, 2)
- w <- c(1, 2, 3, 4, 5)
- b <- c(1, 0, 1, 0, 0)
+ a1 <- c(0, 1, 2, 3)
+ a2 <- c(5, 2, 1, 3)
+ w <- c(1, 2, 3, 4)
+ b <- c(1, 0, 1, 0)
data <- as.data.frame(cbind(a1, a2, w, b))
df <- createDataFrame(data)
@@ -168,7 +158,7 @@ test_that("spark.glm summary", {
data <- as.data.frame(cbind(a1, a2, b))
df <- suppressWarnings(createDataFrame(data))
regStats <- summary(spark.glm(df, b ~ a1 + a2, regParam = 1.0))
- expect_equal(regStats$aic, 14.00976, tolerance = 1e-4) # 14.00976 is from summary() result
+ expect_equal(regStats$aic, 13.32836, tolerance = 1e-4) # 13.32836 is from summary() result
# Test spark.glm works on collinear data
A <- matrix(c(1, 2, 3, 4, 2, 4, 6, 8), 4, 2)
diff --git a/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala b/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala
index 8bcc9fe5d1..78f401f29b 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/r/GeneralizedLinearRegressionWrapper.scala
@@ -23,17 +23,12 @@ import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.ml.{Pipeline, PipelineModel}
-import org.apache.spark.ml.attribute.{Attribute, AttributeGroup, NominalAttribute}
-import org.apache.spark.ml.feature.{IndexToString, RFormula}
-import org.apache.spark.ml.regression._
-import org.apache.spark.ml.Transformer
-import org.apache.spark.ml.param.ParamMap
-import org.apache.spark.ml.param.shared._
+import org.apache.spark.ml.attribute.AttributeGroup
+import org.apache.spark.ml.feature.RFormula
import org.apache.spark.ml.r.RWrapperUtils._
+import org.apache.spark.ml.regression._
import org.apache.spark.ml.util._
import org.apache.spark.sql._
-import org.apache.spark.sql.functions._
-import org.apache.spark.sql.types._
private[r] class GeneralizedLinearRegressionWrapper private (
val pipeline: PipelineModel,
@@ -48,8 +43,6 @@ private[r] class GeneralizedLinearRegressionWrapper private (
val rNumIterations: Int,
val isLoaded: Boolean = false) extends MLWritable {
- import GeneralizedLinearRegressionWrapper._
-
private val glm: GeneralizedLinearRegressionModel =
pipeline.stages(1).asInstanceOf[GeneralizedLinearRegressionModel]
@@ -60,16 +53,7 @@ private[r] class GeneralizedLinearRegressionWrapper private (
def residuals(residualsType: String): DataFrame = glm.summary.residuals(residualsType)
def transform(dataset: Dataset[_]): DataFrame = {
- if (rFamily == "binomial") {
- pipeline.transform(dataset)
- .drop(PREDICTED_LABEL_PROB_COL)
- .drop(PREDICTED_LABEL_INDEX_COL)
- .drop(glm.getFeaturesCol)
- .drop(glm.getLabelCol)
- } else {
- pipeline.transform(dataset)
- .drop(glm.getFeaturesCol)
- }
+ pipeline.transform(dataset).drop(glm.getFeaturesCol)
}
override def write: MLWriter =
@@ -79,10 +63,6 @@ private[r] class GeneralizedLinearRegressionWrapper private (
private[r] object GeneralizedLinearRegressionWrapper
extends MLReadable[GeneralizedLinearRegressionWrapper] {
- val PREDICTED_LABEL_PROB_COL = "pred_label_prob"
- val PREDICTED_LABEL_INDEX_COL = "pred_label_idx"
- val PREDICTED_LABEL_COL = "prediction"
-
def fit(
formula: String,
data: DataFrame,
@@ -93,7 +73,6 @@ private[r] object GeneralizedLinearRegressionWrapper
weightCol: String,
regParam: Double): GeneralizedLinearRegressionWrapper = {
val rFormula = new RFormula().setFormula(formula)
- if (family == "binomial") rFormula.setForceIndexLabel(true)
checkDataColumns(rFormula, data)
val rFormulaModel = rFormula.fit(data)
// get labels and feature names from output schema
@@ -111,28 +90,9 @@ private[r] object GeneralizedLinearRegressionWrapper
.setWeightCol(weightCol)
.setRegParam(regParam)
.setFeaturesCol(rFormula.getFeaturesCol)
- .setLabelCol(rFormula.getLabelCol)
- val pipeline = if (family == "binomial") {
- // Convert prediction from probability to label index.
- val probToPred = new ProbabilityToPrediction()
- .setInputCol(PREDICTED_LABEL_PROB_COL)
- .setOutputCol(PREDICTED_LABEL_INDEX_COL)
- // Convert prediction from label index to original label.
- val labelAttr = Attribute.fromStructField(schema(rFormulaModel.getLabelCol))
- .asInstanceOf[NominalAttribute]
- val labels = labelAttr.values.get
- val idxToStr = new IndexToString()
- .setInputCol(PREDICTED_LABEL_INDEX_COL)
- .setOutputCol(PREDICTED_LABEL_COL)
- .setLabels(labels)
-
- new Pipeline()
- .setStages(Array(rFormulaModel, glr.setPredictionCol(PREDICTED_LABEL_PROB_COL),
- probToPred, idxToStr))
- .fit(data)
- } else {
- new Pipeline().setStages(Array(rFormulaModel, glr)).fit(data)
- }
+ val pipeline = new Pipeline()
+ .setStages(Array(rFormulaModel, glr))
+ .fit(data)
val glm: GeneralizedLinearRegressionModel =
pipeline.stages(1).asInstanceOf[GeneralizedLinearRegressionModel]
@@ -248,27 +208,3 @@ private[r] object GeneralizedLinearRegressionWrapper
}
}
}
-
-/**
- * This utility transformer converts the predicted value of GeneralizedLinearRegressionModel
- * with "binomial" family from probability to prediction according to threshold 0.5.
- */
-private[r] class ProbabilityToPrediction private[r] (override val uid: String)
- extends Transformer with HasInputCol with HasOutputCol with DefaultParamsWritable {
-
- def this() = this(Identifiable.randomUID("probToPred"))
-
- def setInputCol(value: String): this.type = set(inputCol, value)
-
- def setOutputCol(value: String): this.type = set(outputCol, value)
-
- override def transformSchema(schema: StructType): StructType = {
- StructType(schema.fields :+ StructField($(outputCol), DoubleType))
- }
-
- override def transform(dataset: Dataset[_]): DataFrame = {
- dataset.withColumn($(outputCol), round(col($(inputCol))))
- }
-
- override def copy(extra: ParamMap): ProbabilityToPrediction = defaultCopy(extra)
-}
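
The removed ```ProbabilityToPrediction``` stage above simply rounded the response-scale prediction, i.e. applied a 0.5 threshold, before ```IndexToString``` mapped the resulting index back to the original label. Until the ```type``` option from SPARK-18618 is available, a caller who still wants a discrete prediction can apply the same threshold on the R side. A hedged sketch, reusing the ```prediction``` DataFrame from the earlier example; the ```predicted_index``` column name is illustrative, and which species corresponds to 0 or 1 depends on how ```RFormula``` indexed the string label:

```r
# Reproduce the removed transformer's threshold-0.5 behavior by hand.
labeled <- withColumn(prediction, "predicted_index",
                      ifelse(prediction$prediction > 0.5, 1, 0))
head(select(labeled, "Species", "prediction", "predicted_index"))
```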