aboutsummaryrefslogtreecommitdiff
path: root/mllib
diff options
context:
space:
mode:
authorXiangrui Meng <meng@databricks.com>2015-02-16 22:09:04 -0800
committerXiangrui Meng <meng@databricks.com>2015-02-16 22:09:04 -0800
commitfd84229e2aeb6a03760703c9dccd2db853779400 (patch)
tree302d59e173bb16499ca4223aff88d136cf7d9ceb /mllib
parentd380f324c6d38ffacfda83a525a1a7e23347e5b8 (diff)
downloadspark-fd84229e2aeb6a03760703c9dccd2db853779400.tar.gz
spark-fd84229e2aeb6a03760703c9dccd2db853779400.tar.bz2
spark-fd84229e2aeb6a03760703c9dccd2db853779400.zip
[SPARK-5802][MLLIB] cache transformed data in glm
If we need to transform the input data, we should cache the output to avoid re-computing feature vectors every iteration. dbtsai Author: Xiangrui Meng <meng@databricks.com> Closes #4593 from mengxr/SPARK-5802 and squashes the following commits: ae3be84 [Xiangrui Meng] cache transformed data in glm
Diffstat (limited to 'mllib')
-rw-r--r--mllib/src/main/scala/org/apache/spark/mllib/regression/GeneralizedLinearAlgorithm.scala29
1 file changed, 15 insertions, 14 deletions
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/regression/GeneralizedLinearAlgorithm.scala b/mllib/src/main/scala/org/apache/spark/mllib/regression/GeneralizedLinearAlgorithm.scala
index 17de215b97..2b7145362a 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/regression/GeneralizedLinearAlgorithm.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/regression/GeneralizedLinearAlgorithm.scala
@@ -205,7 +205,7 @@ abstract class GeneralizedLinearAlgorithm[M <: GeneralizedLinearModel]
throw new SparkException("Input validation failed.")
}
- /**
+ /*
* Scaling columns to unit variance as a heuristic to reduce the condition number:
*
* During the optimization process, the convergence (rate) depends on the condition number of
@@ -225,26 +225,27 @@ abstract class GeneralizedLinearAlgorithm[M <: GeneralizedLinearModel]
* Currently, it's only enabled in LogisticRegressionWithLBFGS
*/
val scaler = if (useFeatureScaling) {
- (new StandardScaler(withStd = true, withMean = false)).fit(input.map(x => x.features))
+ new StandardScaler(withStd = true, withMean = false).fit(input.map(_.features))
} else {
null
}
// Prepend an extra variable consisting of all 1.0's for the intercept.
- val data = if (addIntercept) {
- if (useFeatureScaling) {
- input.map(labeledPoint =>
- (labeledPoint.label, appendBias(scaler.transform(labeledPoint.features))))
- } else {
- input.map(labeledPoint => (labeledPoint.label, appendBias(labeledPoint.features)))
- }
- } else {
- if (useFeatureScaling) {
- input.map(labeledPoint => (labeledPoint.label, scaler.transform(labeledPoint.features)))
+ // TODO: Apply feature scaling to the weight vector instead of input data.
+ val data =
+ if (addIntercept) {
+ if (useFeatureScaling) {
+ input.map(lp => (lp.label, appendBias(scaler.transform(lp.features)))).cache()
+ } else {
+ input.map(lp => (lp.label, appendBias(lp.features))).cache()
+ }
} else {
- input.map(labeledPoint => (labeledPoint.label, labeledPoint.features))
+ if (useFeatureScaling) {
+ input.map(lp => (lp.label, scaler.transform(lp.features))).cache()
+ } else {
+ input.map(lp => (lp.label, lp.features))
+ }
}
- }
/**
* TODO: For better convergence, in logistic regression, the intercepts should be computed