diff options
author | Kousuke Saruta <sarutak@oss.nttdata.co.jp> | 2014-09-03 20:47:00 -0700 |
---|---|---|
committer | Xiangrui Meng <meng@databricks.com> | 2014-09-03 20:47:00 -0700 |
commit | 1bed0a3869a526241381d2a74ba064e5b3721336 (patch) | |
tree | 7e93cdce4ce5ece360d55abf66805ca3d0932be9 /mllib | |
parent | 7c6e71f05f4f5e0cd2d038ee81d1cda4a3e5cb39 (diff) | |
download | spark-1bed0a3869a526241381d2a74ba064e5b3721336.tar.gz spark-1bed0a3869a526241381d2a74ba064e5b3721336.tar.bz2 spark-1bed0a3869a526241381d2a74ba064e5b3721336.zip |
[SPARK-3372] [MLlib] MLlib doesn't pass maven build / checkstyle due to multi-byte character contained in Gradient.scala
Author: Kousuke Saruta <sarutak@oss.nttdata.co.jp>
Closes #2248 from sarutak/SPARK-3372 and squashes the following commits:
73a28b8 [Kousuke Saruta] Replaced UTF-8 hyphen with ascii hyphen
Diffstat (limited to 'mllib')
-rw-r--r-- | mllib/src/main/scala/org/apache/spark/mllib/optimization/Gradient.scala | 4 |
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/optimization/Gradient.scala b/mllib/src/main/scala/org/apache/spark/mllib/optimization/Gradient.scala index fdd6716011..45dbf6044f 100644 --- a/mllib/src/main/scala/org/apache/spark/mllib/optimization/Gradient.scala +++ b/mllib/src/main/scala/org/apache/spark/mllib/optimization/Gradient.scala @@ -128,7 +128,7 @@ class LeastSquaresGradient extends Gradient { class HingeGradient extends Gradient { override def compute(data: Vector, label: Double, weights: Vector): (Vector, Double) = { val dotProduct = dot(data, weights) - // Our loss function with {0, 1} labels is max(0, 1 - (2y – 1) (f_w(x))) + // Our loss function with {0, 1} labels is max(0, 1 - (2y - 1) (f_w(x))) // Therefore the gradient is -(2y - 1)*x val labelScaled = 2 * label - 1.0 if (1.0 > labelScaled * dotProduct) { @@ -146,7 +146,7 @@ class HingeGradient extends Gradient { weights: Vector, cumGradient: Vector): Double = { val dotProduct = dot(data, weights) - // Our loss function with {0, 1} labels is max(0, 1 - (2y – 1) (f_w(x))) + // Our loss function with {0, 1} labels is max(0, 1 - (2y - 1) (f_w(x))) // Therefore the gradient is -(2y - 1)*x val labelScaled = 2 * label - 1.0 if (1.0 > labelScaled * dotProduct) { |