author     Zheng RuiFeng <ruifengz@foxmail.com>    2016-05-15 15:59:49 +0100
committer  Sean Owen <sowen@cloudera.com>          2016-05-15 15:59:49 +0100
commit     c7efc56c7b6fc99c005b35c335716ff676856c6c (patch)
tree       09412bc7bec3167e8ec15dc8293043e1ab7abbc5 /mllib
parent     f5576a052da0bb59343bc2a6b6ce06c6abaac75b (diff)
[MINOR] Fix Typos
## What changes were proposed in this pull request?

1. Rename matrix args in BreezeUtil to upper case to match the doc.
2. Fix several typos in ML and SQL.

## How was this patch tested?

Manual tests.

Author: Zheng RuiFeng <ruifengz@foxmail.com>

Closes #13078 from zhengruifeng/fix_ann.
Diffstat (limited to 'mllib')
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/ann/BreezeUtil.scala  33
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala       36
2 files changed, 36 insertions(+), 33 deletions(-)
diff --git a/mllib/src/main/scala/org/apache/spark/ml/ann/BreezeUtil.scala b/mllib/src/main/scala/org/apache/spark/ml/ann/BreezeUtil.scala
index 7429f9d652..6bbe7e1cb2 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/ann/BreezeUtil.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/ann/BreezeUtil.scala
@@ -26,38 +26,39 @@ import com.github.fommil.netlib.BLAS.{getInstance => NativeBLAS}
private[ann] object BreezeUtil {
// TODO: switch to MLlib BLAS interface
- private def transposeString(a: BDM[Double]): String = if (a.isTranspose) "T" else "N"
+ private def transposeString(A: BDM[Double]): String = if (A.isTranspose) "T" else "N"
/**
* DGEMM: C := alpha * A * B + beta * C
* @param alpha alpha
- * @param a A
- * @param b B
+ * @param A A
+ * @param B B
* @param beta beta
- * @param c C
+ * @param C C
*/
- def dgemm(alpha: Double, a: BDM[Double], b: BDM[Double], beta: Double, c: BDM[Double]): Unit = {
+ def dgemm(alpha: Double, A: BDM[Double], B: BDM[Double], beta: Double, C: BDM[Double]): Unit = {
// TODO: add code if matrices isTranspose!!!
- require(a.cols == b.rows, "A & B Dimension mismatch!")
- require(a.rows == c.rows, "A & C Dimension mismatch!")
- require(b.cols == c.cols, "A & C Dimension mismatch!")
- NativeBLAS.dgemm(transposeString(a), transposeString(b), c.rows, c.cols, a.cols,
- alpha, a.data, a.offset, a.majorStride, b.data, b.offset, b.majorStride,
- beta, c.data, c.offset, c.rows)
+ require(A.cols == B.rows, "A & B Dimension mismatch!")
+ require(A.rows == C.rows, "A & C Dimension mismatch!")
+ require(B.cols == C.cols, "B & C Dimension mismatch!")
+ NativeBLAS.dgemm(transposeString(A), transposeString(B), C.rows, C.cols, A.cols,
+ alpha, A.data, A.offset, A.majorStride, B.data, B.offset, B.majorStride,
+ beta, C.data, C.offset, C.rows)
}
/**
* DGEMV: y := alpha * A * x + beta * y
* @param alpha alpha
- * @param a A
+ * @param A A
* @param x x
* @param beta beta
* @param y y
*/
- def dgemv(alpha: Double, a: BDM[Double], x: BDV[Double], beta: Double, y: BDV[Double]): Unit = {
- require(a.cols == x.length, "A & b Dimension mismatch!")
- NativeBLAS.dgemv(transposeString(a), a.rows, a.cols,
- alpha, a.data, a.offset, a.majorStride, x.data, x.offset, x.stride,
+ def dgemv(alpha: Double, A: BDM[Double], x: BDV[Double], beta: Double, y: BDV[Double]): Unit = {
+ require(A.cols == x.length, "A & x Dimension mismatch!")
+ require(A.rows == y.length, "A & y Dimension mismatch!")
+ NativeBLAS.dgemv(transposeString(A), A.rows, A.cols,
+ alpha, A.data, A.offset, A.majorStride, x.data, x.offset, x.stride,
beta, y.data, y.offset, y.stride)
}
}
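As a side note, the dgemm/dgemv contracts documented above can be sketched with plain Breeze operations in place of the NativeBLAS calls. BreezeUtil itself is private[ann], so the following standalone object (its name and the pure-Breeze fallback are illustrative, not Spark's implementation) only demonstrates the intended semantics:

```scala
import breeze.linalg.{DenseMatrix => BDM, DenseVector => BDV}

object BreezeUtilSketch {
  // C := alpha * A * B + beta * C, mirroring the dgemm contract above
  def dgemm(alpha: Double, A: BDM[Double], B: BDM[Double], beta: Double, C: BDM[Double]): Unit = {
    require(A.cols == B.rows, "A & B Dimension mismatch!")
    require(A.rows == C.rows, "A & C Dimension mismatch!")
    require(B.cols == C.cols, "B & C Dimension mismatch!")
    C := (A * B) * alpha + C * beta   // pure-Breeze fallback instead of NativeBLAS.dgemm
  }

  // y := alpha * A * x + beta * y, mirroring the dgemv contract above
  def dgemv(alpha: Double, A: BDM[Double], x: BDV[Double], beta: Double, y: BDV[Double]): Unit = {
    require(A.cols == x.length, "A & x Dimension mismatch!")
    require(A.rows == y.length, "A & y Dimension mismatch!")
    y := (A * x) * alpha + y * beta   // pure-Breeze fallback instead of NativeBLAS.dgemv
  }

  def main(args: Array[String]): Unit = {
    val A = BDM((1.0, 2.0), (3.0, 4.0))
    val B = BDM((5.0, 6.0), (7.0, 8.0))
    val C = BDM.zeros[Double](2, 2)
    dgemm(1.0, A, B, 0.0, C)          // C = A * B = [[19, 22], [43, 50]]
    println(C)
  }
}
```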
diff --git a/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala b/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
index 3588ac1e95..a27ee51874 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
@@ -64,8 +64,9 @@ private[ann] trait Layer extends Serializable {
* @return the layer model
*/
def createModel(initialWeights: BDV[Double]): LayerModel
+
/**
- * Returns the instance of the layer with random generated weights
+ * Returns the instance of the layer with randomly generated weights.
*
* @param weights vector for weights initialization, its size must be equal to weightSize
* @param random random number generator
@@ -83,11 +84,11 @@ private[ann] trait LayerModel extends Serializable {
val weights: BDV[Double]
/**
- * Evaluates the data (process the data through the layer)
+ * Evaluates the data (processes the data through the layer).
* Output is allocated based on the size provided by the
- * LayerModel implementation and the stack (batch) size
+ * LayerModel implementation and the stack (batch) size.
* Developer is responsible for checking the size of output
- * when writing to it
+ * when writing to it.
*
* @param data data
* @param output output (modified in place)
@@ -95,11 +96,11 @@ private[ann] trait LayerModel extends Serializable {
def eval(data: BDM[Double], output: BDM[Double]): Unit
/**
- * Computes the delta for back propagation
+ * Computes the delta for back propagation.
* Delta is allocated based on the size provided by the
- * LayerModel implementation and the stack (batch) size
+ * LayerModel implementation and the stack (batch) size.
* Developer is responsible for checking the size of
- * prevDelta when writing to it
+ * prevDelta when writing to it.
*
* @param delta delta of this layer
* @param output output of this layer
@@ -108,10 +109,10 @@ private[ann] trait LayerModel extends Serializable {
def computePrevDelta(delta: BDM[Double], output: BDM[Double], prevDelta: BDM[Double]): Unit
/**
- * Computes the gradient
- * cumGrad is a wrapper on the part of the weight vector
- * size of cumGrad is based on weightSize provided by
- * implementation of LayerModel
+ * Computes the gradient.
+ * cumGrad is a wrapper on the part of the weight vector.
+ * Size of cumGrad is based on weightSize provided by
+ * implementation of LayerModel.
*
* @param delta delta for this layer
* @param input input data
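Taken together, eval, computePrevDelta and grad form the per-layer contract that back propagation relies on. Here is a minimal sketch of a trivial identity layer against standalone stand-ins for these private[ann] traits (all types and names below are illustrative):

```scala
import breeze.linalg.{DenseMatrix => BDM, DenseVector => BDV}

// Standalone stand-in for the private[ann] LayerModel contract, for illustration only.
trait SketchLayerModel {
  def eval(data: BDM[Double], output: BDM[Double]): Unit
  def computePrevDelta(delta: BDM[Double], output: BDM[Double], prevDelta: BDM[Double]): Unit
  def grad(delta: BDM[Double], input: BDM[Double], cumGrad: BDV[Double]): Unit
}

// Identity layer: output = input, the delta passes through unchanged,
// and, like the weightless functional layer below, grad is a no-op.
class IdentityLayerModel extends SketchLayerModel {
  override def eval(data: BDM[Double], output: BDM[Double]): Unit =
    output := data
  override def computePrevDelta(delta: BDM[Double], output: BDM[Double],
      prevDelta: BDM[Double]): Unit =
    prevDelta := delta
  override def grad(delta: BDM[Double], input: BDM[Double], cumGrad: BDV[Double]): Unit = ()
}
```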
@@ -197,11 +198,11 @@ private[ann] object AffineLayerModel {
}
/**
- * Initialize weights randomly in the interval
- * Uses [Bottou-88] heuristic [-a/sqrt(in); a/sqrt(in)]
- * where a is chosen in a such way that the weight variance corresponds
+ * Initialize weights randomly in the interval
+ * given by the [Bottou-88] heuristic [-a/sqrt(in); a/sqrt(in)],
+ * where `a` is chosen in such a way that the weight variance corresponds
* to the points of maximal curvature of the activation function
- * (which is approximately 2.38 for a standard sigmoid)
+ * (which is approximately 2.38 for a standard sigmoid).
*
* @param numIn number of inputs
* @param numOut number of outputs
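As a quick illustration of this heuristic, each weight is drawn uniformly from [-a/sqrt(numIn), a/sqrt(numIn)] with a ≈ 2.38. A minimal sketch (not Spark's actual implementation; numWeights is an illustrative parameter):

```scala
import java.util.Random

// Minimal sketch of the [Bottou-88] initialization described above.
// numWeights is illustrative; in Spark the target is a slice of the weight vector.
def randomWeightsSketch(numIn: Int, numWeights: Int, random: Random): Array[Double] = {
  val bound = 2.38 / math.sqrt(numIn.toDouble)
  Array.fill(numWeights)(random.nextDouble() * 2.0 * bound - bound)
}
```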
@@ -306,7 +307,7 @@ private[ann] class FunctionalLayer (val activationFunction: ActivationFunction)
/**
* Functional layer model. Holds no weights.
*
- * @param layer functiona layer
+ * @param layer functional layer
*/
private[ann] class FunctionalLayerModel private[ann] (val layer: FunctionalLayer)
extends LayerModel {
@@ -352,6 +353,7 @@ private[ann] trait TopologyModel extends Serializable {
* Array of layer models
*/
val layerModels: Array[LayerModel]
+
/**
* Forward propagation
*
@@ -410,7 +412,7 @@ private[ml] object FeedForwardTopology {
* Creates a multi-layer perceptron
*
* @param layerSizes sizes of layers including input and output size
- * @param softmaxOnTop wether to use SoftMax or Sigmoid function for an output layer.
+ * @param softmaxOnTop whether to use SoftMax or Sigmoid function for an output layer.
* Softmax is the default.
* @return multilayer perceptron topology
*/
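For context, this topology is reachable through Spark's public API via MultilayerPerceptronClassifier, which builds a multilayer perceptron with softmax on the output layer. A minimal usage sketch (the training DataFrame is assumed to exist):

```scala
import org.apache.spark.ml.classification.MultilayerPerceptronClassifier

// Layer sizes include input and output, matching the layerSizes doc above:
// 4 input features -> one hidden layer of 5 -> 3 output classes.
val mlp = new MultilayerPerceptronClassifier()
  .setLayers(Array(4, 5, 3))
  .setMaxIter(100)
  .setSeed(42L)
// val model = mlp.fit(training)   // `training` is an assumed DataFrame of labeled features
```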