about summary refs log tree commit diff
path: root/mllib
diff options
context:
space:
mode:
author	CodingCat <zhunansjtu@gmail.com>	2014-04-18 10:01:16 -0700
committer	Reynold Xin <rxin@apache.org>	2014-04-18 10:01:16 -0700
commite31c8ffca65e0e5cd5f1a6229f3d654a24b7b18c (patch)
treeb0923d192066b8f44bad5047f0ca03719af5c789 /mllib
parent7863ecca35be9af1eca0dfe5fd8806c5dd710fd6 (diff)
downloadspark-e31c8ffca65e0e5cd5f1a6229f3d654a24b7b18c.tar.gz
spark-e31c8ffca65e0e5cd5f1a6229f3d654a24b7b18c.tar.bz2
spark-e31c8ffca65e0e5cd5f1a6229f3d654a24b7b18c.zip
SPARK-1483: Rename minSplits to minPartitions in public APIs
https://issues.apache.org/jira/browse/SPARK-1483 From the original JIRA: " The parameter name is part of the public API in Scala and Python, since you can pass named parameters to a method, so we should name it to this more descriptive term. Everywhere else we refer to "splits" as partitions." - @mateiz Author: CodingCat <zhunansjtu@gmail.com> Closes #430 from CodingCat/SPARK-1483 and squashes the following commits: 4b60541 [CodingCat] deprecate defaultMinSplits ba2c663 [CodingCat] Rename minSplits to minPartitions in public APIs
Diffstat (limited to 'mllib')
-rw-r--r--	mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala	| 12
1 file changed, 6 insertions, 6 deletions
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
index 2f3ac10397..3d6e7e0d5c 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
@@ -57,7 +57,7 @@ object MLUtils {
* @param labelParser parser for labels, default: 1.0 if label > 0.5 or 0.0 otherwise
* @param numFeatures number of features, which will be determined from the input data if a
* negative value is given. The default value is -1.
- * @param minSplits min number of partitions, default: sc.defaultMinSplits
+ * @param minPartitions min number of partitions, default: sc.defaultMinPartitions
* @return labeled data stored as an RDD[LabeledPoint]
*/
def loadLibSVMData(
@@ -65,8 +65,8 @@ object MLUtils {
path: String,
labelParser: LabelParser,
numFeatures: Int,
- minSplits: Int): RDD[LabeledPoint] = {
- val parsed = sc.textFile(path, minSplits)
+ minPartitions: Int): RDD[LabeledPoint] = {
+ val parsed = sc.textFile(path, minPartitions)
.map(_.trim)
.filter(!_.isEmpty)
.map(_.split(' '))
@@ -101,7 +101,7 @@ object MLUtils {
* with number of features determined automatically and the default number of partitions.
*/
def loadLibSVMData(sc: SparkContext, path: String): RDD[LabeledPoint] =
- loadLibSVMData(sc, path, BinaryLabelParser, -1, sc.defaultMinSplits)
+ loadLibSVMData(sc, path, BinaryLabelParser, -1, sc.defaultMinPartitions)
/**
* Loads labeled data in the LIBSVM format into an RDD[LabeledPoint],
@@ -112,7 +112,7 @@ object MLUtils {
sc: SparkContext,
path: String,
labelParser: LabelParser): RDD[LabeledPoint] =
- loadLibSVMData(sc, path, labelParser, -1, sc.defaultMinSplits)
+ loadLibSVMData(sc, path, labelParser, -1, sc.defaultMinPartitions)
/**
* Loads labeled data in the LIBSVM format into an RDD[LabeledPoint],
@@ -124,7 +124,7 @@ object MLUtils {
path: String,
labelParser: LabelParser,
numFeatures: Int): RDD[LabeledPoint] =
- loadLibSVMData(sc, path, labelParser, numFeatures, sc.defaultMinSplits)
+ loadLibSVMData(sc, path, labelParser, numFeatures, sc.defaultMinPartitions)
/**
* :: Experimental ::