aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author: Xiangrui Meng <meng@databricks.com> 2014-08-16 15:14:43 -0700
committer: Xiangrui Meng <meng@databricks.com> 2014-08-16 15:14:52 -0700
commita12d3ae3223535e6e4c774e4a289b8b2f2e5228b (patch)
tree38d3aa38fe2ad2720b0a9000de0235e21ce1f3c2
parent0b354be2f9ec35547a60591acf4f4773a4869690 (diff)
downloadspark-a12d3ae3223535e6e4c774e4a289b8b2f2e5228b.tar.gz
spark-a12d3ae3223535e6e4c774e4a289b8b2f2e5228b.tar.bz2
spark-a12d3ae3223535e6e4c774e4a289b8b2f2e5228b.zip
[SPARK-3081][MLLIB] rename RandomRDDGenerators to RandomRDDs
`RandomRDDGenerators` means factory for `RandomRDDGenerator`. However, its methods return RDDs but not RDDGenerators. So a more proper (and shorter) name would be `RandomRDDs`.

dorx brkyvz

Author: Xiangrui Meng <meng@databricks.com>

Closes #1979 from mengxr/randomrdds and squashes the following commits:

b161a2d [Xiangrui Meng] rename RandomRDDGenerators to RandomRDDs

(cherry picked from commit ac6411c6e75906997c78de23dfdbc8d225b87cfd)
Signed-off-by: Xiangrui Meng <meng@databricks.com>
-rw-r--r--mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala2
-rw-r--r--mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala (renamed from mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDGenerators.scala)6
-rw-r--r--mllib/src/test/scala/org/apache/spark/mllib/random/RandomRDDsSuite.scala (renamed from mllib/src/test/scala/org/apache/spark/mllib/random/RandomRDDGeneratorsSuite.scala)16
-rw-r--r--python/pyspark/mllib/random.py25
4 files changed, 24 insertions, 25 deletions
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala b/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
index 18dc087856..4343124f10 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
@@ -27,7 +27,7 @@ import org.apache.spark.mllib.classification._
import org.apache.spark.mllib.clustering._
import org.apache.spark.mllib.optimization._
import org.apache.spark.mllib.linalg.{Matrix, SparseVector, Vector, Vectors}
-import org.apache.spark.mllib.random.{RandomRDDGenerators => RG}
+import org.apache.spark.mllib.random.{RandomRDDs => RG}
import org.apache.spark.mllib.recommendation._
import org.apache.spark.mllib.regression._
import org.apache.spark.mllib.tree.configuration.{Algo, Strategy}
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDGenerators.scala b/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala
index b0a0593223..3627036952 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDGenerators.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/random/RandomRDDs.scala
@@ -17,6 +17,8 @@
package org.apache.spark.mllib.random
+import scala.reflect.ClassTag
+
import org.apache.spark.SparkContext
import org.apache.spark.annotation.Experimental
import org.apache.spark.mllib.linalg.Vector
@@ -24,14 +26,12 @@ import org.apache.spark.mllib.rdd.{RandomVectorRDD, RandomRDD}
import org.apache.spark.rdd.RDD
import org.apache.spark.util.Utils
-import scala.reflect.ClassTag
-
/**
* :: Experimental ::
* Generator methods for creating RDDs comprised of i.i.d. samples from some distribution.
*/
@Experimental
-object RandomRDDGenerators {
+object RandomRDDs {
/**
* :: Experimental ::
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/random/RandomRDDGeneratorsSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/random/RandomRDDsSuite.scala
index 96e0bc63b0..c50b78bcbc 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/random/RandomRDDGeneratorsSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/random/RandomRDDsSuite.scala
@@ -34,7 +34,7 @@ import org.apache.spark.util.StatCounter
*
* TODO update tests to use TestingUtils for floating point comparison after PR 1367 is merged
*/
-class RandomRDDGeneratorsSuite extends FunSuite with LocalSparkContext with Serializable {
+class RandomRDDsSuite extends FunSuite with LocalSparkContext with Serializable {
def testGeneratedRDD(rdd: RDD[Double],
expectedSize: Long,
@@ -113,18 +113,18 @@ class RandomRDDGeneratorsSuite extends FunSuite with LocalSparkContext with Seri
val poissonMean = 100.0
for (seed <- 0 until 5) {
- val uniform = RandomRDDGenerators.uniformRDD(sc, size, numPartitions, seed)
+ val uniform = RandomRDDs.uniformRDD(sc, size, numPartitions, seed)
testGeneratedRDD(uniform, size, numPartitions, 0.5, 1 / math.sqrt(12))
- val normal = RandomRDDGenerators.normalRDD(sc, size, numPartitions, seed)
+ val normal = RandomRDDs.normalRDD(sc, size, numPartitions, seed)
testGeneratedRDD(normal, size, numPartitions, 0.0, 1.0)
- val poisson = RandomRDDGenerators.poissonRDD(sc, poissonMean, size, numPartitions, seed)
+ val poisson = RandomRDDs.poissonRDD(sc, poissonMean, size, numPartitions, seed)
testGeneratedRDD(poisson, size, numPartitions, poissonMean, math.sqrt(poissonMean), 0.1)
}
// mock distribution to check that partitions have unique seeds
- val random = RandomRDDGenerators.randomRDD(sc, new MockDistro(), 1000L, 1000, 0L)
+ val random = RandomRDDs.randomRDD(sc, new MockDistro(), 1000L, 1000, 0L)
assert(random.collect.size === random.collect.distinct.size)
}
@@ -135,13 +135,13 @@ class RandomRDDGeneratorsSuite extends FunSuite with LocalSparkContext with Seri
val poissonMean = 100.0
for (seed <- 0 until 5) {
- val uniform = RandomRDDGenerators.uniformVectorRDD(sc, rows, cols, parts, seed)
+ val uniform = RandomRDDs.uniformVectorRDD(sc, rows, cols, parts, seed)
testGeneratedVectorRDD(uniform, rows, cols, parts, 0.5, 1 / math.sqrt(12))
- val normal = RandomRDDGenerators.normalVectorRDD(sc, rows, cols, parts, seed)
+ val normal = RandomRDDs.normalVectorRDD(sc, rows, cols, parts, seed)
testGeneratedVectorRDD(normal, rows, cols, parts, 0.0, 1.0)
- val poisson = RandomRDDGenerators.poissonVectorRDD(sc, poissonMean, rows, cols, parts, seed)
+ val poisson = RandomRDDs.poissonVectorRDD(sc, poissonMean, rows, cols, parts, seed)
testGeneratedVectorRDD(poisson, rows, cols, parts, poissonMean, math.sqrt(poissonMean), 0.1)
}
}
diff --git a/python/pyspark/mllib/random.py b/python/pyspark/mllib/random.py
index eb496688b6..3f3b19053d 100644
--- a/python/pyspark/mllib/random.py
+++ b/python/pyspark/mllib/random.py
@@ -25,8 +25,7 @@ from pyspark.mllib._common import _deserialize_double, _deserialize_double_vecto
from pyspark.serializers import NoOpSerializer
-class RandomRDDGenerators:
-
+class RandomRDDs:
"""
Generator methods for creating RDDs comprised of i.i.d samples from
some distribution.
@@ -40,17 +39,17 @@ class RandomRDDGenerators:
To transform the distribution in the generated RDD from U[0.0, 1.0]
to U[a, b], use
- C{RandomRDDGenerators.uniformRDD(sc, n, p, seed)\
+ C{RandomRDDs.uniformRDD(sc, n, p, seed)\
.map(lambda v: a + (b - a) * v)}
- >>> x = RandomRDDGenerators.uniformRDD(sc, 100).collect()
+ >>> x = RandomRDDs.uniformRDD(sc, 100).collect()
>>> len(x)
100
>>> max(x) <= 1.0 and min(x) >= 0.0
True
- >>> RandomRDDGenerators.uniformRDD(sc, 100, 4).getNumPartitions()
+ >>> RandomRDDs.uniformRDD(sc, 100, 4).getNumPartitions()
4
- >>> parts = RandomRDDGenerators.uniformRDD(sc, 100, seed=4).getNumPartitions()
+ >>> parts = RandomRDDs.uniformRDD(sc, 100, seed=4).getNumPartitions()
>>> parts == sc.defaultParallelism
True
"""
@@ -66,10 +65,10 @@ class RandomRDDGenerators:
To transform the distribution in the generated RDD from standard normal
to some other normal N(mean, sigma), use
- C{RandomRDDGenerators.normal(sc, n, p, seed)\
+ C{RandomRDDs.normal(sc, n, p, seed)\
.map(lambda v: mean + sigma * v)}
- >>> x = RandomRDDGenerators.normalRDD(sc, 1000, seed=1L)
+ >>> x = RandomRDDs.normalRDD(sc, 1000, seed=1L)
>>> stats = x.stats()
>>> stats.count()
1000L
@@ -89,7 +88,7 @@ class RandomRDDGenerators:
distribution with the input mean.
>>> mean = 100.0
- >>> x = RandomRDDGenerators.poissonRDD(sc, mean, 1000, seed=1L)
+ >>> x = RandomRDDs.poissonRDD(sc, mean, 1000, seed=1L)
>>> stats = x.stats()
>>> stats.count()
1000L
@@ -110,12 +109,12 @@ class RandomRDDGenerators:
from the uniform distribution on [0.0 1.0].
>>> import numpy as np
- >>> mat = np.matrix(RandomRDDGenerators.uniformVectorRDD(sc, 10, 10).collect())
+ >>> mat = np.matrix(RandomRDDs.uniformVectorRDD(sc, 10, 10).collect())
>>> mat.shape
(10, 10)
>>> mat.max() <= 1.0 and mat.min() >= 0.0
True
- >>> RandomRDDGenerators.uniformVectorRDD(sc, 10, 10, 4).getNumPartitions()
+ >>> RandomRDDs.uniformVectorRDD(sc, 10, 10, 4).getNumPartitions()
4
"""
jrdd = sc._jvm.PythonMLLibAPI() \
@@ -130,7 +129,7 @@ class RandomRDDGenerators:
from the standard normal distribution.
>>> import numpy as np
- >>> mat = np.matrix(RandomRDDGenerators.normalVectorRDD(sc, 100, 100, seed=1L).collect())
+ >>> mat = np.matrix(RandomRDDs.normalVectorRDD(sc, 100, 100, seed=1L).collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - 0.0) < 0.1
@@ -151,7 +150,7 @@ class RandomRDDGenerators:
>>> import numpy as np
>>> mean = 100.0
- >>> rdd = RandomRDDGenerators.poissonVectorRDD(sc, mean, 100, 100, seed=1L)
+ >>> rdd = RandomRDDs.poissonVectorRDD(sc, mean, 100, 100, seed=1L)
>>> mat = np.mat(rdd.collect())
>>> mat.shape
(100, 100)