 mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala | 20 +++++++++++---------
 python/pyspark/mllib/feature.py                                             |  4 ++--
 2 files changed, 13 insertions(+), 11 deletions(-)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala b/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
index a80cca70f4..2ed6c6be1d 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
@@ -126,13 +126,13 @@ private[python] class PythonMLLibAPI extends Serializable {
       k: Int,
       maxIterations: Int,
       minDivisibleClusterSize: Double,
-      seed: Long): BisectingKMeansModel = {
-    new BisectingKMeans()
+      seed: java.lang.Long): BisectingKMeansModel = {
+    val kmeans = new BisectingKMeans()
       .setK(k)
       .setMaxIterations(maxIterations)
       .setMinDivisibleClusterSize(minDivisibleClusterSize)
-      .setSeed(seed)
-      .run(data)
+    if (seed != null) kmeans.setSeed(seed)
+    kmeans.run(data)
   }

   /**
@@ -678,7 +678,7 @@ private[python] class PythonMLLibAPI extends Serializable {
       learningRate: Double,
       numPartitions: Int,
       numIterations: Int,
-      seed: Long,
+      seed: java.lang.Long,
       minCount: Int,
       windowSize: Int): Word2VecModelWrapper = {
     val word2vec = new Word2Vec()
@@ -686,9 +686,9 @@ private[python] class PythonMLLibAPI extends Serializable {
       .setLearningRate(learningRate)
       .setNumPartitions(numPartitions)
       .setNumIterations(numIterations)
-      .setSeed(seed)
       .setMinCount(minCount)
       .setWindowSize(windowSize)
+    if (seed != null) word2vec.setSeed(seed)
     try {
       val model = word2vec.fit(dataJRDD.rdd.persist(StorageLevel.MEMORY_AND_DISK_SER))
       new Word2VecModelWrapper(model)
@@ -751,7 +751,7 @@ private[python] class PythonMLLibAPI extends Serializable {
       impurityStr: String,
       maxDepth: Int,
       maxBins: Int,
-      seed: Int): RandomForestModel = {
+      seed: java.lang.Long): RandomForestModel = {

     val algo = Algo.fromString(algoStr)
     val impurity = Impurities.fromString(impurityStr)
@@ -763,11 +763,13 @@ private[python] class PythonMLLibAPI extends Serializable {
       maxBins = maxBins,
       categoricalFeaturesInfo = categoricalFeaturesInfo.asScala.toMap)
     val cached = data.rdd.persist(StorageLevel.MEMORY_AND_DISK)
+    // Only done because methods below want an int, not an optional Long
+    val intSeed = getSeedOrDefault(seed).toInt
     try {
       if (algo == Algo.Classification) {
-        RandomForest.trainClassifier(cached, strategy, numTrees, featureSubsetStrategy, seed)
+        RandomForest.trainClassifier(cached, strategy, numTrees, featureSubsetStrategy, intSeed)
       } else {
-        RandomForest.trainRegressor(cached, strategy, numTrees, featureSubsetStrategy, seed)
+        RandomForest.trainRegressor(cached, strategy, numTrees, featureSubsetStrategy, intSeed)
       }
     } finally {
       cached.unpersist(blocking = false)
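
All three Scala wrappers now follow the same idiom: accept the seed from Py4J as a boxed java.lang.Long (Python's None arrives as null) and only call setSeed when a value was actually supplied, so each estimator keeps its own default otherwise. The RandomForest wrapper additionally calls a getSeedOrDefault helper whose body the diff does not show; a minimal Scala sketch of what such a helper could look like, assuming a null seed should fall back to a randomly drawn one:

    // Sketch only, not the actual Spark source: map a missing (null) seed
    // from the Python side to a freshly drawn default.
    def getSeedOrDefault(seed: java.lang.Long): Long =
      if (seed == null) scala.util.Random.nextLong() else seed

The .toInt narrowing afterwards is needed only because RandomForest.trainClassifier and RandomForest.trainRegressor take an Int seed rather than a Long.
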
diff --git a/python/pyspark/mllib/feature.py b/python/pyspark/mllib/feature.py
index 324ba9758e..b32d0c70ec 100644
--- a/python/pyspark/mllib/feature.py
+++ b/python/pyspark/mllib/feature.py
@@ -600,7 +600,7 @@ class Word2Vec(object):
         self.learningRate = 0.025
         self.numPartitions = 1
         self.numIterations = 1
-        self.seed = random.randint(0, sys.maxsize)
+        self.seed = None
         self.minCount = 5
         self.windowSize = 5

@@ -675,7 +675,7 @@ class Word2Vec(object):
             raise TypeError("data should be an RDD of list of string")
         jmodel = callMLlibFunc("trainWord2VecModel", data, int(self.vectorSize),
                                float(self.learningRate), int(self.numPartitions),
-                               int(self.numIterations), int(self.seed),
+                               int(self.numIterations), self.seed,
                                int(self.minCount), int(self.windowSize))
         return Word2VecModel(jmodel)

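
The Python half of the change is what makes the null possible: seed now defaults to None, and fit passes it through without the old int(...) coercion, since int(None) would raise a TypeError. Py4J delivers that None to the JVM as null, which is only representable because the Scala parameters switched from the primitive Long to the boxed java.lang.Long. A standalone Scala snippet, not part of the patch, showing why the primitive type would fail:

    val fromPython: java.lang.Long = null  // how Py4J represents seed=None
    // val bad: Long = fromPython          // auto-unboxing null throws a NullPointerException
    val ok: Long = if (fromPython != null) fromPython else 0L  // guard first; 0L is just a placeholder default
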