path: root/python/pyspark/mllib/tests.py
author     Reynold Xin <rxin@databricks.com>   2015-04-21 17:49:55 -0700
committer  Reynold Xin <rxin@databricks.com>   2015-04-21 17:49:55 -0700
commit     3134c3fe495862b7687b5aa00d3344d09cd5e08e (patch)
tree       ed556b21bbaad651c7893b6b2dcb53f304100785 /python/pyspark/mllib/tests.py
parent     e72c16e30d85cdc394d318b5551698885cfda9b8 (diff)
[SPARK-6953] [PySpark] speed up python tests
This PR tries to speed up some python tests:

```
tests.py                  144s -> 103s  -41s
mllib/classification.py    24s ->  17s   -7s
mllib/regression.py        27s ->  15s  -12s
mllib/tree.py              27s ->  13s  -14s
mllib/tests.py             64s ->  31s  -33s
streaming/tests.py        185s ->  84s -101s
```

Considering Python 3, the total saving will be 558s (almost 10 minutes), since core and streaming run three times and mllib runs twice.

During testing, it will show the time used for each test file:

```
Run core tests ...
Running test: pyspark/rdd.py ... ok (22s)
Running test: pyspark/context.py ... ok (16s)
Running test: pyspark/conf.py ... ok (4s)
Running test: pyspark/broadcast.py ... ok (4s)
Running test: pyspark/accumulators.py ... ok (4s)
Running test: pyspark/serializers.py ... ok (6s)
Running test: pyspark/profiler.py ... ok (5s)
Running test: pyspark/shuffle.py ... ok (1s)
Running test: pyspark/tests.py ... ok (103s)
144s
```

Author: Reynold Xin <rxin@databricks.com>
Author: Xiangrui Meng <meng@databricks.com>

Closes #5605 from rxin/python-tests-speed and squashes the following commits:

d08542d [Reynold Xin] Merge pull request #14 from mengxr/SPARK-6953
89321ee [Xiangrui Meng] fix seed in tests
3ad2387 [Reynold Xin] Merge pull request #5427 from davies/python_tests
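For mllib/tests.py, the savings come from two changes visible in the diff below: the test classes now share a single module-level SparkContext (via a new MLlibTestCase base class) instead of inheriting from ReusedPySparkTestCase, and the training calls use fewer iterations, trees, and bins with fixed seeds. A minimal sketch of the shared-context pattern follows; it is a simplified stand-in rather than the full test file, `ExampleTest` is a hypothetical placeholder, and the `try/finally` around `unittest.main()` is a small deviation added so the cleanup step always runs:

```python
import unittest

from pyspark import SparkContext

# One SparkContext for the whole test module; starting and stopping a
# JVM-backed context per test class is what dominated the old running time.
sc = SparkContext('local[4]', "MLlib tests")


class MLlibTestCase(unittest.TestCase):
    def setUp(self):
        # Every test case reuses the shared context instead of creating its own.
        self.sc = sc


class ExampleTest(MLlibTestCase):
    # Hypothetical test, just to show how subclasses pick up self.sc.
    def test_parallelize(self):
        self.assertEqual(self.sc.parallelize(range(10)).count(), 10)


if __name__ == "__main__":
    try:
        unittest.main()
    finally:
        # unittest.main() raises SystemExit, so stop the shared context in a
        # finally block; this mirrors the sc.stop() cleanup added in the diff.
        sc.stop()
```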
Diffstat (limited to 'python/pyspark/mllib/tests.py')
-rw-r--r--  python/pyspark/mllib/tests.py  69
1 file changed, 36 insertions(+), 33 deletions(-)
diff --git a/python/pyspark/mllib/tests.py b/python/pyspark/mllib/tests.py
index 8f89e2cee0..1b008b93bc 100644
--- a/python/pyspark/mllib/tests.py
+++ b/python/pyspark/mllib/tests.py
@@ -36,6 +36,7 @@ if sys.version_info[:2] <= (2, 6):
else:
import unittest
+from pyspark import SparkContext
from pyspark.mllib.common import _to_java_object_rdd
from pyspark.mllib.linalg import Vector, SparseVector, DenseVector, VectorUDT, _convert_to_vector,\
DenseMatrix, SparseMatrix, Vectors, Matrices
@@ -47,7 +48,6 @@ from pyspark.mllib.feature import IDF
from pyspark.mllib.feature import StandardScaler
from pyspark.serializers import PickleSerializer
from pyspark.sql import SQLContext
-from pyspark.tests import ReusedPySparkTestCase as PySparkTestCase
_have_scipy = False
try:
@@ -58,6 +58,12 @@ except:
pass
ser = PickleSerializer()
+sc = SparkContext('local[4]', "MLlib tests")
+
+
+class MLlibTestCase(unittest.TestCase):
+    def setUp(self):
+        self.sc = sc
def _squared_distance(a, b):
@@ -67,7 +73,7 @@ def _squared_distance(a, b):
return b.squared_distance(a)
-class VectorTests(PySparkTestCase):
+class VectorTests(MLlibTestCase):
def _test_serialize(self, v):
self.assertEqual(v, ser.loads(ser.dumps(v)))
@@ -212,7 +218,7 @@ class VectorTests(PySparkTestCase):
self.assertTrue(array_equal(sm.values, [1, 3, 4, 6, 9]))
-class ListTests(PySparkTestCase):
+class ListTests(MLlibTestCase):
"""
Test MLlib algorithms on plain lists, to make sure they're passed through
@@ -255,7 +261,7 @@ class ListTests(PySparkTestCase):
[-6, -7],
])
clusters = GaussianMixture.train(data, 2, convergenceTol=0.001,
- maxIterations=100, seed=56)
+ maxIterations=10, seed=56)
labels = clusters.predict(data).collect()
self.assertEquals(labels[0], labels[1])
self.assertEquals(labels[2], labels[3])
@@ -266,9 +272,9 @@ class ListTests(PySparkTestCase):
y = range(0, 100, 10)
data = self.sc.parallelize([[a, b] for a, b in zip(x, y)])
clusters1 = GaussianMixture.train(data, 5, convergenceTol=0.001,
- maxIterations=100, seed=63)
+ maxIterations=10, seed=63)
clusters2 = GaussianMixture.train(data, 5, convergenceTol=0.001,
- maxIterations=100, seed=63)
+ maxIterations=10, seed=63)
for c1, c2 in zip(clusters1.weights, clusters2.weights):
self.assertEquals(round(c1, 7), round(c2, 7))
@@ -287,13 +293,13 @@ class ListTests(PySparkTestCase):
temp_dir = tempfile.mkdtemp()
- lr_model = LogisticRegressionWithSGD.train(rdd)
+ lr_model = LogisticRegressionWithSGD.train(rdd, iterations=10)
self.assertTrue(lr_model.predict(features[0]) <= 0)
self.assertTrue(lr_model.predict(features[1]) > 0)
self.assertTrue(lr_model.predict(features[2]) <= 0)
self.assertTrue(lr_model.predict(features[3]) > 0)
- svm_model = SVMWithSGD.train(rdd)
+ svm_model = SVMWithSGD.train(rdd, iterations=10)
self.assertTrue(svm_model.predict(features[0]) <= 0)
self.assertTrue(svm_model.predict(features[1]) > 0)
self.assertTrue(svm_model.predict(features[2]) <= 0)
@@ -307,7 +313,7 @@ class ListTests(PySparkTestCase):
categoricalFeaturesInfo = {0: 3} # feature 0 has 3 categories
dt_model = DecisionTree.trainClassifier(
- rdd, numClasses=2, categoricalFeaturesInfo=categoricalFeaturesInfo)
+ rdd, numClasses=2, categoricalFeaturesInfo=categoricalFeaturesInfo, maxBins=4)
self.assertTrue(dt_model.predict(features[0]) <= 0)
self.assertTrue(dt_model.predict(features[1]) > 0)
self.assertTrue(dt_model.predict(features[2]) <= 0)
@@ -319,7 +325,8 @@ class ListTests(PySparkTestCase):
self.assertEqual(same_dt_model.toDebugString(), dt_model.toDebugString())
rf_model = RandomForest.trainClassifier(
- rdd, numClasses=2, categoricalFeaturesInfo=categoricalFeaturesInfo, numTrees=100)
+ rdd, numClasses=2, categoricalFeaturesInfo=categoricalFeaturesInfo, numTrees=10,
+ maxBins=4, seed=1)
self.assertTrue(rf_model.predict(features[0]) <= 0)
self.assertTrue(rf_model.predict(features[1]) > 0)
self.assertTrue(rf_model.predict(features[2]) <= 0)
@@ -331,7 +338,7 @@ class ListTests(PySparkTestCase):
self.assertEqual(same_rf_model.toDebugString(), rf_model.toDebugString())
gbt_model = GradientBoostedTrees.trainClassifier(
- rdd, categoricalFeaturesInfo=categoricalFeaturesInfo)
+ rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numIterations=4)
self.assertTrue(gbt_model.predict(features[0]) <= 0)
self.assertTrue(gbt_model.predict(features[1]) > 0)
self.assertTrue(gbt_model.predict(features[2]) <= 0)
@@ -360,19 +367,19 @@ class ListTests(PySparkTestCase):
rdd = self.sc.parallelize(data)
features = [p.features.tolist() for p in data]
- lr_model = LinearRegressionWithSGD.train(rdd)
+ lr_model = LinearRegressionWithSGD.train(rdd, iterations=10)
self.assertTrue(lr_model.predict(features[0]) <= 0)
self.assertTrue(lr_model.predict(features[1]) > 0)
self.assertTrue(lr_model.predict(features[2]) <= 0)
self.assertTrue(lr_model.predict(features[3]) > 0)
- lasso_model = LassoWithSGD.train(rdd)
+ lasso_model = LassoWithSGD.train(rdd, iterations=10)
self.assertTrue(lasso_model.predict(features[0]) <= 0)
self.assertTrue(lasso_model.predict(features[1]) > 0)
self.assertTrue(lasso_model.predict(features[2]) <= 0)
self.assertTrue(lasso_model.predict(features[3]) > 0)
- rr_model = RidgeRegressionWithSGD.train(rdd)
+ rr_model = RidgeRegressionWithSGD.train(rdd, iterations=10)
self.assertTrue(rr_model.predict(features[0]) <= 0)
self.assertTrue(rr_model.predict(features[1]) > 0)
self.assertTrue(rr_model.predict(features[2]) <= 0)
@@ -380,35 +387,35 @@ class ListTests(PySparkTestCase):
categoricalFeaturesInfo = {0: 2} # feature 0 has 2 categories
dt_model = DecisionTree.trainRegressor(
- rdd, categoricalFeaturesInfo=categoricalFeaturesInfo)
+ rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, maxBins=4)
self.assertTrue(dt_model.predict(features[0]) <= 0)
self.assertTrue(dt_model.predict(features[1]) > 0)
self.assertTrue(dt_model.predict(features[2]) <= 0)
self.assertTrue(dt_model.predict(features[3]) > 0)
rf_model = RandomForest.trainRegressor(
- rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numTrees=100, seed=1)
+ rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numTrees=10, maxBins=4, seed=1)
self.assertTrue(rf_model.predict(features[0]) <= 0)
self.assertTrue(rf_model.predict(features[1]) > 0)
self.assertTrue(rf_model.predict(features[2]) <= 0)
self.assertTrue(rf_model.predict(features[3]) > 0)
gbt_model = GradientBoostedTrees.trainRegressor(
- rdd, categoricalFeaturesInfo=categoricalFeaturesInfo)
+ rdd, categoricalFeaturesInfo=categoricalFeaturesInfo, numIterations=4)
self.assertTrue(gbt_model.predict(features[0]) <= 0)
self.assertTrue(gbt_model.predict(features[1]) > 0)
self.assertTrue(gbt_model.predict(features[2]) <= 0)
self.assertTrue(gbt_model.predict(features[3]) > 0)
try:
- LinearRegressionWithSGD.train(rdd, initialWeights=array([1.0, 1.0]))
- LassoWithSGD.train(rdd, initialWeights=array([1.0, 1.0]))
- RidgeRegressionWithSGD.train(rdd, initialWeights=array([1.0, 1.0]))
+ LinearRegressionWithSGD.train(rdd, initialWeights=array([1.0, 1.0]), iterations=10)
+ LassoWithSGD.train(rdd, initialWeights=array([1.0, 1.0]), iterations=10)
+ RidgeRegressionWithSGD.train(rdd, initialWeights=array([1.0, 1.0]), iterations=10)
except ValueError:
self.fail()
-class StatTests(PySparkTestCase):
+class StatTests(MLlibTestCase):
# SPARK-4023
def test_col_with_different_rdds(self):
# numpy
@@ -438,7 +445,7 @@ class StatTests(PySparkTestCase):
self.assertTrue(math.fabs(summary2.normL2()[0] - expectedNormL2) < 1e-14)
-class VectorUDTTests(PySparkTestCase):
+class VectorUDTTests(MLlibTestCase):
dv0 = DenseVector([])
dv1 = DenseVector([1.0, 2.0])
@@ -472,7 +479,7 @@ class VectorUDTTests(PySparkTestCase):
@unittest.skipIf(not _have_scipy, "SciPy not installed")
-class SciPyTests(PySparkTestCase):
+class SciPyTests(MLlibTestCase):
"""
Test both vector operations and MLlib algorithms with SciPy sparse matrices,
@@ -613,7 +620,7 @@ class SciPyTests(PySparkTestCase):
self.assertTrue(dt_model.predict(features[3]) > 0)
-class ChiSqTestTests(PySparkTestCase):
+class ChiSqTestTests(MLlibTestCase):
def test_goodness_of_fit(self):
from numpy import inf
@@ -711,13 +718,13 @@ class ChiSqTestTests(PySparkTestCase):
self.assertIsNotNone(chi[1000])
-class SerDeTest(PySparkTestCase):
+class SerDeTest(MLlibTestCase):
def test_to_java_object_rdd(self): # SPARK-6660
data = RandomRDDs.uniformRDD(self.sc, 10, 5, seed=0)
self.assertEqual(_to_java_object_rdd(data).count(), 10)
-class FeatureTest(PySparkTestCase):
+class FeatureTest(MLlibTestCase):
def test_idf_model(self):
data = [
Vectors.dense([1, 2, 6, 0, 2, 3, 1, 1, 0, 0, 3]),
@@ -730,13 +737,8 @@ class FeatureTest(PySparkTestCase):
self.assertEqual(len(idf), 11)
-class Word2VecTests(PySparkTestCase):
+class Word2VecTests(MLlibTestCase):
def test_word2vec_setters(self):
- data = [
- ["I", "have", "a", "pen"],
- ["I", "like", "soccer", "very", "much"],
- ["I", "live", "in", "Tokyo"]
- ]
model = Word2Vec() \
.setVectorSize(2) \
.setLearningRate(0.01) \
@@ -765,7 +767,7 @@ class Word2VecTests(PySparkTestCase):
self.assertEquals(len(model.getVectors()), 3)
-class StandardScalerTests(PySparkTestCase):
+class StandardScalerTests(MLlibTestCase):
def test_model_setters(self):
data = [
[1.0, 2.0, 3.0],
@@ -793,3 +795,4 @@ if __name__ == "__main__":
unittest.main()
if not _have_scipy:
print("NOTE: SciPy tests were skipped as it does not seem to be installed")
+ sc.stop()
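The other half of the speedup, visible throughout the hunks above, is capping the work done by each training call (iterations=10, numTrees=10, maxBins=4, numIterations=4) while pinning seeds so the assertions stay deterministic. A self-contained sketch of that pattern is shown below; the toy data and the final print are made up for illustration, while the trainers and parameters mirror the ones used in the diff and assume a Spark release where these pyspark.mllib APIs (which date from the 1.x era) are available:

```python
from pyspark import SparkContext
from pyspark.mllib.classification import SVMWithSGD
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import RandomForest

sc = SparkContext('local[4]', "MLlib iteration-capping example")

# Tiny, linearly separable toy data (illustrative only, not from the test file).
data = [
    LabeledPoint(0.0, [0.0, 1.0]),
    LabeledPoint(1.0, [1.0, 0.0]),
    LabeledPoint(0.0, [0.0, 2.0]),
    LabeledPoint(1.0, [2.0, 0.0]),
]
rdd = sc.parallelize(data)

# A handful of iterations is enough to exercise the training path in a test,
# and a fixed seed keeps the tree ensemble reproducible across runs.
svm_model = SVMWithSGD.train(rdd, iterations=10)
rf_model = RandomForest.trainClassifier(
    rdd, numClasses=2, categoricalFeaturesInfo={}, numTrees=10, maxBins=4, seed=1)

print(svm_model.predict([2.0, 0.0]), rf_model.predict([2.0, 0.0]))
sc.stop()
```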