author     Davies Liu <davies.liu@gmail.com>      2014-10-06 14:07:53 -0700
committer  Josh Rosen <joshrosen@apache.org>      2014-10-06 14:07:53 -0700
commit     4f01265f7d62e070ba42c251255e385644c1b16c (patch)
tree       e6dbec031ebe0653ab232ac613548289c720eb48 /python/run-tests
parent     20ea54cc7a5176ebc63bfa9393a9bf84619bfc66 (diff)
[SPARK-3786] [PySpark] speedup tests
This patch speeds up the PySpark tests by re-using the SparkContext in tests.py and mllib/tests.py, reducing the overhead of creating a SparkContext per test, and removes some test cases that did not make sense. It also improves the performance of some cases, such as MergerTests and SortTests.

before this patch:
real 21m27.320s
user 4m42.967s
sys  0m17.343s

after this patch:
real 9m47.541s
user 2m12.947s
sys  0m14.543s

This cuts the wall-clock time by more than half.

Author: Davies Liu <davies.liu@gmail.com>

Closes #2646 from davies/tests and squashes the following commits:

c54de60 [Davies Liu] revert change about memory limit
6a2a4b0 [Davies Liu] refactor of tests, speedup 100%
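Most of the saving comes from the refactoring inside tests.py and mllib/tests.py, which this diff (limited to python/run-tests) does not show: a SparkContext is created once per test class rather than once per test method. A minimal sketch of that pattern, assuming a plain unittest setup; the class names here are illustrative, not necessarily the identifiers used in the patch:

    import unittest

    from pyspark import SparkContext

    class ReusedSparkTestCase(unittest.TestCase):
        # setUpClass/tearDownClass run once per class, so the JVM and
        # Python worker daemons start once instead of once per test.
        @classmethod
        def setUpClass(cls):
            cls.sc = SparkContext("local[4]", cls.__name__)

        @classmethod
        def tearDownClass(cls):
            cls.sc.stop()

    class RDDTests(ReusedSparkTestCase):
        def test_count(self):
            self.assertEqual(self.sc.parallelize(range(10)).count(), 10)

Each test method reads the shared context through self.sc, and the shutdown cost is paid once at the end of the class instead of after every test.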
Diffstat (limited to 'python/run-tests')
-rwxr-xr-x  python/run-tests  74
1 file changed, 37 insertions(+), 37 deletions(-)
diff --git a/python/run-tests b/python/run-tests
index a7ec270c7d..c713861eb7 100755
--- a/python/run-tests
+++ b/python/run-tests
@@ -34,7 +34,7 @@ rm -rf metastore warehouse
function run_test() {
echo "Running test: $1"
- SPARK_TESTING=1 "$FWDIR"/bin/pyspark $1 2>&1 | tee -a unit-tests.log
+ SPARK_TESTING=1 time "$FWDIR"/bin/pyspark $1 2>&1 | tee -a unit-tests.log
FAILED=$((PIPESTATUS[0]||$FAILED))
@@ -48,6 +48,37 @@ function run_test() {
fi
}
+function run_core_tests() {
+ echo "Run core tests ..."
+ run_test "pyspark/rdd.py"
+ run_test "pyspark/context.py"
+ run_test "pyspark/conf.py"
+ PYSPARK_DOC_TEST=1 run_test "pyspark/broadcast.py"
+ PYSPARK_DOC_TEST=1 run_test "pyspark/accumulators.py"
+ PYSPARK_DOC_TEST=1 run_test "pyspark/serializers.py"
+ run_test "pyspark/shuffle.py"
+ run_test "pyspark/tests.py"
+}
+
+function run_sql_tests() {
+ echo "Run sql tests ..."
+ run_test "pyspark/sql.py"
+}
+
+function run_mllib_tests() {
+ echo "Run mllib tests ..."
+ run_test "pyspark/mllib/classification.py"
+ run_test "pyspark/mllib/clustering.py"
+ run_test "pyspark/mllib/linalg.py"
+ run_test "pyspark/mllib/random.py"
+ run_test "pyspark/mllib/recommendation.py"
+ run_test "pyspark/mllib/regression.py"
+ run_test "pyspark/mllib/stat.py"
+ run_test "pyspark/mllib/tree.py"
+ run_test "pyspark/mllib/util.py"
+ run_test "pyspark/mllib/tests.py"
+}
+
echo "Running PySpark tests. Output is in python/unit-tests.log."
export PYSPARK_PYTHON="python"
@@ -60,29 +91,9 @@ fi
echo "Testing with Python version:"
$PYSPARK_PYTHON --version
-run_test "pyspark/rdd.py"
-run_test "pyspark/context.py"
-run_test "pyspark/conf.py"
-run_test "pyspark/sql.py"
-# These tests are included in the module-level docs, and so must
-# be handled on a higher level rather than within the python file.
-export PYSPARK_DOC_TEST=1
-run_test "pyspark/broadcast.py"
-run_test "pyspark/accumulators.py"
-run_test "pyspark/serializers.py"
-unset PYSPARK_DOC_TEST
-run_test "pyspark/shuffle.py"
-run_test "pyspark/tests.py"
-run_test "pyspark/mllib/classification.py"
-run_test "pyspark/mllib/clustering.py"
-run_test "pyspark/mllib/linalg.py"
-run_test "pyspark/mllib/random.py"
-run_test "pyspark/mllib/recommendation.py"
-run_test "pyspark/mllib/regression.py"
-run_test "pyspark/mllib/stat.py"
-run_test "pyspark/mllib/tests.py"
-run_test "pyspark/mllib/tree.py"
-run_test "pyspark/mllib/util.py"
+run_core_tests
+run_sql_tests
+run_mllib_tests
# Try to test with PyPy
if [ $(which pypy) ]; then
@@ -90,19 +101,8 @@ if [ $(which pypy) ]; then
echo "Testing with PyPy version:"
$PYSPARK_PYTHON --version
- run_test "pyspark/rdd.py"
- run_test "pyspark/context.py"
- run_test "pyspark/conf.py"
- run_test "pyspark/sql.py"
- # These tests are included in the module-level docs, and so must
- # be handled on a higher level rather than within the python file.
- export PYSPARK_DOC_TEST=1
- run_test "pyspark/broadcast.py"
- run_test "pyspark/accumulators.py"
- run_test "pyspark/serializers.py"
- unset PYSPARK_DOC_TEST
- run_test "pyspark/shuffle.py"
- run_test "pyspark/tests.py"
+ run_core_tests
+ run_sql_tests
fi
if [[ $FAILED == 0 ]]; then