diff options
author | Marcelo Vanzin <vanzin@cloudera.com> | 2016-04-04 16:52:21 -0700 |
---|---|---|
committer | Josh Rosen <joshrosen@databricks.com> | 2016-04-04 16:52:22 -0700 |
commit | 24d7d2e453ab5eef6099a32fb9e8ed60f6ada93a (patch) | |
tree | 2069beb0e471afa4e1b1867efe786100b7f77f79 /python/pyspark | |
parent | 400b2f863ffaa01a34a8dae1541c61526fef908b (diff) | |
download | spark-24d7d2e453ab5eef6099a32fb9e8ed60f6ada93a.tar.gz spark-24d7d2e453ab5eef6099a32fb9e8ed60f6ada93a.tar.bz2 spark-24d7d2e453ab5eef6099a32fb9e8ed60f6ada93a.zip |
[SPARK-13579][BUILD] Stop building the main Spark assembly.
This change modifies the "assembly/" module to just copy needed
dependencies to its build directory, and modifies the packaging
script to pick those up (and remove duplicate jars packaged in the
examples module).
I also made some minor adjustments to dependencies to remove some
test jars from the final packaging, and remove jars that conflict with each
other when packaged separately (e.g. servlet api).
Also note that this change restores guava in applications' classpaths, even
though it's still shaded inside Spark. This is now needed for the Hadoop
libraries that are packaged with Spark, which now are not processed by
the shade plugin.
Author: Marcelo Vanzin <vanzin@cloudera.com>
Closes #11796 from vanzin/SPARK-13579.
Diffstat (limited to 'python/pyspark')
-rw-r--r-- | python/pyspark/streaming/tests.py | 6 |
1 file changed, 3 insertions, 3 deletions
diff --git a/python/pyspark/streaming/tests.py b/python/pyspark/streaming/tests.py index d010c0e008..148bf7e8ff 100644 --- a/python/pyspark/streaming/tests.py +++ b/python/pyspark/streaming/tests.py @@ -1482,7 +1482,7 @@ def search_kafka_assembly_jar(): raise Exception( ("Failed to find Spark Streaming kafka assembly jar in %s. " % kafka_assembly_dir) + "You need to build Spark with " - "'build/sbt assembly/assembly streaming-kafka-assembly/assembly' or " + "'build/sbt assembly/package streaming-kafka-assembly/assembly' or " "'build/mvn package' before running this test.") elif len(jars) > 1: raise Exception(("Found multiple Spark Streaming Kafka assembly JARs: %s; please " @@ -1548,7 +1548,7 @@ if __name__ == "__main__": elif are_kinesis_tests_enabled is False: sys.stderr.write("Skipping all Kinesis Python tests as the optional Kinesis project was " "not compiled into a JAR. To run these tests, " - "you need to build Spark with 'build/sbt -Pkinesis-asl assembly/assembly " + "you need to build Spark with 'build/sbt -Pkinesis-asl assembly/package " "streaming-kinesis-asl-assembly/assembly' or " "'build/mvn -Pkinesis-asl package' before running this test.") else: @@ -1556,7 +1556,7 @@ if __name__ == "__main__": ("Failed to find Spark Streaming Kinesis assembly jar in %s. " % kinesis_asl_assembly_dir) + "You need to build Spark with 'build/sbt -Pkinesis-asl " - "assembly/assembly streaming-kinesis-asl-assembly/assembly'" + "assembly/package streaming-kinesis-asl-assembly/assembly'" "or 'build/mvn -Pkinesis-asl package' before running this test.") sys.stderr.write("Running tests: %s \n" % (str(testcases))) |