path: root/project/SparkBuild.scala
author    Marcelo Vanzin <vanzin@cloudera.com>   2016-04-04 16:52:21 -0700
committer Josh Rosen <joshrosen@databricks.com>  2016-04-04 16:52:22 -0700
commit    24d7d2e453ab5eef6099a32fb9e8ed60f6ada93a (patch)
tree      2069beb0e471afa4e1b1867efe786100b7f77f79 /project/SparkBuild.scala
parent    400b2f863ffaa01a34a8dae1541c61526fef908b (diff)
download  spark-24d7d2e453ab5eef6099a32fb9e8ed60f6ada93a.tar.gz
          spark-24d7d2e453ab5eef6099a32fb9e8ed60f6ada93a.tar.bz2
          spark-24d7d2e453ab5eef6099a32fb9e8ed60f6ada93a.zip
[SPARK-13579][BUILD] Stop building the main Spark assembly.
This change modifies the "assembly/" module to just copy needed dependencies to its build directory, and modifies the packaging script to pick those up (and remove duplicate jars packaged in the examples module).

I also made some minor adjustments to dependencies to remove some test jars from the final packaging, and to remove jars that conflict with each other when packaged separately (e.g. the servlet API).

Also note that this change restores Guava to applications' classpaths, even though it is still shaded inside Spark. This is now needed for the Hadoop libraries that are packaged with Spark, which are no longer processed by the shade plugin.

Author: Marcelo Vanzin <vanzin@cloudera.com>

Closes #11796 from vanzin/SPARK-13579.
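In practice, the patch replaces the old fat-assembly step with a plain dependency-copy task (the CopyDependencies settings enabled in the diff below). As a minimal sketch only (sbt 0.13-style keys with illustrative object and key names, not the actual CopyDependencies implementation from this patch), such a task could look like:

// Illustrative sketch, not the CopyDependencies object shipped in this patch:
// copy a project's runtime dependency jars into <crossTarget>/jars.
import java.nio.file.Files

import sbt._
import sbt.Keys._

object CopyDependenciesSketch {
  val copyDeps = taskKey[Unit]("Copy runtime dependency jars to the build directory.")

  lazy val settings = Seq(
    copyDeps := {
      val destDir = crossTarget.value / "jars"
      IO.createDirectory(destDir)
      (dependencyClasspath in Runtime).value.map(_.data).filter(_.isFile).foreach { jar =>
        val dest = destDir / jar.getName
        if (!dest.exists()) {
          Files.copy(jar.toPath, dest.toPath)
        }
      }
    }
  )
}

The packaging script can then pick the jars up from that directory instead of unpacking a single assembly jar, which is the design choice the commit message describes.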
Diffstat (limited to 'project/SparkBuild.scala')
-rw-r--r--  project/SparkBuild.scala | 45
1 file changed, 22 insertions(+), 23 deletions(-)
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 5d62b688b9..b32480b164 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -57,11 +57,12 @@ object BuildCommons {
Seq("yarn", "java8-tests", "ganglia-lgpl", "streaming-kinesis-asl",
"docker-integration-tests").map(ProjectRef(buildLocation, _))
- val assemblyProjects@Seq(assembly, networkYarn, streamingFlumeAssembly, streamingKafkaAssembly, streamingKinesisAslAssembly) =
- Seq("assembly", "network-yarn", "streaming-flume-assembly", "streaming-kafka-assembly", "streaming-kinesis-asl-assembly")
+ val assemblyProjects@Seq(networkYarn, streamingFlumeAssembly, streamingKafkaAssembly, streamingKinesisAslAssembly) =
+ Seq("network-yarn", "streaming-flume-assembly", "streaming-kafka-assembly", "streaming-kinesis-asl-assembly")
.map(ProjectRef(buildLocation, _))
- val copyJarsProjects@Seq(examples) = Seq("examples").map(ProjectRef(buildLocation, _))
+ val copyJarsProjects@Seq(assembly, examples) = Seq("assembly", "examples")
+ .map(ProjectRef(buildLocation, _))
val tools = ProjectRef(buildLocation, "tools")
// Root project.
@@ -263,8 +264,14 @@ object SparkBuild extends PomBuild {
/* Unsafe settings */
enable(Unsafe.settings)(unsafe)
- /* Set up tasks to copy dependencies during packaging. */
- copyJarsProjects.foreach(enable(CopyDependencies.settings))
+ /*
+ * Set up tasks to copy dependencies during packaging. This step can be disabled in the command
+ * line, so that dev/mima can run without trying to copy these files again and potentially
+ * causing issues.
+ */
+ if (!"false".equals(System.getProperty("copyDependencies"))) {
+ copyJarsProjects.foreach(enable(CopyDependencies.settings))
+ }
/* Enable Assembly for all assembly projects */
assemblyProjects.foreach(enable(Assembly.settings))
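A note on the toggle added above: "false".equals(System.getProperty("copyDependencies")) is a null-safe comparison, so the copy step stays enabled when the property is unset and is skipped only when it is set explicitly, presumably by passing -DcopyDependencies=false on the sbt command line (e.g. so dev/mima does not re-copy the files, as the comment says). A minimal sketch of the same pattern:

// Null-safe: an unset property, or any value other than "false", keeps the step enabled.
val copyEnabled = !"false".equals(System.getProperty("copyDependencies"))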
@@ -477,8 +484,6 @@ object Assembly {
val hadoopVersion = taskKey[String]("The version of hadoop that spark is compiled against.")
- val deployDatanucleusJars = taskKey[Unit]("Deploy datanucleus jars to the spark/lib_managed/jars directory")
-
lazy val settings = assemblySettings ++ Seq(
test in assembly := {},
hadoopVersion := {
@@ -497,27 +502,13 @@ object Assembly {
s"${mName}-test-${v}.jar"
},
mergeStrategy in assembly := {
- case PathList("org", "datanucleus", xs @ _*) => MergeStrategy.discard
case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard
case m if m.toLowerCase.matches("meta-inf.*\\.sf$") => MergeStrategy.discard
case "log4j.properties" => MergeStrategy.discard
case m if m.toLowerCase.startsWith("meta-inf/services/") => MergeStrategy.filterDistinctLines
case "reference.conf" => MergeStrategy.concat
case _ => MergeStrategy.first
- },
- deployDatanucleusJars := {
- val jars: Seq[File] = (fullClasspath in assembly).value.map(_.data)
- .filter(_.getPath.contains("org.datanucleus"))
- var libManagedJars = new File(BuildCommons.sparkHome, "lib_managed/jars")
- libManagedJars.mkdirs()
- jars.foreach { jar =>
- val dest = new File(libManagedJars, jar.getName)
- if (!dest.exists()) {
- Files.copy(jar.toPath, dest.toPath)
- }
- }
- },
- assembly <<= assembly.dependsOn(deployDatanucleusJars)
+ }
)
}
@@ -698,6 +689,13 @@ object Java8TestSettings {
object TestSettings {
import BuildCommons._
+ private val scalaBinaryVersion =
+ if (System.getProperty("scala-2.10") == "true") {
+ "2.10"
+ } else {
+ "2.11"
+ }
+
lazy val settings = Seq (
// Fork new JVMs for tests and set Java options for those
fork := true,
@@ -707,6 +705,7 @@ object TestSettings {
"SPARK_DIST_CLASSPATH" ->
(fullClasspath in Test).value.files.map(_.getAbsolutePath).mkString(":").stripSuffix(":"),
"SPARK_PREPEND_CLASSES" -> "1",
+ "SPARK_SCALA_VERSION" -> scalaBinaryVersion,
"SPARK_TESTING" -> "1",
"JAVA_HOME" -> sys.env.get("JAVA_HOME").getOrElse(sys.props("java.home"))),
javaOptions in Test += s"-Djava.io.tmpdir=$testTempDir",
@@ -744,7 +743,7 @@ object TestSettings {
// Make sure the test temp directory exists.
resourceGenerators in Test <+= resourceManaged in Test map { outDir: File =>
if (!new File(testTempDir).isDirectory()) {
- require(new File(testTempDir).mkdirs())
+ require(new File(testTempDir).mkdirs(), s"Error creating temp directory $testTempDir.")
}
Seq[File]()
},