author     Michael Armbrust <michael@databricks.com>  2015-01-17 17:03:07 -0800
committer  Patrick Wendell <patrick@databricks.com>   2015-01-17 17:03:07 -0800
commit     6999910b0c5ef26080f978be1e2bf065f0816ac9 (patch)
tree       58a1547368971ea09996f93e34d3992460b2e554 /project
parent     c1f3c27f22c75188fbbc718de771ccdd637e4944 (diff)
[SPARK-5096] Use sbt tasks instead of vals to get hadoop version
This makes it possible to compile Spark as an external `ProjectRef`, whereas now we throw a `FileNotFoundException`.

Author: Michael Armbrust <michael@databricks.com>

Closes #3905 from marmbrus/effectivePom and squashes the following commits:

fd63aae [Michael Armbrust] Use sbt tasks instead of vals to get hadoop version.
Diffstat (limited to 'project')
-rw-r--r--  project/SparkBuild.scala | 25
1 file changed, 6 insertions(+), 19 deletions(-)
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index b2c546da21..ded4b5443a 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -114,17 +114,6 @@ object SparkBuild extends PomBuild {
 
   override val userPropertiesMap = System.getProperties.toMap
 
-  // Handle case where hadoop.version is set via profile.
-  // Needed only because we read back this property in sbt
-  // when we create the assembly jar.
-  val pom = loadEffectivePom(new File("pom.xml"),
-    profiles = profiles,
-    userProps = userPropertiesMap)
-  if (System.getProperty("hadoop.version") == null) {
-    System.setProperty("hadoop.version",
-      pom.getProperties.get("hadoop.version").asInstanceOf[String])
-  }
-
   lazy val MavenCompile = config("m2r") extend(Compile)
   lazy val publishLocalBoth = TaskKey[Unit]("publish-local", "publish local for m2 and ivy")
 
@@ -303,16 +292,15 @@ object Assembly {
   import sbtassembly.Plugin._
   import AssemblyKeys._
 
+  val hadoopVersion = taskKey[String]("The version of hadoop that spark is compiled against.")
+
   lazy val settings = assemblySettings ++ Seq(
     test in assembly := {},
-    jarName in assembly <<= (version, moduleName) map { (v, mName) =>
-      if (mName.contains("network-yarn")) {
-        // This must match the same name used in maven (see network/yarn/pom.xml)
-        "spark-" + v + "-yarn-shuffle.jar"
-      } else {
-        mName + "-" + v + "-hadoop" + System.getProperty("hadoop.version") + ".jar"
-      }
+    hadoopVersion := {
+      sys.props.get("hadoop.version")
+        .getOrElse(SbtPomKeys.effectivePom.value.getProperties.get("hadoop.version").asInstanceOf[String])
     },
+    jarName in assembly := s"${moduleName.value}-${version.value}-hadoop${hadoopVersion.value}.jar",
     mergeStrategy in assembly := {
       case PathList("org", "datanucleus", xs @ _*) => MergeStrategy.discard
       case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard
@@ -323,7 +311,6 @@ object Assembly {
       case _ => MergeStrategy.first
     }
   )
-
 }
 
 object Unidoc {