author    Shixiong Zhu <shixiong@databricks.com>  2016-03-25 17:37:16 -0700
committer Reynold Xin <rxin@databricks.com>  2016-03-25 17:37:16 -0700
commit    24587ce433aa30f30a5d1ed6566365f24c222a27 (patch)
tree      b1ff8ffa17b643d3e833be33debecf209d20ff6d /project/SparkBuild.scala
parent    54d13bed87fcf2968f77e1f1153e85184ec91d78 (diff)
[SPARK-14073][STREAMING][TEST-MAVEN] Move flume back to Spark
## What changes were proposed in this pull request?

This PR moves flume back to Spark, as per the discussion on the dev mailing list.

## How was this patch tested?

Existing Jenkins tests.

Author: Shixiong Zhu <shixiong@databricks.com>

Closes #11895 from zsxwing/move-flume-back.
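For context, a minimal sketch of the application-side API this module provides, assuming the push-based FlumeUtils.createStream entry point from org.apache.spark.streaming.flume; the host, port, and app name here are placeholders, not taken from this commit:

    // Minimal sketch: consuming the re-added streaming-flume module.
    import org.apache.spark.SparkConf
    import org.apache.spark.streaming.{Seconds, StreamingContext}
    import org.apache.spark.streaming.flume.FlumeUtils

    object FlumeWordCountSketch {
      def main(args: Array[String]): Unit = {
        val conf = new SparkConf().setAppName("FlumeWordCountSketch")
        val ssc = new StreamingContext(conf, Seconds(10))

        // Receive Avro events pushed by a Flume agent to this host/port.
        val stream = FlumeUtils.createStream(ssc, "localhost", 9999)
        stream.map(e => new String(e.event.getBody.array())).print()

        ssc.start()
        ssc.awaitTermination()
      }
    }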
Diffstat (limited to 'project/SparkBuild.scala')
-rw-r--r--  project/SparkBuild.scala | 22
1 file changed, 14 insertions(+), 8 deletions(-)
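The hunks below extend the idiom SparkBuild.scala uses throughout: a Seq of module names is mapped to ProjectRef handles and destructured with an @-pattern, so each module is bound both individually and as part of the aggregate. A standalone sketch of that idiom, reduced to two modules and a hypothetical object name:

    import sbt._

    object BuildCommonsSketch {
      // Resolve subprojects relative to the root build, as SparkBuild.scala does.
      val buildLocation = file(".").getAbsoluteFile.getParentFile

      // The @-pattern binds the whole Seq (for aggregation) and each element
      // (for per-project settings) in a single definition.
      val streamingProjects@Seq(streaming, streamingFlume) =
        Seq("streaming", "streaming-flume").map(ProjectRef(buildLocation, _))
    }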
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index dbe98d1e14..fb229b979d 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -39,9 +39,9 @@ object BuildCommons {
).map(ProjectRef(buildLocation, _))
val streamingProjects@Seq(
- streaming, streamingKafka
+ streaming, streamingFlumeSink, streamingFlume, streamingKafka
) = Seq(
- "streaming", "streaming-kafka"
+ "streaming", "streaming-flume-sink", "streaming-flume", "streaming-kafka"
).map(ProjectRef(buildLocation, _))
val allProjects@Seq(
@@ -56,8 +56,8 @@ object BuildCommons {
Seq("yarn", "java8-tests", "ganglia-lgpl", "streaming-kinesis-asl",
"docker-integration-tests").map(ProjectRef(buildLocation, _))
- val assemblyProjects@Seq(assembly, networkYarn, streamingKafkaAssembly, streamingKinesisAslAssembly) =
- Seq("assembly", "network-yarn", "streaming-kafka-assembly", "streaming-kinesis-asl-assembly")
+ val assemblyProjects@Seq(assembly, networkYarn, streamingFlumeAssembly, streamingKafkaAssembly, streamingKinesisAslAssembly) =
+ Seq("assembly", "network-yarn", "streaming-flume-assembly", "streaming-kafka-assembly", "streaming-kinesis-asl-assembly")
.map(ProjectRef(buildLocation, _))
val copyJarsProjects@Seq(examples) = Seq("examples").map(ProjectRef(buildLocation, _))
@@ -283,6 +283,8 @@ object SparkBuild extends PomBuild {
/* Hive console settings */
enable(Hive.settings)(hive)
+ enable(Flume.settings)(streamingFlumeSink)
+
enable(Java8TestSettings.settings)(java8Tests)
enable(DockerIntegrationTests.settings)(dockerIntegrationTests)
@@ -348,6 +350,10 @@ object Unsafe {
)
}
+object Flume {
+ lazy val settings = sbtavro.SbtAvro.avroSettings
+}
+
object DockerIntegrationTests {
// This serves to override the override specified in DependencyOverrides:
lazy val settings = Seq(
@@ -526,7 +532,7 @@ object Assembly {
.getOrElse(SbtPomKeys.effectivePom.value.getProperties.get("hadoop.version").asInstanceOf[String])
},
jarName in assembly <<= (version, moduleName, hadoopVersion) map { (v, mName, hv) =>
- if (mName.contains("streaming-kafka-assembly") || mName.contains("streaming-kinesis-asl-assembly")) {
+ if (mName.contains("streaming-flume-assembly") || mName.contains("streaming-kafka-assembly") || mName.contains("streaming-kinesis-asl-assembly")) {
// This must match the same name used in maven (see external/kafka-assembly/pom.xml)
s"${mName}-${v}.jar"
} else {
@@ -644,9 +650,9 @@ object Unidoc {
publish := {},
unidocProjectFilter in(ScalaUnidoc, unidoc) :=
- inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, yarn, testTags),
+ inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, streamingFlumeSink, yarn, testTags),
unidocProjectFilter in(JavaUnidoc, unidoc) :=
- inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, yarn, testTags),
+ inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, streamingFlumeSink, yarn, testTags),
// Skip actual catalyst, but include the subproject.
// Catalyst is not public API and contains quasiquotes which break scaladoc.
@@ -665,7 +671,7 @@ object Unidoc {
"-public",
"-group", "Core Java API", packageList("api.java", "api.java.function"),
"-group", "Spark Streaming", packageList(
- "streaming.api.java", "streaming.kafka", "streaming.kinesis"
+ "streaming.api.java", "streaming.flume", "streaming.kafka", "streaming.kinesis"
),
"-group", "MLlib", packageList(
"mllib.classification", "mllib.clustering", "mllib.evaluation.binary", "mllib.linalg",