Diffstat (limited to 'project/SparkBuild.scala')
-rw-r--r--  project/SparkBuild.scala | 18
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 6018b22bae..b1a9f39342 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -701,15 +701,29 @@ object Unidoc {
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/hive/test")))
}
+ private def ignoreClasspaths(classpaths: Seq[Classpath]): Seq[Classpath] = {
+ classpaths
+ .map(_.filterNot(_.data.getCanonicalPath.matches(""".*kafka-clients-0\.10.*""")))
+ .map(_.filterNot(_.data.getCanonicalPath.matches(""".*kafka_2\..*-0\.10.*""")))
+ }
+
val unidocSourceBase = settingKey[String]("Base URL of source links in Scaladoc.")
lazy val settings = scalaJavaUnidocSettings ++ Seq (
publish := {},
unidocProjectFilter in(ScalaUnidoc, unidoc) :=
- inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, streamingFlumeSink, yarn, tags),
+ inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, streamingFlumeSink, yarn, tags, streamingKafka010),
unidocProjectFilter in(JavaUnidoc, unidoc) :=
- inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, streamingFlumeSink, yarn, tags),
+ inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, streamingFlumeSink, yarn, tags, streamingKafka010),
+
+ unidocAllClasspaths in (ScalaUnidoc, unidoc) := {
+ ignoreClasspaths((unidocAllClasspaths in (ScalaUnidoc, unidoc)).value)
+ },
+
+ unidocAllClasspaths in (JavaUnidoc, unidoc) := {
+ ignoreClasspaths((unidocAllClasspaths in (JavaUnidoc, unidoc)).value)
+ },
// Skip actual catalyst, but include the subproject.
// Catalyst is not public API and contains quasiquotes which break scaladoc.