author     Takuya UESHIN <ueshin@happy-camper.st>   2014-11-28 13:00:15 -0500
committer  Patrick Wendell <pwendell@gmail.com>     2014-11-28 13:00:15 -0500
commit     e464f0ac2d7210a4bf715478885fe7a8d397fe89 (patch)
tree       a7dd47931f952fd203c67f8282c9bfccd7451c6d
parent     53ed7f1c7f8534d0d7856d83e3b46e36d09659d2 (diff)
[SPARK-4193][BUILD] Disable doclint in Java 8 to prevent build errors.
Author: Takuya UESHIN <ueshin@happy-camper.st>

Closes #3058 from ueshin/issues/SPARK-4193 and squashes the following commits:

e096bb1 [Takuya UESHIN] Add a plugin declaration to pluginManagement.
6762ec2 [Takuya UESHIN] Fix usage of -Xdoclint javadoc option.
fdb280a [Takuya UESHIN] Fix Javadoc errors.
4745f3c [Takuya UESHIN] Merge branch 'master' into issues/SPARK-4193
923e2f0 [Takuya UESHIN] Use doclint option `-missing` instead of `none`.
30d6718 [Takuya UESHIN] Fix Javadoc errors.
b548017 [Takuya UESHIN] Disable doclint in Java 8 to prevent from build error.
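Background on the Javadoc fixes below: Java 8 runs doclint by default and fails javadoc generation on malformed HTML inside doc comments, so literal "<", ">", and "&" must be escaped as entities. A minimal sketch of the failure mode, using a hypothetical class that is not part of this patch:

    /**
     * Returns a stream id --> fails under Java 8 doclint ("malformed HTML"),
     * while writing it as --&gt; passes.
     */
    public class DoclintExample {}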
 network/common/src/main/java/org/apache/spark/network/client/TransportClient.java        |  2 +-
 network/common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java |  2 +-
 network/common/src/main/java/org/apache/spark/network/util/LimitedInputStream.java       |  2 +-
 network/common/src/main/java/org/apache/spark/network/util/NettyUtils.java               |  2 +-
 network/common/src/main/java/org/apache/spark/network/util/TransportConf.java            |  2 +-
 pom.xml                                                                                  | 24 ++++++++++++++++++++++++
 project/SparkBuild.scala                                                                 |  7 ++++++-
 7 files changed, 35 insertions(+), 6 deletions(-)
diff --git a/network/common/src/main/java/org/apache/spark/network/client/TransportClient.java b/network/common/src/main/java/org/apache/spark/network/client/TransportClient.java
index 4e944114e8..37f2e34ceb 100644
--- a/network/common/src/main/java/org/apache/spark/network/client/TransportClient.java
+++ b/network/common/src/main/java/org/apache/spark/network/client/TransportClient.java
@@ -49,7 +49,7 @@ import org.apache.spark.network.util.NettyUtils;
* to perform this setup.
*
* For example, a typical workflow might be:
- * client.sendRPC(new OpenFile("/foo")) --> returns StreamId = 100
+ * client.sendRPC(new OpenFile("/foo")) --&gt; returns StreamId = 100
* client.fetchChunk(streamId = 100, chunkIndex = 0, callback)
* client.fetchChunk(streamId = 100, chunkIndex = 1, callback)
* ...
diff --git a/network/common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java b/network/common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
index 731d48d4d9..a6d390e13f 100644
--- a/network/common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
+++ b/network/common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
@@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory;
import org.apache.spark.network.buffer.ManagedBuffer;
/**
- * StreamManager which allows registration of an Iterator<ManagedBuffer>, which are individually
+ * StreamManager which allows registration of an Iterator&lt;ManagedBuffer&gt;, which are individually
* fetched as chunks by the client. Each registered buffer is one chunk.
*/
public class OneForOneStreamManager extends StreamManager {
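For reference, entity escaping is not the only form doclint accepts: javadoc's {@code} tag renders its contents literally, so generic types can stay readable. A hedged alternative shape for the comment above (not what this patch does):

    /**
     * StreamManager which allows registration of an {@code Iterator<ManagedBuffer>}, which are
     * individually fetched as chunks by the client. Each registered buffer is one chunk.
     */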
diff --git a/network/common/src/main/java/org/apache/spark/network/util/LimitedInputStream.java b/network/common/src/main/java/org/apache/spark/network/util/LimitedInputStream.java
index 63ca43c046..57113ed12d 100644
--- a/network/common/src/main/java/org/apache/spark/network/util/LimitedInputStream.java
+++ b/network/common/src/main/java/org/apache/spark/network/util/LimitedInputStream.java
@@ -27,7 +27,7 @@ import com.google.common.base.Preconditions;
* Wraps a {@link InputStream}, limiting the number of bytes which can be read.
*
* This code is from Guava's 14.0 source code, because there is no compatible way to
- * use this functionality in both a Guava 11 environment and a Guava >14 environment.
+ * use this functionality in both a Guava 11 environment and a Guava &gt;14 environment.
*/
public final class LimitedInputStream extends FilterInputStream {
private long left;
diff --git a/network/common/src/main/java/org/apache/spark/network/util/NettyUtils.java b/network/common/src/main/java/org/apache/spark/network/util/NettyUtils.java
index b3991a6577..2a4b88b64c 100644
--- a/network/common/src/main/java/org/apache/spark/network/util/NettyUtils.java
+++ b/network/common/src/main/java/org/apache/spark/network/util/NettyUtils.java
@@ -99,7 +99,7 @@ public class NettyUtils {
return new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 8, -8, 8);
}
- /** Returns the remote address on the channel or "<remote address>" if none exists. */
+ /** Returns the remote address on the channel or "&lt;remote address&gt;" if none exists. */
public static String getRemoteAddress(Channel channel) {
if (channel != null && channel.remoteAddress() != null) {
return channel.remoteAddress().toString();
diff --git a/network/common/src/main/java/org/apache/spark/network/util/TransportConf.java b/network/common/src/main/java/org/apache/spark/network/util/TransportConf.java
index 621427d8cb..1af40acf8b 100644
--- a/network/common/src/main/java/org/apache/spark/network/util/TransportConf.java
+++ b/network/common/src/main/java/org/apache/spark/network/util/TransportConf.java
@@ -72,7 +72,7 @@ public class TransportConf {
/**
* Time (in milliseconds) that we will wait in order to perform a retry after an IOException.
- * Only relevant if maxIORetries > 0.
+ * Only relevant if maxIORetries &gt; 0.
*/
public int ioRetryWaitTime() { return conf.getInt("spark.shuffle.io.retryWaitMs", 5000); }
diff --git a/pom.xml b/pom.xml
index 4220c1ace0..b7df53d3e5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1025,6 +1025,11 @@
</filesets>
</configuration>
</plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <version>2.10.1</version>
+ </plugin>
</plugins>
</pluginManagement>
@@ -1218,6 +1223,25 @@
</profile>
+ <profile>
+ <id>doclint-java8-disable</id>
+ <activation>
+ <jdk>[1.8,)</jdk>
+ </activation>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <configuration>
+ <additionalparam>-Xdoclint:all -Xdoclint:-missing</additionalparam>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+
<!-- A series of build profiles where customizations for particular Hadoop releases can be made -->
<!-- Hadoop-a.b.c dependencies can be found at
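About the flags in the profile above: -Xdoclint:all enables every doclint check group, and -Xdoclint:-missing then switches off only the "missing" group (undocumented parameters, return values, and so on). The net effect, sketched with a hypothetical method: missing tags are tolerated, while genuine HTML or reference errors still fail the build.

    /** Waits before retrying after an IOException. Only relevant if maxIORetries &gt; 0. */
    public int ioRetryWaitTime() {  // no @return tag: allowed by -Xdoclint:-missing
        return 5000;                // an unescaped ">" in the comment above would still fail
    }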
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 9db81df589..b16ed66aeb 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -136,7 +136,12 @@ object SparkBuild extends PomBuild {
},
publishMavenStyle in MavenCompile := true,
publishLocal in MavenCompile <<= publishTask(publishLocalConfiguration in MavenCompile, deliverLocal),
- publishLocalBoth <<= Seq(publishLocal in MavenCompile, publishLocal).dependOn
+ publishLocalBoth <<= Seq(publishLocal in MavenCompile, publishLocal).dependOn,
+
+ javacOptions in (Compile, doc) ++= {
+ val Array(major, minor, _) = System.getProperty("java.version").split("\\.", 3)
+ if (major.toInt >= 1 && minor.toInt >= 8) Seq("-Xdoclint:all", "-Xdoclint:-missing") else Seq.empty
+ }
)
def enable(settings: Seq[Setting[_]])(projectRef: ProjectRef) = {
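The version check added to SparkBuild.scala assumes a pre-Java 9 "java.version" string such as "1.8.0_25" (major "1", minor "8"). A minimal standalone Java sketch of the same logic, under that same assumption:

    public class DoclintVersionCheck {
        public static void main(String[] args) {
            // Assumes a 1.x-style version string, e.g. "1.8.0_25" -> ["1", "8", "0_25"].
            String[] v = System.getProperty("java.version").split("\\.", 3);
            boolean java8 = Integer.parseInt(v[0]) >= 1 && Integer.parseInt(v[1]) >= 8;
            // When true, SparkBuild adds "-Xdoclint:all" and "-Xdoclint:-missing" to javadoc options.
            System.out.println(java8);
        }
    }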