7 files changed, 35 insertions, 6 deletions
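Under JDK 8, javadoc runs doclint by default and fails the build on HTML errors, including bare `<` and `>` characters inside Javadoc comments. The diff below therefore does two things: it entity-escapes the offending characters in the network/common Javadocs, and it relaxes doclint in both the Maven and sbt builds with `-Xdoclint:all -Xdoclint:-missing` (run every check, but do not fail on missing @param/@return documentation). A minimal illustration of the failure mode, using a hypothetical class that is not part of this commit:

// Hypothetical illustration (not from this commit): both methods compile,
// but `javadoc` on JDK 8 with default doclint settings reports an HTML
// error for the bare '>' in the first comment and accepts the second.
public class Example {
  /** Retries only happen when maxRetries > 0. */     // rejected by doclint
  public void bad() {}

  /** Retries only happen when maxRetries &gt; 0. */   // accepted: entity-escaped
  public void good() {}
}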
diff --git a/network/common/src/main/java/org/apache/spark/network/client/TransportClient.java b/network/common/src/main/java/org/apache/spark/network/client/TransportClient.java
index 4e944114e8..37f2e34ceb 100644
--- a/network/common/src/main/java/org/apache/spark/network/client/TransportClient.java
+++ b/network/common/src/main/java/org/apache/spark/network/client/TransportClient.java
@@ -49,7 +49,7 @@ import org.apache.spark.network.util.NettyUtils;
  * to perform this setup.
  *
  * For example, a typical workflow might be:
- * client.sendRPC(new OpenFile("/foo")) --> returns StreamId = 100
+ * client.sendRPC(new OpenFile("/foo")) --&gt; returns StreamId = 100
  * client.fetchChunk(streamId = 100, chunkIndex = 0, callback)
  * client.fetchChunk(streamId = 100, chunkIndex = 1, callback)
  * ...
diff --git a/network/common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java b/network/common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
index 731d48d4d9..a6d390e13f 100644
--- a/network/common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
+++ b/network/common/src/main/java/org/apache/spark/network/server/OneForOneStreamManager.java
@@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.spark.network.buffer.ManagedBuffer;
 
 /**
- * StreamManager which allows registration of an Iterator<ManagedBuffer>, which are individually
+ * StreamManager which allows registration of an Iterator&lt;ManagedBuffer&gt;, which are individually
  * fetched as chunks by the client. Each registered buffer is one chunk.
  */
 public class OneForOneStreamManager extends StreamManager {
diff --git a/network/common/src/main/java/org/apache/spark/network/util/LimitedInputStream.java b/network/common/src/main/java/org/apache/spark/network/util/LimitedInputStream.java
index 63ca43c046..57113ed12d 100644
--- a/network/common/src/main/java/org/apache/spark/network/util/LimitedInputStream.java
+++ b/network/common/src/main/java/org/apache/spark/network/util/LimitedInputStream.java
@@ -27,7 +27,7 @@ import com.google.common.base.Preconditions;
  * Wraps a {@link InputStream}, limiting the number of bytes which can be read.
  *
  * This code is from Guava's 14.0 source code, because there is no compatible way to
- * use this functionality in both a Guava 11 environment and a Guava >14 environment.
+ * use this functionality in both a Guava 11 environment and a Guava &gt;14 environment.
  */
 public final class LimitedInputStream extends FilterInputStream {
   private long left;
diff --git a/network/common/src/main/java/org/apache/spark/network/util/NettyUtils.java b/network/common/src/main/java/org/apache/spark/network/util/NettyUtils.java
index b3991a6577..2a4b88b64c 100644
--- a/network/common/src/main/java/org/apache/spark/network/util/NettyUtils.java
+++ b/network/common/src/main/java/org/apache/spark/network/util/NettyUtils.java
@@ -99,7 +99,7 @@ public class NettyUtils {
     return new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 8, -8, 8);
   }
 
-  /** Returns the remote address on the channel or "<remote address>" if none exists. */
+  /** Returns the remote address on the channel or "&lt;remote address&gt;" if none exists. */
   public static String getRemoteAddress(Channel channel) {
     if (channel != null && channel.remoteAddress() != null) {
       return channel.remoteAddress().toString();
diff --git a/network/common/src/main/java/org/apache/spark/network/util/TransportConf.java b/network/common/src/main/java/org/apache/spark/network/util/TransportConf.java
index 621427d8cb..1af40acf8b 100644
--- a/network/common/src/main/java/org/apache/spark/network/util/TransportConf.java
+++ b/network/common/src/main/java/org/apache/spark/network/util/TransportConf.java
@@ -72,7 +72,7 @@ public class TransportConf {
 
   /**
    * Time (in milliseconds) that we will wait in order to perform a retry after an IOException.
-   * Only relevant if maxIORetries > 0.
+   * Only relevant if maxIORetries &gt; 0.
    */
   public int ioRetryWaitTime() { return conf.getInt("spark.shuffle.io.retryWaitMs", 5000); }
 
diff --git a/pom.xml b/pom.xml
--- a/pom.xml
+++ b/pom.xml
@@ -1037,6 +1037,11 @@
           </filesets>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-javadoc-plugin</artifactId>
+        <version>2.10.1</version>
+      </plugin>
     </plugins>
   </pluginManagement>
@@ -1230,6 +1235,25 @@
     </profile>
 
+    <profile>
+      <id>doclint-java8-disable</id>
+      <activation>
+        <jdk>[1.8,)</jdk>
+      </activation>
+
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-javadoc-plugin</artifactId>
+            <configuration>
+              <additionalparam>-Xdoclint:all -Xdoclint:-missing</additionalparam>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+
   <!-- A series of build profiles where customizations for particular Hadoop releases can be made -->
 
   <!-- Hadoop-a.b.c dependencies can be found at
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index d539a3d91a..f73e0f6795 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -136,7 +136,12 @@ object SparkBuild extends PomBuild {
     },
     publishMavenStyle in MavenCompile := true,
     publishLocal in MavenCompile <<= publishTask(publishLocalConfiguration in MavenCompile, deliverLocal),
-    publishLocalBoth <<= Seq(publishLocal in MavenCompile, publishLocal).dependOn
+    publishLocalBoth <<= Seq(publishLocal in MavenCompile, publishLocal).dependOn,
+
+    javacOptions in (Compile, doc) ++= {
+      val Array(major, minor, _) = System.getProperty("java.version").split("\\.", 3)
+      if (major.toInt >= 1 && minor.toInt >= 8) Seq("-Xdoclint:all", "-Xdoclint:-missing") else Seq.empty
+    }
   )
 
   def enable(settings: Seq[Setting[_]])(projectRef: ProjectRef) = {
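
On the sbt side, the added javacOptions block parses java.version assuming the legacy "1.x.y" scheme that all JDKs used before Java 9, so the second component is what identifies Java 8. A standalone sketch of the same check, written in Java to match the files above (the class name is illustrative, not from the commit):

// Hypothetical sketch mirroring the version check added to SparkBuild.scala.
// Like the original, it assumes the pre-Java-9 "1.x.y" java.version format
// (e.g. "1.8.0_292") and would need adjusting for Java 9+ version strings.
public class DoclintFlags {
  public static void main(String[] args) {
    String[] v = System.getProperty("java.version").split("\\.", 3);
    int major = Integer.parseInt(v[0]);
    int minor = Integer.parseInt(v[1]);
    if (major >= 1 && minor >= 8) {
      // Same flags the build passes to javadoc: run every doclint check
      // except the one for missing @param/@return documentation.
      System.out.println("-Xdoclint:all -Xdoclint:-missing");
    }
  }
}

On the Maven side no extra flag is needed: the doclint-java8-disable profile activates itself on JDK 1.8 and later through its <jdk>[1.8,)</jdk> range, and the pinned maven-javadoc-plugin 2.10.1 applies the <additionalparam> setting when generating javadocs.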