author     hyukjinkwon <gurwls223@gmail.com>            2016-11-29 09:41:32 +0000
committer  Sean Owen <sowen@cloudera.com>               2016-11-29 09:41:32 +0000
commit     f830bb9170f6b853565d9dd30ca7418b93a54fe3 (patch)
tree       fc0d6d04f1e3759745401f3a2cbfa337d76c40a9 /external/kafka-0-8
parent     7d5cb3af7621ad6eb85d1ba7f585c3921ca0a242 (diff)
[SPARK-3359][DOCS] Make javadoc8 work for unidoc/genjavadoc compatibility in Java API documentation
## What changes were proposed in this pull request?
This PR makes `sbt unidoc` complete successfully with Java 8.
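For reference, the docs build in question can be run with something like the following from the repository root (the exact invocation is assumed here, using the standard `build/sbt` launcher):
```
./build/sbt unidoc
```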
It roughly includes the following fixes:
- Fix unrecognisable class and method links in javadoc by changing them from `[[..]]` to `` `...` ``
```diff
- * A column that will be computed based on the data in a [[DataFrame]].
+ * A column that will be computed based on the data in a `DataFrame`.
```
- Fix throws annotations so that they are recognisable in javadoc
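A hypothetical sketch of this kind of change (the exception class and wording are illustrative, not taken from this diff): the exception is fully qualified so javadoc can resolve it:
```diff
- * @throws SparkException if the offsets cannot be fetched
+ * @throws org.apache.spark.SparkException if the offsets cannot be fetched
```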
- Fix URL links so that they use `<a href="http..."></a>` tags.
```diff
- * [[http://en.wikipedia.org/wiki/Decision_tree_learning Decision tree]] model for regression.
+ * <a href="http://en.wikipedia.org/wiki/Decision_tree_learning">
+ * Decision tree (Wikipedia)</a> model for regression.
```
```diff
- * see http://en.wikipedia.org/wiki/Receiver_operating_characteristic
+ * see <a href="http://en.wikipedia.org/wiki/Receiver_operating_characteristic">
+ * Receiver operating characteristic (Wikipedia)</a>
```
- Fix `<` and `>` characters, which javadoc interprets as HTML, by either:
  - spelling them out as `greater than`/`greater than or equal to` or `less than`/`less than or equal to` where applicable, or
  - wrapping them with `{{{...}}}` so they print literally in javadoc, or using `{@code ...}` or `{@literal ...}`. Please refer to https://github.com/apache/spark/pull/16013#discussion_r89665558
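As a hypothetical sketch of the second option (not taken verbatim from this diff), escaping a comparison operator in a doc comment:
```diff
- * The threshold must be >= 0.
+ * The threshold must be {@literal >=} 0.
```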
- Fix `</p>` complaints from javadoc
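A hypothetical instance of the `</p>` fix (javadoc 8 rejects a closing tag with no matching `<p>`; the stray tag is simply dropped):
```diff
- * </p>
+ *
```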
## How was this patch tested?
Manually tested by `jekyll build` with Java 7 and 8:
```
java version "1.7.0_80"
Java(TM) SE Runtime Environment (build 1.7.0_80-b15)
Java HotSpot(TM) 64-Bit Server VM (build 24.80-b11, mixed mode)
```
```
java version "1.8.0_45"
Java(TM) SE Runtime Environment (build 1.8.0_45-b14)
Java HotSpot(TM) 64-Bit Server VM (build 25.45-b02, mixed mode)
```
Author: hyukjinkwon <gurwls223@gmail.com>
Closes #16013 from HyukjinKwon/SPARK-3359-errors-more.
Diffstat (limited to 'external/kafka-0-8')
 external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala      | 20 ++++++++++++++++----
 external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaInputDStream.scala |  2 +-
 external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaUtils.scala        | 18 +++++++++---------
 external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/OffsetRange.scala       |  2 +-
4 files changed, 27 insertions, 15 deletions
diff --git a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala
index 35acb7b09f..e0e44d4440 100644
--- a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala
+++ b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaCluster.scala
@@ -231,7 +231,10 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
   // this 0 here indicates api version, in this case the original ZK backed api.
   private def defaultConsumerApiVersion: Short = 0
 
-  /** Requires Kafka >= 0.8.1.1. Defaults to the original ZooKeeper backed api version. */
+  /**
+   * Requires Kafka 0.8.1.1 or later.
+   * Defaults to the original ZooKeeper backed API version.
+   */
   def getConsumerOffsets(
       groupId: String,
       topicAndPartitions: Set[TopicAndPartition]
@@ -250,7 +253,10 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
     }
   }
 
-  /** Requires Kafka >= 0.8.1.1. Defaults to the original ZooKeeper backed api version. */
+  /**
+   * Requires Kafka 0.8.1.1 or later.
+   * Defaults to the original ZooKeeper backed API version.
+   */
   def getConsumerOffsetMetadata(
       groupId: String,
       topicAndPartitions: Set[TopicAndPartition]
@@ -287,7 +293,10 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
     Left(errs)
   }
 
-  /** Requires Kafka >= 0.8.1.1. Defaults to the original ZooKeeper backed api version. */
+  /**
+   * Requires Kafka 0.8.1.1 or later.
+   * Defaults to the original ZooKeeper backed API version.
+   */
   def setConsumerOffsets(
       groupId: String,
       offsets: Map[TopicAndPartition, Long]
@@ -305,7 +314,10 @@ class KafkaCluster(val kafkaParams: Map[String, String]) extends Serializable {
     setConsumerOffsetMetadata(groupId, meta, consumerApiVersion)
   }
 
-  /** Requires Kafka >= 0.8.1.1. Defaults to the original ZooKeeper backed api version. */
+  /**
+   * Requires Kafka 0.8.1.1 or later.
+   * Defaults to the original ZooKeeper backed API version.
+   */
   def setConsumerOffsetMetadata(
       groupId: String,
       metadata: Map[TopicAndPartition, OffsetAndMetadata]
diff --git a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaInputDStream.scala b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaInputDStream.scala
index 3713bda41b..7ff3a98ca5 100644
--- a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaInputDStream.scala
+++ b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaInputDStream.scala
@@ -38,7 +38,7 @@ import org.apache.spark.util.ThreadUtils
  *
  * @param kafkaParams Map of kafka configuration parameters.
  *                    See: http://kafka.apache.org/configuration.html
- * @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed
+ * @param topics Map of (topic_name to numPartitions) to consume. Each partition is consumed
  *               in its own thread.
  * @param storageLevel RDD storage level.
  */
diff --git a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaUtils.scala b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaUtils.scala
index 56f0cb0b16..d5aef8184f 100644
--- a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaUtils.scala
+++ b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/KafkaUtils.scala
@@ -47,7 +47,7 @@ object KafkaUtils {
    * @param ssc StreamingContext object
    * @param zkQuorum Zookeeper quorum (hostname:port,hostname:port,..)
    * @param groupId The group id for this consumer
-   * @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed
+   * @param topics Map of (topic_name to numPartitions) to consume. Each partition is consumed
    *               in its own thread
    * @param storageLevel Storage level to use for storing the received objects
    *                     (default: StorageLevel.MEMORY_AND_DISK_SER_2)
@@ -72,7 +72,7 @@ object KafkaUtils {
    * @param ssc StreamingContext object
    * @param kafkaParams Map of kafka configuration parameters,
    *                    see http://kafka.apache.org/08/configuration.html
-   * @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed
+   * @param topics Map of (topic_name to numPartitions) to consume. Each partition is consumed
    *               in its own thread.
    * @param storageLevel Storage level to use for storing the received objects
    * @tparam K type of Kafka message key
@@ -97,7 +97,7 @@ object KafkaUtils {
    * @param jssc JavaStreamingContext object
    * @param zkQuorum Zookeeper quorum (hostname:port,hostname:port,..)
    * @param groupId The group id for this consumer
-   * @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed
+   * @param topics Map of (topic_name to numPartitions) to consume. Each partition is consumed
    *               in its own thread
    * @return DStream of (Kafka message key, Kafka message value)
    */
@@ -115,7 +115,7 @@ object KafkaUtils {
    * @param jssc JavaStreamingContext object
    * @param zkQuorum Zookeeper quorum (hostname:port,hostname:port,..).
    * @param groupId The group id for this consumer.
-   * @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed
+   * @param topics Map of (topic_name to numPartitions) to consume. Each partition is consumed
    *               in its own thread.
    * @param storageLevel RDD storage level.
    * @return DStream of (Kafka message key, Kafka message value)
@@ -140,7 +140,7 @@ object KafkaUtils {
    * @param valueDecoderClass Type of kafka value decoder
    * @param kafkaParams Map of kafka configuration parameters,
    *                    see http://kafka.apache.org/08/configuration.html
-   * @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed
+   * @param topics Map of (topic_name to numPartitions) to consume. Each partition is consumed
    *               in its own thread
    * @param storageLevel RDD storage level.
    * @tparam K type of Kafka message key
@@ -396,7 +396,7 @@ object KafkaUtils {
    * You can access the offsets used in each batch from the generated RDDs (see
    * [[org.apache.spark.streaming.kafka.HasOffsetRanges]]).
    *  - Failure Recovery: To recover from driver failures, you have to enable checkpointing
-   *    in the [[StreamingContext]]. The information on consumed offset can be
+   *    in the `StreamingContext`. The information on consumed offset can be
    *    recovered from the checkpoint. See the programming guide for details (constraints, etc.).
    *  - End-to-end semantics: This stream ensures that every records is effectively received and
    *    transformed exactly once, but gives no guarantees on whether the transformed data are
@@ -448,7 +448,7 @@ object KafkaUtils {
    * You can access the offsets used in each batch from the generated RDDs (see
    * [[org.apache.spark.streaming.kafka.HasOffsetRanges]]).
    *  - Failure Recovery: To recover from driver failures, you have to enable checkpointing
-   *    in the [[StreamingContext]]. The information on consumed offset can be
+   *    in the `StreamingContext`. The information on consumed offset can be
    *    recovered from the checkpoint. See the programming guide for details (constraints, etc.).
    *  - End-to-end semantics: This stream ensures that every records is effectively received and
    *    transformed exactly once, but gives no guarantees on whether the transformed data are
@@ -499,7 +499,7 @@ object KafkaUtils {
    * You can access the offsets used in each batch from the generated RDDs (see
    * [[org.apache.spark.streaming.kafka.HasOffsetRanges]]).
    *  - Failure Recovery: To recover from driver failures, you have to enable checkpointing
-   *    in the [[StreamingContext]]. The information on consumed offset can be
+   *    in the `StreamingContext`. The information on consumed offset can be
    *    recovered from the checkpoint. See the programming guide for details (constraints, etc.).
    *  - End-to-end semantics: This stream ensures that every records is effectively received and
    *    transformed exactly once, but gives no guarantees on whether the transformed data are
@@ -565,7 +565,7 @@ object KafkaUtils {
    * You can access the offsets used in each batch from the generated RDDs (see
    * [[org.apache.spark.streaming.kafka.HasOffsetRanges]]).
    *  - Failure Recovery: To recover from driver failures, you have to enable checkpointing
-   *    in the [[StreamingContext]]. The information on consumed offset can be
+   *    in the `StreamingContext`. The information on consumed offset can be
    *    recovered from the checkpoint. See the programming guide for details (constraints, etc.).
    *  - End-to-end semantics: This stream ensures that every records is effectively received and
    *    transformed exactly once, but gives no guarantees on whether the transformed data are
diff --git a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/OffsetRange.scala b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/OffsetRange.scala
index d9b856e469..10d364f987 100644
--- a/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/OffsetRange.scala
+++ b/external/kafka-0-8/src/main/scala/org/apache/spark/streaming/kafka/OffsetRange.scala
@@ -22,7 +22,7 @@ import kafka.common.TopicAndPartition
 /**
  * Represents any object that has a collection of [[OffsetRange]]s. This can be used to access the
  * offset ranges in RDDs generated by the direct Kafka DStream (see
- * [[KafkaUtils.createDirectStream()]]).
+ * `KafkaUtils.createDirectStream()`).
  * {{{
  *   KafkaUtils.createDirectStream(...).foreachRDD { rdd =>
  *      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
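For context, the (truncated) scaladoc example above corresponds to a usage pattern like the following minimal sketch; the broker address, topic name, and streaming-context setup are illustrative placeholders:
```scala
import kafka.serializer.StringDecoder

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils}

object OffsetRangesExample {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("OffsetRangesExample").setMaster("local[2]")
    val ssc = new StreamingContext(conf, Seconds(10))

    // Kafka 0.8 direct stream: one RDD partition per Kafka partition.
    val kafkaParams = Map("metadata.broker.list" -> "localhost:9092")
    val stream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, Set("my-topic"))

    stream.foreachRDD { rdd =>
      // Cast to HasOffsetRanges to read the offsets this batch consumed.
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      offsetRanges.foreach { o =>
        println(s"${o.topic} ${o.partition} ${o.fromOffset} ${o.untilOffset}")
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
```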