From aefe79890541bc0829f184e03eb3961739ca8ef2 Mon Sep 17 00:00:00 2001
From: hyukjinkwon
Date: Thu, 23 Mar 2017 08:41:30 +0000
Subject: [MINOR][BUILD] Fix javadoc8 break

## What changes were proposed in this pull request?

Several javadoc8 breaks have been introduced. This PR proposes to fix those instances so that we can build the Scala/Java API docs.

```
[error] .../spark/sql/core/target/java/org/apache/spark/sql/streaming/GroupState.java:6: error: reference not found
[error]  * flatMapGroupsWithState operations on {link KeyValueGroupedDataset}.
[error]    ^
[error] .../spark/sql/core/target/java/org/apache/spark/sql/streaming/GroupState.java:10: error: reference not found
[error]  * Both, mapGroupsWithState and flatMapGroupsWithState in {link KeyValueGroupedDataset}
[error]    ^
[error] .../spark/sql/core/target/java/org/apache/spark/sql/streaming/GroupState.java:51: error: reference not found
[error]  * {link GroupStateTimeout.ProcessingTimeTimeout}) or event time (i.e.
[error]    ^
[error] .../spark/sql/core/target/java/org/apache/spark/sql/streaming/GroupState.java:52: error: reference not found
[error]  * {link GroupStateTimeout.EventTimeTimeout}).
[error]    ^
[error] .../spark/sql/core/target/java/org/apache/spark/sql/streaming/GroupState.java:158: error: reference not found
[error]  * Spark SQL types (see {link Encoder} for more details).
[error]    ^
[error] .../spark/mllib/target/java/org/apache/spark/ml/fpm/FPGrowthParams.java:26: error: bad use of '>'
[error]  * Number of partitions (>=1) used by parallel FP-growth. By default the param is not set, and
[error]    ^
[error] .../spark/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java:30: error: reference not found
[error]  * {link org.apache.spark.sql.KeyValueGroupedDataset#flatMapGroupsWithState(
[error]    ^
[error] .../spark/sql/core/target/java/org/apache/spark/sql/KeyValueGroupedDataset.java:211: error: reference not found
[error]  * See {link GroupState} for more details.
[error]    ^
[error] .../spark/sql/core/target/java/org/apache/spark/sql/KeyValueGroupedDataset.java:232: error: reference not found
[error]  * See {link GroupState} for more details.
[error]    ^
[error] .../spark/sql/core/target/java/org/apache/spark/sql/KeyValueGroupedDataset.java:254: error: reference not found
[error]  * See {link GroupState} for more details.
[error]    ^
[error] .../spark/sql/core/target/java/org/apache/spark/sql/KeyValueGroupedDataset.java:277: error: reference not found
[error]  * See {link GroupState} for more details.
[error]    ^
[error] .../spark/core/target/java/org/apache/spark/TaskContextImpl.java:10: error: reference not found
[error]  * {link TaskMetrics} & {link MetricsSystem} objects are not thread safe.
[error]    ^
[error] .../spark/core/target/java/org/apache/spark/TaskContextImpl.java:10: error: reference not found
[error]  * {link TaskMetrics} & {link MetricsSystem} objects are not thread safe.
[error]    ^
[info] 13 errors
```

```
jekyll 3.3.1 | Error: Unidoc generation failed
```

## How was this patch tested?

Manually via `jekyll build`

Author: hyukjinkwon

Closes #17389 from HyukjinKwon/minor-javadoc8-fix.
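To illustrate the pattern of the fix: as the errors above show, the Java sources that unidoc generates under `target/java` render Scaladoc `[[...]]` wiki links as `{@link ...}`, which javadoc 8 rejects with "reference not found" when it cannot resolve the target, whereas plain backtick monospace is emitted as `{@code ...}` and is not resolved at all. A minimal, hypothetical sketch of the doc-comment change (the `BrokenDoc`/`FixedDoc` names are made up and are not part of this patch):

```scala
// Hypothetical sketch, not part of this patch.

/**
 * Breaks the javadoc 8 build: the wiki link below becomes
 * "{@link KeyValueGroupedDataset}" in the generated Java source, and javadoc 8
 * reports "reference not found" because the class is not importable there.
 *
 * Used with [[org.apache.spark.sql.KeyValueGroupedDataset]].
 */
trait BrokenDoc

/**
 * Builds cleanly: plain monospace is emitted as "{@code KeyValueGroupedDataset}",
 * which javadoc 8 does not attempt to resolve.
 *
 * Used with `KeyValueGroupedDataset`.
 */
trait FixedDoc
```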
---
 .../function/FlatMapGroupsWithStateFunction.java   |  2 +-
 .../apache/spark/sql/KeyValueGroupedDataset.scala  |  8 ++++----
 .../apache/spark/sql/streaming/GroupState.scala    | 22 +++++++++++-----------
 3 files changed, 16 insertions(+), 16 deletions(-)

(limited to 'sql/core/src/main')

diff --git a/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java b/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
index 026b37cabb..802949c0dd 100644
--- a/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
+++ b/sql/core/src/main/java/org/apache/spark/api/java/function/FlatMapGroupsWithStateFunction.java
@@ -27,7 +27,7 @@ import org.apache.spark.sql.streaming.GroupState;
 /**
  * ::Experimental::
  * Base interface for a map function used in
- * {@link org.apache.spark.sql.KeyValueGroupedDataset#flatMapGroupsWithState(
+ * {@code org.apache.spark.sql.KeyValueGroupedDataset.flatMapGroupsWithState(
  * FlatMapGroupsWithStateFunction, org.apache.spark.sql.streaming.OutputMode,
  * org.apache.spark.sql.Encoder, org.apache.spark.sql.Encoder)}
  * @since 2.1.1
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
index 87c5621768..022c2f5629 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
@@ -298,7 +298,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
  * For a static batch Dataset, the function will be invoked once per group. For a streaming
  * Dataset, the function will be invoked for each group repeatedly in every trigger, and
  * updates to each group's state will be saved across invocations.
- * See [[GroupState]] for more details.
+ * See `GroupState` for more details.
  *
  * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types.
  * @tparam U The type of the output objects. Must be encodable to Spark SQL types.
@@ -328,7 +328,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
  * For a static batch Dataset, the function will be invoked once per group. For a streaming
  * Dataset, the function will be invoked for each group repeatedly in every trigger, and
  * updates to each group's state will be saved across invocations.
- * See [[GroupState]] for more details.
+ * See `GroupState` for more details.
  *
  * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types.
  * @tparam U The type of the output objects. Must be encodable to Spark SQL types.
@@ -360,7 +360,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
  * For a static batch Dataset, the function will be invoked once per group. For a streaming
  * Dataset, the function will be invoked for each group repeatedly in every trigger, and
  * updates to each group's state will be saved across invocations.
- * See [[GroupState]] for more details.
+ * See `GroupState` for more details.
  *
  * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types.
  * @tparam U The type of the output objects. Must be encodable to Spark SQL types.
@@ -400,7 +400,7 @@ class KeyValueGroupedDataset[K, V] private[sql](
  * For a static batch Dataset, the function will be invoked once per group. For a streaming
  * Dataset, the function will be invoked for each group repeatedly in every trigger, and
  * updates to each group's state will be saved across invocations.
- * See [[GroupState]] for more details.
+ * See `GroupState` for more details.
  *
  * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types.
  * @tparam U The type of the output objects. Must be encodable to Spark SQL types.
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala
index 60a4d0d8f9..15df906ca7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/GroupState.scala
@@ -18,18 +18,18 @@
 package org.apache.spark.sql.streaming

 import org.apache.spark.annotation.{Experimental, InterfaceStability}
-import org.apache.spark.sql.{Encoder, KeyValueGroupedDataset}
+import org.apache.spark.sql.KeyValueGroupedDataset
 import org.apache.spark.sql.catalyst.plans.logical.LogicalGroupState

 /**
  * :: Experimental ::
  *
  * Wrapper class for interacting with per-group state data in `mapGroupsWithState` and
- * `flatMapGroupsWithState` operations on [[KeyValueGroupedDataset]].
+ * `flatMapGroupsWithState` operations on `KeyValueGroupedDataset`.
  *
  * Detail description on `[map/flatMap]GroupsWithState` operation
  * --------------------------------------------------------------
- * Both, `mapGroupsWithState` and `flatMapGroupsWithState` in [[KeyValueGroupedDataset]]
+ * Both, `mapGroupsWithState` and `flatMapGroupsWithState` in `KeyValueGroupedDataset`
  * will invoke the user-given function on each group (defined by the grouping function in
  * `Dataset.groupByKey()`) while maintaining user-defined per-group state between invocations.
  * For a static batch Dataset, the function will be invoked once per group. For a streaming
@@ -70,8 +70,8 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalGroupState
  * `[map|flatMap]GroupsWithState`, but the exact timeout duration/timestamp is configurable per
  * group by calling `setTimeout...()` in `GroupState`.
  *  - Timeouts can be either based on processing time (i.e.
- *    [[GroupStateTimeout.ProcessingTimeTimeout]]) or event time (i.e.
- *    [[GroupStateTimeout.EventTimeTimeout]]).
+ *    `GroupStateTimeout.ProcessingTimeTimeout`) or event time (i.e.
+ *    `GroupStateTimeout.EventTimeTimeout`).
  *  - With `ProcessingTimeTimeout`, the timeout duration can be set by calling
  *    `GroupState.setTimeoutDuration`. The timeout will occur when the clock has advanced by the set
  *    duration. Guarantees provided by this timeout with a duration of D ms are as follows:
@@ -177,7 +177,7 @@ import org.apache.spark.sql.catalyst.plans.logical.LogicalGroupState
  * }}}
  *
  * @tparam S User-defined type of the state to be stored for each group. Must be encodable into
- *           Spark SQL types (see [[Encoder]] for more details).
+ *           Spark SQL types (see `Encoder` for more details).
  * @since 2.2.0
  */
 @Experimental
@@ -224,7 +224,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
   /**
    * Set the timeout duration for this key as a string. For example, "1 hour", "2 days", etc.
    *
-   * @note, ProcessingTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+   * @note ProcessingTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
    */
   @throws[IllegalArgumentException]("if 'duration' is not a valid duration")
   @throws[IllegalStateException]("when state is either not initialized, or already removed")
@@ -240,7 +240,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
    * Set the timeout timestamp for this key as milliseconds in epoch time.
   * This timestamp cannot be older than the current watermark.
    *
-   * @note, EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+   * @note EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
    */
   def setTimeoutTimestamp(timestampMs: Long): Unit

@@ -254,7 +254,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
    * The final timestamp (including the additional duration) cannot be older than the
    * current watermark.
    *
-   * @note, EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+   * @note EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
    */
   def setTimeoutTimestamp(timestampMs: Long, additionalDuration: String): Unit

@@ -265,7 +265,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
    * Set the timeout timestamp for this key as a java.sql.Date.
    * This timestamp cannot be older than the current watermark.
    *
-   * @note, EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+   * @note EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
    */
   def setTimeoutTimestamp(timestamp: java.sql.Date): Unit

@@ -279,7 +279,7 @@ trait GroupState[S] extends LogicalGroupState[S] {
    * The final timestamp (including the additional duration) cannot be older than the
    * current watermark.
    *
-   * @note, EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
+   * @note EventTimeTimeout must be enabled in `[map/flatmap]GroupsWithStates`.
    */
   def setTimeoutTimestamp(timestamp: java.sql.Date, additionalDuration: String): Unit
 }
--
cgit v1.2.3
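For context, a minimal usage sketch of the `mapGroupsWithState`/`GroupState` API whose docs are touched above (the `Event` case class, the input path, and the per-user total are made up for illustration; only the Spark calls follow the 2.2 API described in these doc comments):

```scala
// Hypothetical example of the API documented in KeyValueGroupedDataset/GroupState.
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.GroupState

// Made-up input schema for the sketch.
case class Event(user: String, count: Long)

object GroupStateSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("group-state-sketch").getOrCreate()
    import spark.implicits._

    // Hypothetical input; any Dataset[Event] works the same way.
    val events = spark.read.json("events.json").as[Event]

    // Keep one Long of state per user key: for a static batch Dataset the function
    // runs once per group, for a streaming Dataset the state persists across triggers.
    val totals = events
      .groupByKey(_.user)
      .mapGroupsWithState { (user: String, rows: Iterator[Event], state: GroupState[Long]) =>
        val total = state.getOption.getOrElse(0L) + rows.map(_.count).sum
        state.update(total)
        (user, total)
      }

    totals.show()
    spark.stop()
  }
}
```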