author     Sean Owen <sowen@cloudera.com>       2014-10-25 23:18:02 -0700
committer  Xiangrui Meng <meng@databricks.com>  2014-10-25 23:18:02 -0700
commit     df7974b8e59d00e8efbb61629418fa6265c1ddab (patch)
tree       0ff4a5f2807e1d41993e30db954d8d2a8365b968
parent     c6834440085b79f6d3e011f9e55ffd672be855fe (diff)
SPARK-3359 [DOCS] sbt/sbt unidoc doesn't work with Java 8
This follows https://github.com/apache/spark/pull/2893, but does not completely fix SPARK-3359 either. This fixes minor scaladoc/javadoc issues that Javadoc 8 will treat as errors.

Author: Sean Owen <sowen@cloudera.com>

Closes #2909 from srowen/SPARK-3359 and squashes the following commits:

f62c347 [Sean Owen] Fix some javadoc issues that javadoc 8 considers errors. This is not all of the errors turned up when javadoc 8 runs on output of genjavadoc.
-rw-r--r--  core/src/main/java/org/apache/spark/TaskContext.java                           |  2
-rw-r--r--  core/src/main/java/org/apache/spark/api/java/function/PairFunction.java        |  3
-rw-r--r--  core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala              |  6
-rw-r--r--  core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala                |  4
-rw-r--r--  core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala           |  5
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala           |  2
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala |  7
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala                 |  8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSchemaRDD.scala      |  2
9 files changed, 21 insertions, 18 deletions
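
For context: javadoc 8 parses doc comments as strict HTML and fails the build on stray angle brackets, which is why the hunks below replace < and > with entities. A minimal sketch of the rule, on a hypothetical interface (EscapingDemo is not part of the patch):

    /**
     * Javadoc 8 parses doc comments as HTML, so a generic written in prose must
     * use entities, Tuple2&lt;K, V&gt;, or a code tag, {@code Tuple2<K, V>},
     * which needs no escaping. Both render as literal angle brackets.
     */
    public interface EscapingDemo {}
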
diff --git a/core/src/main/java/org/apache/spark/TaskContext.java b/core/src/main/java/org/apache/spark/TaskContext.java
index 2d998d4c7a..0d6973203e 100644
--- a/core/src/main/java/org/apache/spark/TaskContext.java
+++ b/core/src/main/java/org/apache/spark/TaskContext.java
@@ -71,7 +71,6 @@ public abstract class TaskContext implements Serializable {
/**
* Add a (Java friendly) listener to be executed on task completion.
* This will be called in all situation - success, failure, or cancellation.
- * <p/>
* An example use is for HadoopRDD to register a callback to close the input stream.
*/
public abstract TaskContext addTaskCompletionListener(TaskCompletionListener listener);
@@ -79,7 +78,6 @@ public abstract class TaskContext implements Serializable {
/**
* Add a listener in the form of a Scala closure to be executed on task completion.
* This will be called in all situations - success, failure, or cancellation.
- * <p/>
* An example use is for HadoopRDD to register a callback to close the input stream.
*/
public abstract TaskContext addTaskCompletionListener(final Function1<TaskContext, Unit> f);
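
A usage sketch of the hook documented above; the stream and the surrounding task code are hypothetical, only addTaskCompletionListener and org.apache.spark.util.TaskCompletionListener come from the API:

    // Inside a task: release a per-task resource on success, failure, or
    // cancellation alike, as HadoopRDD does with its input stream.
    context.addTaskCompletionListener(new TaskCompletionListener() {
      @Override
      public void onTaskCompletion(TaskContext ctx) {
        try {
          stream.close();        // the hypothetical per-task resource
        } catch (IOException e) {
          // cleanup is best-effort
        }
      }
    });
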
diff --git a/core/src/main/java/org/apache/spark/api/java/function/PairFunction.java b/core/src/main/java/org/apache/spark/api/java/function/PairFunction.java
index abd9bcc07a..99bf240a17 100644
--- a/core/src/main/java/org/apache/spark/api/java/function/PairFunction.java
+++ b/core/src/main/java/org/apache/spark/api/java/function/PairFunction.java
@@ -22,7 +22,8 @@ import java.io.Serializable;
import scala.Tuple2;
/**
- * A function that returns key-value pairs (Tuple2<K, V>), and can be used to construct PairRDDs.
+ * A function that returns key-value pairs (Tuple2&lt;K, V&gt;), and can be used to
+ * construct PairRDDs.
*/
public interface PairFunction<T, K, V> extends Serializable {
public Tuple2<K, V> call(T t) throws Exception;
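
As a usage sketch, PairFunction is the argument type of JavaRDD.mapToPair; the lines RDD below is assumed, not part of the patch:

    // Key each line by its first word; assumes a JavaRDD<String> named lines.
    JavaPairRDD<String, String> byFirstWord = lines.mapToPair(
        new PairFunction<String, String, String>() {
          @Override
          public Tuple2<String, String> call(String line) throws Exception {
            return new Tuple2<String, String>(line.split(" ")[0], line);
          }
        });
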
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
index a6123bd108..8e8f7f6c4f 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
@@ -114,7 +114,7 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double]) extends JavaRDDLike[JDouble, Ja
* Return an RDD with the elements from `this` that are not in `other`.
*
* Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
- * RDD will be <= us.
+ * RDD will be &lt;= us.
*/
def subtract(other: JavaDoubleRDD): JavaDoubleRDD =
fromRDD(srdd.subtract(other))
@@ -233,11 +233,11 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double]) extends JavaRDDLike[JDouble, Ja
* to the left except for the last which is closed
* e.g. for the array
* [1,10,20,50] the buckets are [1,10) [10,20) [20,50]
- * e.g 1<=x<10 , 10<=x<20, 20<=x<50
+ * e.g 1&lt;=x&lt;10 , 10&lt;=x&lt;20, 20&lt;=x&lt;50
* And on the input of 1 and 50 we would have a histogram of 1,0,0
*
* Note: if your histogram is evenly spaced (e.g. [0, 10, 20, 30]) this can be switched
- * from an O(log n) inseration to O(1) per element. (where n = # buckets) if you set evenBuckets
+ * from an O(log n) insertion to O(1) per element. (where n = # buckets) if you set evenBuckets
* to true.
* buckets must be sorted and not contain any duplicates.
* buckets array must be at least two elements
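
A small sketch of the bucket semantics described above, with the same [1,10,20,50] buckets; assumes a live JavaSparkContext named jsc:

    JavaDoubleRDD values = jsc.parallelizeDoubles(Arrays.asList(1.0, 15.0, 50.0));
    long[] counts = values.histogram(new double[]{1.0, 10.0, 20.0, 50.0});
    // 1 -> [1,10), 15 -> [10,20), 50 -> the closed last bucket [20,50],
    // so counts is {1, 1, 1}.
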
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
index c38b96528d..e37f3acaf6 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
@@ -392,7 +392,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
* Return an RDD with the elements from `this` that are not in `other`.
*
* Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
- * RDD will be <= us.
+ * RDD will be &lt;= us.
*/
def subtract(other: JavaPairRDD[K, V]): JavaPairRDD[K, V] =
fromRDD(rdd.subtract(other))
@@ -413,7 +413,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
* Return an RDD with the pairs from `this` whose keys are not in `other`.
*
* Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
- * RDD will be <= us.
+ * RDD will be &lt;= us.
*/
def subtractByKey[W](other: JavaPairRDD[K, W]): JavaPairRDD[K, V] = {
implicit val ctag: ClassTag[W] = fakeClassTag
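
In practice the by-key variant reads like this sketch (data and variable names are made up; assumes a JavaSparkContext named jsc):

    JavaPairRDD<String, Integer> left = jsc.parallelizePairs(Arrays.asList(
        new Tuple2<String, Integer>("a", 1), new Tuple2<String, Integer>("b", 2)));
    JavaPairRDD<String, String> right = jsc.parallelizePairs(Arrays.asList(
        new Tuple2<String, String>("b", "ignored")));
    // Keeps pairs from `left` whose keys do not appear in `right`: [("a", 1)].
    JavaPairRDD<String, Integer> result = left.subtractByKey(right);
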
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
index 45168ba62d..0565adf4d4 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
@@ -215,7 +215,10 @@ class JavaSparkContext(val sc: SparkContext)
* hdfs://a-hdfs-path/part-nnnnn
* }}}
*
- * Do `JavaPairRDD<String, String> rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")`,
+ * Do
+ * {{{
+ * JavaPairRDD<String, String> rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")
+ * }}}
*
* <p> then `rdd` contains
* {{{
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala
index 4734251127..dfad25d57c 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala
@@ -26,7 +26,7 @@ import org.apache.spark.mllib.linalg.{Vector, Vectors}
* :: Experimental ::
* Normalizes samples individually to unit L^p^ norm
*
- * For any 1 <= p < Double.PositiveInfinity, normalizes samples using
+ * For any 1 &lt;= p &lt; Double.PositiveInfinity, normalizes samples using
* sum(abs(vector).^p^)^(1/p)^ as norm.
*
* For p = Double.PositiveInfinity, max(abs(vector)) will be used as norm for normalization.
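
Concretely, for p = 2 the transform divides each sample by its Euclidean norm; a minimal sketch using MLlib's Normalizer and Vectors (the data is made up):

    Normalizer l2 = new Normalizer(2.0);
    // sum(abs(v).^2)^(1/2) = 5 for [3, 4], so the result is [0.6, 0.8].
    Vector unit = l2.transform(Vectors.dense(3.0, 4.0));
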
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala
index ec2d481dcc..10a515af88 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala
@@ -152,7 +152,7 @@ class RowMatrix(
* storing the right singular vectors, is computed via matrix multiplication as
* U = A * (V * S^-1^), if requested by user. The actual method to use is determined
* automatically based on the cost:
- * - If n is small (n < 100) or k is large compared with n (k > n / 2), we compute the Gramian
+ * - If n is small (n &lt; 100) or k is large compared with n (k > n / 2), we compute the Gramian
* matrix first and then compute its top eigenvalues and eigenvectors locally on the driver.
* This requires a single pass with O(n^2^) storage on each executor and on the driver, and
* O(n^2^ k) time on the driver.
@@ -169,7 +169,8 @@ class RowMatrix(
* @note The conditions that decide which method to use internally and the default parameters are
* subject to change.
*
- * @param k number of leading singular values to keep (0 < k <= n). It might return less than k if
+ * @param k number of leading singular values to keep (0 &lt; k &lt;= n).
+ * It might return less than k if
* there are numerically zero singular values or there are not enough Ritz values
* converged before the maximum number of Arnoldi update iterations is reached (in case
* that matrix A is ill-conditioned).
@@ -192,7 +193,7 @@ class RowMatrix(
/**
* The actual SVD implementation, visible for testing.
*
- * @param k number of leading singular values to keep (0 < k <= n)
+ * @param k number of leading singular values to keep (0 &lt; k &lt;= n)
* @param computeU whether to compute U
* @param rCond the reciprocal condition number
* @param maxIter max number of iterations (if ARPACK is used)
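
A usage sketch of the entry point documented above; mat is an assumed RowMatrix with at least 3 columns:

    // k = 3 leading singular values, computeU = true, rCond = 1e-9.
    SingularValueDecomposition<RowMatrix, Matrix> svd = mat.computeSVD(3, true, 1e-9);
    RowMatrix U = svd.U();  // only computed because computeU is true
    Vector s = svd.s();     // singular values, descending; may hold fewer than k
    Matrix V = svd.V();     // right singular vectors
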
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
index ca35100aa9..dce0adffa6 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
@@ -196,8 +196,8 @@ object MLUtils {
/**
* Load labeled data from a file. The data format used here is
- * <L>, <f1> <f2> ...
- * where <f1>, <f2> are feature values in Double and <L> is the corresponding label as Double.
+ * L, f1 f2 ...
+ * where f1, f2 are feature values in Double and L is the corresponding label as Double.
*
* @param sc SparkContext
* @param dir Directory to the input data files.
@@ -219,8 +219,8 @@ object MLUtils {
/**
* Save labeled data to a file. The data format used here is
- * <L>, <f1> <f2> ...
- * where <f1>, <f2> are feature values in Double and <L> is the corresponding label as Double.
+ * L, f1 f2 ...
+ * where f1, f2 are feature values in Double and L is the corresponding label as Double.
*
* @param data An RDD of LabeledPoints containing data to be saved.
* @param dir Directory to save the data.
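
A file in this format holds lines like 1.0, 2.5 3.0 4.5 (label, comma, then space-separated features). A loading sketch, assuming a JavaSparkContext named jsc and an existing path:

    // Each line "L, f1 f2 ..." becomes a LabeledPoint(L, [f1, f2, ...]).
    RDD<LabeledPoint> points = MLUtils.loadLabeledData(jsc.sc(), "hdfs://path/labeled-data");
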
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSchemaRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSchemaRDD.scala
index e7faba0c7f..1e0ccb368a 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSchemaRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSchemaRDD.scala
@@ -193,7 +193,7 @@ class JavaSchemaRDD(
* Return an RDD with the elements from `this` that are not in `other`.
*
* Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
- * RDD will be <= us.
+ * RDD will be &lt;= us.
*/
def subtract(other: JavaSchemaRDD): JavaSchemaRDD =
this.baseSchemaRDD.subtract(other.baseSchemaRDD).toJavaSchemaRDD