Diffstat (limited to 'core')
 core/src/main/scala/org/apache/spark/Accumulator.scala          | 3 +--
 core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala | 4 ++--
 core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala | 4 ++--
 core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala | 6 +++---
 core/src/main/scala/org/apache/spark/rdd/RDD.scala              | 2 +-
 5 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/Accumulator.scala b/core/src/main/scala/org/apache/spark/Accumulator.scala
index 7bea636c94..9d5fbefc82 100644
--- a/core/src/main/scala/org/apache/spark/Accumulator.scala
+++ b/core/src/main/scala/org/apache/spark/Accumulator.scala
@@ -24,8 +24,7 @@ package org.apache.spark
* They can be used to implement counters (as in MapReduce) or sums. Spark natively supports
* accumulators of numeric value types, and programmers can add support for new types.
*
- * An accumulator is created from an initial value `v` by calling
- * [[SparkContext#accumulator SparkContext.accumulator]].
+ * An accumulator is created from an initial value `v` by calling `SparkContext.accumulator`.
* Tasks running on the cluster can then add to it using the `+=` operator.
* However, they cannot read its value. Only the driver program can read the accumulator's value,
* using its [[#value]] method.
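
[Editor's note: a quick sketch of the workflow this doc comment describes, using the pre-2.0 Accumulator API shown in this file and assuming a live SparkContext `sc`:]

    // Create an accumulator from an initial value via SparkContext.accumulator.
    val acc = sc.accumulator(0)
    // Tasks on the cluster may only add to it with `+=`; they cannot read it.
    sc.parallelize(1 to 100).foreach(x => acc += x)
    // Only the driver program can read the result, via `value`.
    println(acc.value)  // 5050
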
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
index 766aea213a..9544475ff0 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
@@ -166,7 +166,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
* Return a subset of this RDD sampled by key (via stratified sampling) containing exactly
* math.ceil(numItems * samplingRate) for each stratum (group of pairs with the same key).
*
- * This method differs from [[sampleByKey]] in that we make additional passes over the RDD to
+ * This method differs from `sampleByKey` in that we make additional passes over the RDD to
* create a sample size that's exactly equal to the sum of math.ceil(numItems * samplingRate)
* over all key values with a 99.99% confidence. When sampling without replacement, we need one
* additional pass over the RDD to guarantee sample size; when sampling with replacement, we need
@@ -184,7 +184,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
* Return a subset of this RDD sampled by key (via stratified sampling) containing exactly
* math.ceil(numItems * samplingRate) for each stratum (group of pairs with the same key).
*
- * This method differs from [[sampleByKey]] in that we make additional passes over the RDD to
+ * This method differs from `sampleByKey` in that we make additional passes over the RDD to
* create a sample size that's exactly equal to the sum of math.ceil(numItems * samplingRate)
* over all key values with a 99.99% confidence. When sampling without replacement, we need one
* additional pass over the RDD to guarantee sample size; when sampling with replacement, we need
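
[Editor's note: a minimal sketch of the exact stratified sampling described above, written against the Scala pair-RDD API and assuming a SparkContext `sc`:]

    val pairs = sc.parallelize(Seq(("a", 1), ("a", 2), ("b", 3), ("b", 4)))
    // Per-stratum sampling rates: one fraction per key.
    val fractions = Map("a" -> 0.5, "b" -> 0.5)
    // sampleByKeyExact takes extra passes so each stratum yields exactly
    // math.ceil(numItems * samplingRate) items (here, 1 per key).
    val sample = pairs.sampleByKeyExact(withReplacement = false, fractions, seed = 42L)
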
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
index eda16d957c..91ae1002ab 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala
@@ -393,7 +393,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
def treeReduce(f: JFunction2[T, T, T], depth: Int): T = rdd.treeReduce(f, depth)

/**
- * [[org.apache.spark.api.java.JavaRDDLike#treeReduce]] with suggested depth 2.
+ * `org.apache.spark.api.java.JavaRDDLike.treeReduce` with suggested depth 2.
*/
def treeReduce(f: JFunction2[T, T, T]): T = treeReduce(f, 2)

@@ -440,7 +440,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
}

/**
- * [[org.apache.spark.api.java.JavaRDDLike#treeAggregate]] with suggested depth 2.
+ * `org.apache.spark.api.java.JavaRDDLike.treeAggregate` with suggested depth 2.
*/
def treeAggregate[U](
zeroValue: U,
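
[Editor's note: a short sketch of the tree-shaped variants these doc comments point at, on the Scala RDD API (assumes a SparkContext `sc`):]

    val nums = sc.parallelize(1 to 1000, numSlices = 100)
    // treeReduce defaults to the suggested depth of 2.
    val total = nums.treeReduce(_ + _)
    // treeAggregate combines partial aggregates in a multi-level tree;
    // a deeper tree can help when there are many partitions.
    val total2 = nums.treeAggregate(0)(_ + _, _ + _, depth = 3)
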
diff --git a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
index ec12b9963e..d7bfdbad84 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
@@ -109,7 +109,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
* functions. This method is here for backward compatibility. It does not provide combiner
* classtag information to the shuffle.
*
- * @see [[combineByKeyWithClassTag]]
+ * @see `combineByKeyWithClassTag`
*/
def combineByKey[C](
createCombiner: V => C,
@@ -127,7 +127,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
* This method is here for backward compatibility. It does not provide combiner
* classtag information to the shuffle.
*
- * @see [[combineByKeyWithClassTag]]
+ * @see `combineByKeyWithClassTag`
*/
def combineByKey[C](
createCombiner: V => C,
@@ -608,7 +608,7 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
* existing partitioner/parallelism level. This method is here for backward compatibility. It
* does not provide combiner classtag information to the shuffle.
*
- * @see [[combineByKeyWithClassTag]]
+ * @see `combineByKeyWithClassTag`
*/
def combineByKey[C](
createCombiner: V => C,
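
[Editor's note: as an illustration of the combineByKey overloads documented above, here is a per-key average written against the Scala API (a sketch; assumes a SparkContext `sc`):]

    val scores = sc.parallelize(Seq(("a", 1.0), ("a", 3.0), ("b", 2.0)))
    val sumCounts = scores.combineByKey(
      (v: Double) => (v, 1),                                             // createCombiner
      (acc: (Double, Int), v: Double) => (acc._1 + v, acc._2 + 1),       // mergeValue
      (a: (Double, Int), b: (Double, Int)) => (a._1 + b._1, a._2 + b._2)) // mergeCombiners
    val avgByKey = sumCounts.mapValues { case (sum, n) => sum / n }
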
diff --git a/core/src/main/scala/org/apache/spark/rdd/RDD.scala b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
index 374abccf6a..a7e01f397e 100644
--- a/core/src/main/scala/org/apache/spark/rdd/RDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -1841,7 +1841,7 @@ abstract class RDD[T: ClassTag](
/**
* Defines implicit functions that provide extra functionalities on RDDs of specific types.
*
* For example, [[RDD.rddToPairRDDFunctions]] converts an RDD into a [[PairRDDFunctions]] for
- * key-value-pair RDDs, and enabling extra functionalities such as [[PairRDDFunctions.reduceByKey]].
+ * key-value-pair RDDs, and enabling extra functionalities such as `PairRDDFunctions.reduceByKey`.
*/
object RDD {
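
[Editor's note: because rddToPairRDDFunctions lives on RDD's companion object, the conversion is found in implicit scope and no explicit import is needed. A minimal sketch, assuming a SparkContext `sc`:]

    val words = sc.parallelize(Seq("a", "b", "a"))
    // The implicit conversion to PairRDDFunctions makes reduceByKey available here.
    val counts = words.map(w => (w, 1)).reduceByKey(_ + _)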