aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHerman van Hovell <hvanhovell@questtec.nl>2015-08-15 10:46:04 +0100
committerSean Owen <sowen@cloudera.com>2015-08-15 10:46:04 +0100
commita85fb6c07fdda5c74d53d6373910dcf5db3ff111 (patch)
treef0f3ce3d432dc004c07e7fb68e5be9bb8457feea
parent7c1e56825b716a7d703dff38254b4739755ac0c4 (diff)
downloadspark-a85fb6c07fdda5c74d53d6373910dcf5db3ff111.tar.gz
spark-a85fb6c07fdda5c74d53d6373910dcf5db3ff111.tar.bz2
spark-a85fb6c07fdda5c74d53d6373910dcf5db3ff111.zip
[SPARK-9980] [BUILD] Fix SBT publishLocal error due to invalid characters in doc
Tiny modification to a few comments to make ```sbt publishLocal``` work again. Author: Herman van Hovell <hvanhovell@questtec.nl> Closes #8209 from hvanhovell/SPARK-9980.
-rw-r--r--core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java6
-rw-r--r--examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java4
-rw-r--r--examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java2
-rw-r--r--launcher/src/main/java/org/apache/spark/launcher/Main.java4
-rw-r--r--launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java2
-rw-r--r--launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java6
-rw-r--r--launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java4
-rw-r--r--launcher/src/main/java/org/apache/spark/launcher/SparkSubmitOptionParser.java8
-rw-r--r--unsafe/src/main/java/org/apache/spark/unsafe/memory/TaskMemoryManager.java2
9 files changed, 19 insertions, 19 deletions
diff --git a/core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java b/core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java
index 5f3a4fcf4d..b24eed3952 100644
--- a/core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java
+++ b/core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java
@@ -92,9 +92,9 @@ public final class BytesToBytesMap {
/**
* The maximum number of keys that BytesToBytesMap supports. The hash table has to be
- * power-of-2-sized and its backing Java array can contain at most (1 << 30) elements, since
- * that's the largest power-of-2 that's less than Integer.MAX_VALUE. We need two long array
- * entries per key, giving us a maximum capacity of (1 << 29).
+ * power-of-2-sized and its backing Java array can contain at most (1 &lt;&lt; 30) elements,
+ * since that's the largest power-of-2 that's less than Integer.MAX_VALUE. We need two long array
+ * entries per key, giving us a maximum capacity of (1 &lt;&lt; 29).
*/
@VisibleForTesting
static final int MAX_CAPACITY = (1 << 29);
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
index 3f1fe900b0..a377694507 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
@@ -124,7 +124,7 @@ class MyJavaLogisticRegression
/**
* Param for max number of iterations
- * <p/>
+ * <p>
* NOTE: The usual way to add a parameter to a model or algorithm is to include:
* - val myParamName: ParamType
* - def getMyParamName
@@ -222,7 +222,7 @@ class MyJavaLogisticRegressionModel
/**
* Create a copy of the model.
* The copy is shallow, except for the embedded paramMap, which gets a deep copy.
- * <p/>
+ * <p>
* This is used for the defaul implementation of [[transform()]].
*
* In Java, we have to make this method public since Java does not understand Scala's protected
diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java
index 02f58f48b0..99b63a2590 100644
--- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java
+++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaStatefulNetworkWordCount.java
@@ -45,7 +45,7 @@ import org.apache.spark.streaming.api.java.JavaStreamingContext;
* Usage: JavaStatefulNetworkWordCount <hostname> <port>
* <hostname> and <port> describe the TCP server that Spark Streaming would connect to receive
* data.
- * <p/>
+ * <p>
* To run this on your local machine, you need to first run a Netcat server
* `$ nc -lk 9999`
* and then run the example
diff --git a/launcher/src/main/java/org/apache/spark/launcher/Main.java b/launcher/src/main/java/org/apache/spark/launcher/Main.java
index 62492f9baf..a4e3acc674 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/Main.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/Main.java
@@ -32,7 +32,7 @@ class Main {
/**
* Usage: Main [class] [class args]
- * <p/>
+ * <p>
* This CLI works in two different modes:
* <ul>
* <li>"spark-submit": if <i>class</i> is "org.apache.spark.deploy.SparkSubmit", the
@@ -42,7 +42,7 @@ class Main {
*
* This class works in tandem with the "bin/spark-class" script on Unix-like systems, and
* "bin/spark-class2.cmd" batch script on Windows to execute the final command.
- * <p/>
+ * <p>
* On Unix-like systems, the output is a list of command arguments, separated by the NULL
* character. On Windows, the output is a command line suitable for direct execution from the
* script.
diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
index 5f95e2c74f..931a24cfd4 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java
@@ -28,7 +28,7 @@ import static org.apache.spark.launcher.CommandBuilderUtils.*;
/**
* Command builder for internal Spark classes.
- * <p/>
+ * <p>
* This class handles building the command to launch all internal Spark classes except for
* SparkSubmit (which is handled by {@link SparkSubmitCommandBuilder} class.
*/
diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java b/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
index 03c9358bc8..57993405e4 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java
@@ -193,7 +193,7 @@ public class SparkLauncher {
* Adds a no-value argument to the Spark invocation. If the argument is known, this method
* validates whether the argument is indeed a no-value argument, and throws an exception
* otherwise.
- * <p/>
+ * <p>
* Use this method with caution. It is possible to create an invalid Spark command by passing
* unknown arguments to this method, since those are allowed for forward compatibility.
*
@@ -211,10 +211,10 @@ public class SparkLauncher {
* Adds an argument with a value to the Spark invocation. If the argument name corresponds to
* a known argument, the code validates that the argument actually expects a value, and throws
* an exception otherwise.
- * <p/>
+ * <p>
* It is safe to add arguments modified by other methods in this class (such as
* {@link #setMaster(String)} - the last invocation will be the one to take effect.
- * <p/>
+ * <p>
* Use this method with caution. It is possible to create an invalid Spark command by passing
* unknown arguments to this method, since those are allowed for forward compatibility.
*
diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
index 4f354cedee..fc87814a59 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java
@@ -25,11 +25,11 @@ import static org.apache.spark.launcher.CommandBuilderUtils.*;
/**
* Special command builder for handling a CLI invocation of SparkSubmit.
- * <p/>
+ * <p>
* This builder adds command line parsing compatible with SparkSubmit. It handles setting
* driver-side options and special parsing behavior needed for the special-casing certain internal
* Spark applications.
- * <p/>
+ * <p>
* This class has also some special features to aid launching pyspark.
*/
class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitOptionParser.java b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitOptionParser.java
index 5779eb3fc0..6767cc5079 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitOptionParser.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitOptionParser.java
@@ -23,7 +23,7 @@ import java.util.regex.Pattern;
/**
* Parser for spark-submit command line options.
- * <p/>
+ * <p>
* This class encapsulates the parsing code for spark-submit command line options, so that there
* is a single list of options that needs to be maintained (well, sort of, but it makes it harder
* to break things).
@@ -80,10 +80,10 @@ class SparkSubmitOptionParser {
* This is the canonical list of spark-submit options. Each entry in the array contains the
* different aliases for the same option; the first element of each entry is the "official"
* name of the option, passed to {@link #handle(String, String)}.
- * <p/>
+ * <p>
* Options not listed here nor in the "switch" list below will result in a call to
* {@link $#handleUnknown(String)}.
- * <p/>
+ * <p>
* These two arrays are visible for tests.
*/
final String[][] opts = {
@@ -130,7 +130,7 @@ class SparkSubmitOptionParser {
/**
* Parse a list of spark-submit command line options.
- * <p/>
+ * <p>
* See SparkSubmitArguments.scala for a more formal description of available options.
*
* @throws IllegalArgumentException If an error is found during parsing.
diff --git a/unsafe/src/main/java/org/apache/spark/unsafe/memory/TaskMemoryManager.java b/unsafe/src/main/java/org/apache/spark/unsafe/memory/TaskMemoryManager.java
index ca70d7f4a4..97b2c93f0d 100644
--- a/unsafe/src/main/java/org/apache/spark/unsafe/memory/TaskMemoryManager.java
+++ b/unsafe/src/main/java/org/apache/spark/unsafe/memory/TaskMemoryManager.java
@@ -60,7 +60,7 @@ public class TaskMemoryManager {
/**
* Maximum supported data page size (in bytes). In principle, the maximum addressable page size is
- * (1L << OFFSET_BITS) bytes, which is 2+ petabytes. However, the on-heap allocator's maximum page
+ * (1L &lt;&lt; OFFSET_BITS) bytes, which is 2+ petabytes. However, the on-heap allocator's maximum page
* size is limited by the maximum amount of data that can be stored in a long[] array, which is
* (2^32 - 1) * 8 bytes (or 16 gigabytes). Therefore, we cap this at 16 gigabytes.
*/