author     Zhang, Liye <liye.zhang@intel.com>    2015-08-25 11:48:55 +0100
committer  Sean Owen <sowen@cloudera.com>        2015-08-25 11:48:55 +0100
commit     5c14890159a5711072bf395f662b2433a389edf9 (patch)
tree       e5dab1e0f05ad3fb755484c5b1812270bd0ec30a /core
parent     0e6368ffaec1965d0c7f89420e04a974675c7f6e (diff)
[DOC] add missing parameters in SparkContext.scala for scala doc
Author: Zhang, Liye <liye.zhang@intel.com>

Closes #8412 from liyezhang556520/minorDoc.
Diffstat (limited to 'core')
-rw-r--r--  core/src/main/scala/org/apache/spark/SparkContext.scala  15
1 file changed, 14 insertions, 1 deletion
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index 1ddaca8a5b..9849aff85d 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -114,6 +114,7 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
* :: DeveloperApi ::
* Alternative constructor for setting preferred locations where Spark will create executors.
*
+ * @param config a [[org.apache.spark.SparkConf]] object specifying other Spark parameters
* @param preferredNodeLocationData used in YARN mode to select nodes to launch containers on.
* Can be generated using [[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]
* from a list of input files or InputFormats for the application.
@@ -145,6 +146,9 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
* @param jars Collection of JARs to send to the cluster. These can be paths on the local file
* system or HDFS, HTTP, HTTPS, or FTP URLs.
* @param environment Environment variables to set on worker nodes.
+ * @param preferredNodeLocationData used in YARN mode to select nodes to launch containers on.
+ * Can be generated using [[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]
+ * from a list of input files or InputFormats for the application.
*/
def this(
master: String,
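For context, a minimal sketch (not part of this commit) of creating a SparkContext via the
alternative constructor documented in the hunk above. The master URL and application name
are illustrative; the remaining parameters (sparkHome, jars, environment,
preferredNodeLocationData) simply take their defaults.

    import org.apache.spark.SparkContext

    // Illustrative values only: a local master with two threads and a made-up app name.
    val sc = new SparkContext("local[2]", "scaladoc-example")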
@@ -841,6 +845,9 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
* @note Small files are preferred, large file is also allowable, but may cause bad performance.
* @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files
* in a directory rather than `.../path/` or `.../path`
+ *
+ * @param path Directory to the input data files, the path can be comma separated paths as the
+ * list of inputs.
* @param minPartitions A suggestion value of the minimal splitting number for input data.
*/
def wholeTextFiles(
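A usage sketch (not from this commit) of the comma-separated path form documented above,
reusing the sc from the constructor sketch earlier; the input directories and the partition
hint are hypothetical.

    // path accepts a comma-separated list of inputs; minPartitions is only a hint.
    val files = sc.wholeTextFiles(
      "hdfs://namenode/data/part1,hdfs://namenode/data/part2",
      minPartitions = 4)

    // Each element is a (fileName, fileContent) pair.
    files.take(1).foreach { case (name, content) =>
      println(s"$name: ${content.length} chars")
    }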
@@ -889,6 +896,9 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
* @note Small files are preferred; very large files may cause bad performance.
* @note On some filesystems, `.../path/&#42;` can be a more efficient way to read all files
* in a directory rather than `.../path/` or `.../path`
+ *
+ * @param path Directory to the input data files, the path can be comma separated paths as the
+ * list of inputs.
* @param minPartitions A suggestion value of the minimal splitting number for input data.
*/
@Experimental
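The @Experimental method in this hunk is, by assumption from the surrounding doc text,
SparkContext.binaryFiles. A minimal sketch, again reusing the sc created earlier, with a
hypothetical input directory:

    // Each element pairs a file path with a PortableDataStream over its bytes;
    // as the doc above notes, small files are preferred.
    val streams = sc.binaryFiles("hdfs://namenode/data/images/", minPartitions = 4)
    streams.take(1).foreach { case (name, stream) =>
      val bytes = stream.toArray()   // materialize the file's contents
      println(s"$name: ${bytes.length} bytes")
    }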
@@ -918,8 +928,11 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationCli
* '''Note:''' We ensure that the byte array for each record in the resulting RDD
* has the provided record length.
*
- * @param path Directory to the input data files
+ * @param path Directory to the input data files, the path can be comma separated paths as the
+ * list of inputs.
* @param recordLength The length at which to split the records
+ * @param conf Configuration for setting up the dataset.
+ *
* @return An RDD of data with values, represented as byte arrays
*/
@Experimental
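The hunk above documents what is, by assumption from the record-length wording,
SparkContext.binaryRecords. A minimal sketch of reading fixed-length records, once more
reusing sc; the path and record length are hypothetical, and conf falls back to the default
Hadoop configuration.

    // Read flat binary files, splitting the input into fixed 512-byte records.
    val records = sc.binaryRecords("hdfs://namenode/data/fixed-records/", recordLength = 512)

    // Every element is an Array[Byte] of exactly recordLength bytes.
    println(records.first().length)   // 512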