about summary refs log tree commit diff
path: root/core
diff options
context:
space:
mode:
author	Holden Karau <holden@pigscanfly.ca>	2014-08-30 16:58:17 -0700
committer	Matei Zaharia <matei@databricks.com>	2014-08-30 16:58:17 -0700
commitba78383bace52b13ee931c6f2df445f721d5080a (patch)
treea7dfe35f1158acf1910e438d0a818eed06f8e09f /core
parentb6cf1348170951396a6a5d8a65fb670382304f5b (diff)
downloadspark-ba78383bace52b13ee931c6f2df445f721d5080a.tar.gz
spark-ba78383bace52b13ee931c6f2df445f721d5080a.tar.bz2
spark-ba78383bace52b13ee931c6f2df445f721d5080a.zip
SPARK-3318: Documentation update in addFile on how to use SparkFiles.get
Rather than specifying the path to SparkFiles we need to use the filename. Author: Holden Karau <holden@pigscanfly.ca> Closes #2210 from holdenk/SPARK-3318-documentation-for-addfiles-should-say-to-use-file-not-path and squashes the following commits: a25d27a [Holden Karau] Update the JavaSparkContext addFile method to be clear about using fileName with SparkFiles as well 0ebcb05 [Holden Karau] Documentation update in addFile on how to use SparkFiles.get to specify filename rather than path
Diffstat (limited to 'core')
-rw-r--r--	core/src/main/scala/org/apache/spark/SparkContext.scala | 3
-rw-r--r--	core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala | 2
2 files changed, 2 insertions, 3 deletions
diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala
index a80b3cce60..cb4fb7cfbd 100644
--- a/core/src/main/scala/org/apache/spark/SparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -796,7 +796,7 @@ class SparkContext(config: SparkConf) extends Logging {
* Add a file to be downloaded with this Spark job on every node.
* The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
- * use `SparkFiles.get(path)` to find its download location.
+ * use `SparkFiles.get(fileName)` to find its download location.
*/
def addFile(path: String) {
val uri = new URI(path)
@@ -1619,4 +1619,3 @@ private[spark] class WritableConverter[T](
val writableClass: ClassTag[T] => Class[_ <: Writable],
val convert: Writable => T)
extends Serializable
-
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
index e0a4815940..8e178bc848 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
@@ -545,7 +545,7 @@ class JavaSparkContext(val sc: SparkContext) extends JavaSparkContextVarargsWork
* Add a file to be downloaded with this Spark job on every node.
* The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
- * use `SparkFiles.get(path)` to find its download location.
+ * use `SparkFiles.get(fileName)` to find its download location.
*/
def addFile(path: String) {
sc.addFile(path)