author    Andy Konwinski <andyk@berkeley.edu>    2012-10-08 22:49:17 -0700
committer Andy Konwinski <andyk@berkeley.edu>    2012-10-08 22:49:17 -0700
commit 1d79ff602890080a45d21ce2799fafa8532cec03 (patch)
tree   b6c5a03bb22191ff68aaef675ec3ec29cc4c17e6 /core/src/main
parent ac310098ef6a195981080a0ae840533141780943 (diff)
Fixes a typo, adds scaladoc comments to SparkContext constructors.
Diffstat (limited to 'core/src/main')
-rw-r--r--  core/src/main/scala/spark/RDD.scala          | 8
-rw-r--r--  core/src/main/scala/spark/SparkContext.scala | 8
2 files changed, 11 insertions, 5 deletions
diff --git a/core/src/main/scala/spark/RDD.scala b/core/src/main/scala/spark/RDD.scala
index 4d984591bd..17869fb31b 100644
--- a/core/src/main/scala/spark/RDD.scala
+++ b/core/src/main/scala/spark/RDD.scala
@@ -188,7 +188,7 @@ abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serial
     map(x => (x, null)).reduceByKey((x, y) => x, numSplits).map(_._1)
 
   /**
-   * Return a sampled subset of this RDD.
+   * Return a sampled subset of this RDD.
    */
   def sample(withReplacement: Boolean, fraction: Double, seed: Int): RDD[T] =
     new SampledRDD(this, withReplacement, fraction, seed)
@@ -305,7 +305,7 @@ abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serial
     val results = sc.runJob(this, (iter: Iterator[T]) => iter.toArray)
     Array.concat(results: _*)
   }
-
+
   /**
    * Return an array that contains all of the elements in this RDD.
    */
@@ -471,7 +471,7 @@ abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serial
   }
 
   /**
-   * Save this RDD as a text file, using string representations of elements.
+   * Save this RDD as a text file, using string representations of elements.
    */
   def saveAsTextFile(path: String) {
     this.map(x => (NullWritable.get(), new Text(x.toString)))
@@ -479,7 +479,7 @@ abstract class RDD[T: ClassManifest](@transient sc: SparkContext) extends Serial
   }
 
   /**
-   * Save this RDD as a SequenceFile of serialized objects.
+   * Save this RDD as a SequenceFile of serialized objects.
    */
   def saveAsObjectFile(path: String) {
     this.mapPartitions(iter => iter.grouped(10).map(_.toArray))
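
For context, a minimal usage sketch of the four RDD methods whose scaladoc this file's hunks touch, assuming the Spark API of this era (package spark, plus the two-argument SparkContext constructor added in this same commit); the job name and the /tmp output paths are hypothetical placeholders.

import spark.SparkContext

object RDDDocExamples {
  def main(args: Array[String]) {
    val sc = new SparkContext("local[4]", "rdd-doc-examples")
    val nums = sc.parallelize(1 to 1000)

    // sample: a ~10% subset, without replacement, with a fixed seed
    val tenth = nums.sample(false, 0.1, 42)

    // collect: bring every element back to the driver as an Array[T]
    val all: Array[Int] = nums.collect()

    // saveAsTextFile: writes one string representation per element
    nums.saveAsTextFile("/tmp/nums-text")

    // saveAsObjectFile: serializes elements in batches into a SequenceFile
    nums.saveAsObjectFile("/tmp/nums-objects")

    sc.stop()
  }
}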
diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala
index 84fc541f82..47e002201b 100644
--- a/core/src/main/scala/spark/SparkContext.scala
+++ b/core/src/main/scala/spark/SparkContext.scala
@@ -54,15 +54,21 @@ import spark.storage.BlockManagerMaster
  * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
  * cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
  *
+ * @constructor Returns a new SparkContext.
  * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
  * @param jobName A name for your job, to display on the cluster web UI
- * @param sparkHome Location where Spark is instaled on cluster nodes
+ * @param sparkHome Location where Spark is installed on cluster nodes
  * @param jars Collection of JARs to send to the cluster. These can be paths on the local file
  *             system or HDFS, HTTP, HTTPS, or FTP URLs.
  */
 class SparkContext(master: String, jobName: String, val sparkHome: String, val jars: Seq[String])
   extends Logging {
 
+  /**
+   * @constructor Returns a new SparkContext.
+   * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
+   * @param jobName A name for your job, to display on the cluster web UI
+   */
   def this(master: String, jobName: String) = this(master, jobName, null, Nil)
 
   // Ensure logging is initialized before we spawn any threads
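
To illustrate the two constructors documented above, a short sketch; the master URLs, sparkHome path, and jar name are hypothetical placeholders.

import spark.SparkContext

// Two-argument auxiliary constructor: no sparkHome, no jars shipped (null, Nil).
val local = new SparkContext("local[4]", "my-job")

// Primary constructor: cluster master, the Spark install location on worker
// nodes, and a JAR containing this job's classes.
val remote = new SparkContext(
  "spark://host:7077",
  "my-job",
  "/opt/spark",
  Seq("target/my-job.jar"))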