author     Zheng RuiFeng <ruifengz@foxmail.com>  2016-06-06 09:35:47 +0100
committer  Sean Owen <sowen@cloudera.com>        2016-06-06 09:35:47 +0100
commit     fd8af397132fa1415a4c19d7f5cb5a41aa6ddb27 (patch)
tree       a653b3542d0671c8cb8b3ff7fa3755525c0606a4 /core/src
parent     32f2f95dbdfb21491e46d4b608fd4e8ac7ab8973 (diff)
[MINOR] Fix Typos 'an -> a'
## What changes were proposed in this pull request?

`an -> a`

Use commands like `find . -name '*.R' | xargs -i sh -c "grep -in ' an [^aeiou]' {} && echo {}"` to generate candidates, and review them one by one.

## How was this patch tested?

Manual tests.

Author: Zheng RuiFeng <ruifengz@foxmail.com>

Closes #13515 from zhengruifeng/an_a.
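The grep one-liner above is only a heuristic: it flags any consonant *letter* after "an", but the correct article depends on the following *sound*, so hits such as "an SQL" or "an RDD" still need a human call. A minimal Scala sketch of the same candidate scan (the object and method names here are hypothetical, not part of this PR):

```scala
// Hypothetical Scala equivalent of the grep heuristic above: flag "an "
// followed by a non-vowel letter as a typo candidate for manual review.
object AnATypoScan {
  // Mirrors the pattern ' an [^aeiou]' from the command in the PR description.
  private val candidate = raw"\ban ([^aeiouAEIOU\s]\w*)".r

  def flag(line: String): Seq[String] =
    candidate.findAllMatchIn(line).map(_.group(1)).toSeq

  def main(args: Array[String]): Unit = {
    println(flag("has an commutative add operation")) // List(commutative): real typo
    println(flag("executes an SQL query")) // List(SQL): depends on pronunciation
  }
}
```

The SQL case is exactly why each candidate was reviewed by hand rather than rewritten automatically.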
Diffstat (limited to 'core/src')
-rw-r--r--  core/src/main/scala/org/apache/spark/Accumulable.scala                            2
-rw-r--r--  core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala              2
-rw-r--r--  core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala                   2
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala                     6
-rw-r--r--  core/src/main/scala/org/apache/spark/rdd/JdbcRDD.scala                            6
-rw-r--r--  core/src/main/scala/org/apache/spark/scheduler/Pool.scala                         2
-rw-r--r--  core/src/test/scala/org/apache/spark/broadcast/BroadcastSuite.scala               2
-rw-r--r--  core/src/test/scala/org/apache/spark/deploy/rest/StandaloneRestSubmitSuite.scala  2
-rw-r--r--  core/src/test/scala/org/apache/spark/rpc/RpcEnvSuite.scala                        2
-rw-r--r--  core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala            4
-rw-r--r--  core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala                 2
11 files changed, 16 insertions, 16 deletions
diff --git a/core/src/main/scala/org/apache/spark/Accumulable.scala b/core/src/main/scala/org/apache/spark/Accumulable.scala
index 812145aaee..5532931e2a 100644
--- a/core/src/main/scala/org/apache/spark/Accumulable.scala
+++ b/core/src/main/scala/org/apache/spark/Accumulable.scala
@@ -28,7 +28,7 @@ import org.apache.spark.util.{AccumulatorContext, AccumulatorMetadata, LegacyAcc
/**
- * A data type that can be accumulated, i.e. has an commutative and associative "add" operation,
+ * A data type that can be accumulated, i.e. has a commutative and associative "add" operation,
* but where the result type, `R`, may be different from the element type being added, `T`.
*
* You must define how to add data, and how to merge two of these together. For some data types,
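As an aside on the doc comment just fixed: a hedged sketch of an `Accumulable` whose result type differs from its element type, written against the pre-2.0 `AccumulableParam` API this file belongs to (the object and method names are hypothetical):

```scala
// A sketch under the pre-2.0 API referenced in this file: accumulate String
// elements (T) into a Set[String] result (R). Names here are hypothetical.
import org.apache.spark.{AccumulableParam, SparkContext}

object DistinctWordsExample {
  // "Add" is set insertion / union: commutative and associative, as required.
  implicit object StringSetParam extends AccumulableParam[Set[String], String] {
    def addAccumulator(acc: Set[String], elem: String): Set[String] = acc + elem
    def addInPlace(acc1: Set[String], acc2: Set[String]): Set[String] = acc1 ++ acc2
    def zero(initial: Set[String]): Set[String] = Set.empty[String]
  }

  def distinctWords(sc: SparkContext): Set[String] = {
    val seen = sc.accumulable(Set.empty[String]) // Accumulable[Set[String], String]
    sc.parallelize(Seq("a", "b", "a")).foreach(seen += _)
    seen.value // Set(a, b), read back on the driver
  }
}
```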
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
index bfb6a35f5b..485a8b4222 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
@@ -789,7 +789,7 @@ class JavaSparkContext(val sc: SparkContext)
def cancelAllJobs(): Unit = sc.cancelAllJobs()
/**
- * Returns an Java map of JavaRDDs that have marked themselves as persistent via cache() call.
+ * Returns a Java map of JavaRDDs that have marked themselves as persistent via cache() call.
* Note that this does not necessarily mean the caching or computation was successful.
*/
def getPersistentRDDs: JMap[java.lang.Integer, JavaRDD[_]] = {
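For context, a small usage sketch of the accessor in this hunk (the object and method names are hypothetical; a live `JavaSparkContext` is assumed):

```scala
// Hypothetical usage of getPersistentRDDs: cache an RDD, then list the RDDs
// currently marked persistent, keyed by RDD id.
import java.util.Arrays
import org.apache.spark.api.java.JavaSparkContext

object PersistentRddsExample {
  def listPersistent(jsc: JavaSparkContext): Unit = {
    val rdd = jsc.parallelize(Arrays.asList(1, 2, 3)).cache()
    rdd.count() // force computation so the cached blocks actually materialize
    // As the doc comment notes, "marked persistent" does not guarantee the
    // caching itself succeeded.
    println(jsc.getPersistentRDDs.keySet())
  }
}
```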
diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
index ab5b6c8380..2822eb5d60 100644
--- a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
@@ -919,7 +919,7 @@ private class PythonAccumulatorParam(@transient private val serverHost: String,
}
/**
- * An Wrapper for Python Broadcast, which is written into disk by Python. It also will
+ * A Wrapper for Python Broadcast, which is written into disk by Python. It also will
* write the data into disk after deserialization, then Python can read it from disks.
*/
// scalastyle:off no.finalize
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
index 78606e06fb..9be4cadcb4 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
@@ -408,12 +408,12 @@ object SparkSubmit {
printErrorAndExit("SparkR is not supported for Mesos cluster.")
}
- // If we're running a R app, set the main class to our specific R runner
+ // If we're running an R app, set the main class to our specific R runner
if (args.isR && deployMode == CLIENT) {
if (args.primaryResource == SPARKR_SHELL) {
args.mainClass = "org.apache.spark.api.r.RBackend"
} else {
- // If a R file is provided, add it to the child arguments and list of files to deploy.
+ // If an R file is provided, add it to the child arguments and list of files to deploy.
// Usage: RRunner <main R file> [app arguments]
args.mainClass = "org.apache.spark.deploy.RRunner"
args.childArgs = ArrayBuffer(args.primaryResource) ++ args.childArgs
@@ -422,7 +422,7 @@ object SparkSubmit {
}
if (isYarnCluster && args.isR) {
- // In yarn-cluster mode for a R app, add primary resource to files
+ // In yarn-cluster mode for an R app, add primary resource to files
// that can be distributed with the job
args.files = mergeFileLists(args.files, args.primaryResource)
}
diff --git a/core/src/main/scala/org/apache/spark/rdd/JdbcRDD.scala b/core/src/main/scala/org/apache/spark/rdd/JdbcRDD.scala
index 5426bf80ba..2f42916439 100644
--- a/core/src/main/scala/org/apache/spark/rdd/JdbcRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/JdbcRDD.scala
@@ -34,7 +34,7 @@ private[spark] class JdbcPartition(idx: Int, val lower: Long, val upper: Long) e
// TODO: Expose a jdbcRDD function in SparkContext and mark this as semi-private
/**
- * An RDD that executes an SQL query on a JDBC connection and reads results.
+ * An RDD that executes a SQL query on a JDBC connection and reads results.
* For usage example, see test case JdbcRDDSuite.
*
* @param getConnection a function that returns an open Connection.
@@ -138,7 +138,7 @@ object JdbcRDD {
}
/**
- * Create an RDD that executes an SQL query on a JDBC connection and reads results.
+ * Create an RDD that executes a SQL query on a JDBC connection and reads results.
* For usage example, see test case JavaAPISuite.testJavaJdbcRDD.
*
* @param connectionFactory a factory that returns an open Connection.
@@ -178,7 +178,7 @@ object JdbcRDD {
}
/**
- * Create an RDD that executes an SQL query on a JDBC connection and reads results. Each row is
+ * Create an RDD that executes a SQL query on a JDBC connection and reads results. Each row is
* converted into a `Object` array. For usage example, see test case JavaAPISuite.testJavaJdbcRDD.
*
* @param connectionFactory a factory that returns an open Connection.
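A hedged usage sketch of the constructor documented in this file; the JDBC URL and table name are hypothetical. The query must embed two `?` placeholders, which `JdbcRDD` binds to each partition's lower and upper bound:

```scala
// Sketch of running a partitioned JDBC query with JdbcRDD; the connection
// string and table are made up for illustration.
import java.sql.{DriverManager, ResultSet}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.JdbcRDD

object JdbcRddExample {
  def loadIds(sc: SparkContext): Array[Int] = {
    val rdd = new JdbcRDD(
      sc,
      () => DriverManager.getConnection("jdbc:h2:mem:testdb"),
      "SELECT id FROM data WHERE id >= ? AND id <= ?",
      lowerBound = 1, upperBound = 100, numPartitions = 3,
      mapRow = (rs: ResultSet) => rs.getInt(1)) // convert each row to an Int
    rdd.collect()
  }
}
```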
diff --git a/core/src/main/scala/org/apache/spark/scheduler/Pool.scala b/core/src/main/scala/org/apache/spark/scheduler/Pool.scala
index 732c89c39f..2a69a6c5e8 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/Pool.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/Pool.scala
@@ -26,7 +26,7 @@ import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
/**
- * An Schedulable entity that represents collection of Pools or TaskSetManagers
+ * A Schedulable entity that represents collection of Pools or TaskSetManagers
*/
private[spark] class Pool(
val poolName: String,
diff --git a/core/src/test/scala/org/apache/spark/broadcast/BroadcastSuite.scala b/core/src/test/scala/org/apache/spark/broadcast/BroadcastSuite.scala
index 6657104823..973676398a 100644
--- a/core/src/test/scala/org/apache/spark/broadcast/BroadcastSuite.scala
+++ b/core/src/test/scala/org/apache/spark/broadcast/BroadcastSuite.scala
@@ -138,7 +138,7 @@ class BroadcastSuite extends SparkFunSuite with LocalSparkContext {
}
/**
- * Verify the persistence of state associated with an TorrentBroadcast in a local-cluster.
+ * Verify the persistence of state associated with a TorrentBroadcast in a local-cluster.
*
* This test creates a broadcast variable, uses it on all executors, and then unpersists it.
* In between each step, this test verifies that the broadcast blocks are present only on the
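For reference, the lifecycle this suite exercises fits in a few lines (a minimal sketch with a hypothetical helper, assuming a running `SparkContext`):

```scala
// Create a broadcast, use it on executors, then unpersist it -- the three
// steps whose block-level state this suite verifies.
import org.apache.spark.SparkContext

object BroadcastLifecycle {
  def roundTrip(sc: SparkContext): Array[Int] = {
    val data = sc.broadcast(Array(1, 2, 3))
    val out = sc.parallelize(1 to 4, numSlices = 2).map(_ + data.value.sum).collect()
    data.unpersist(blocking = true) // drop executor copies; refetched if used again
    out
  }
}
```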
diff --git a/core/src/test/scala/org/apache/spark/deploy/rest/StandaloneRestSubmitSuite.scala b/core/src/test/scala/org/apache/spark/deploy/rest/StandaloneRestSubmitSuite.scala
index a7bb9aa468..dd50e33da3 100644
--- a/core/src/test/scala/org/apache/spark/deploy/rest/StandaloneRestSubmitSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/rest/StandaloneRestSubmitSuite.scala
@@ -408,7 +408,7 @@ class StandaloneRestSubmitSuite extends SparkFunSuite with BeforeAndAfterEach {
/**
* Start a [[StandaloneRestServer]] that communicates with the given endpoint.
- * If `faulty` is true, start an [[FaultyStandaloneRestServer]] instead.
+ * If `faulty` is true, start a [[FaultyStandaloneRestServer]] instead.
* Return the master URL that corresponds to the address of this server.
*/
private def startServer(
diff --git a/core/src/test/scala/org/apache/spark/rpc/RpcEnvSuite.scala b/core/src/test/scala/org/apache/spark/rpc/RpcEnvSuite.scala
index 505cd476ff..acdf21df9a 100644
--- a/core/src/test/scala/org/apache/spark/rpc/RpcEnvSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rpc/RpcEnvSuite.scala
@@ -489,7 +489,7 @@ abstract class RpcEnvSuite extends SparkFunSuite with BeforeAndAfterAll {
/**
* Setup an [[RpcEndpoint]] to collect all network events.
*
- * @return the [[RpcEndpointRef]] and an `ConcurrentLinkedQueue` that contains network events.
+ * @return the [[RpcEndpointRef]] and a `ConcurrentLinkedQueue` that contains network events.
*/
private def setupNetworkEndpoint(
_env: RpcEnv,
diff --git a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
index b6765f0645..f28f429e0c 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
@@ -1712,7 +1712,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with Timeou
}
test("reduce tasks should be placed locally with map output") {
- // Create an shuffleMapRdd with 1 partition
+ // Create a shuffleMapRdd with 1 partition
val shuffleMapRdd = new MyRDD(sc, 1, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
@@ -1733,7 +1733,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with Timeou
test("reduce task locality preferences should only include machines with largest map outputs") {
val numMapTasks = 4
- // Create an shuffleMapRdd with more partitions
+ // Create a shuffleMapRdd with more partitions
val shuffleMapRdd = new MyRDD(sc, numMapTasks, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
diff --git a/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala b/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
index 0a8bbba6c5..85ca9d39d4 100644
--- a/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala
@@ -258,7 +258,7 @@ class JsonProtocolSuite extends SparkFunSuite {
}
test("FetchFailed backwards compatibility") {
- // FetchFailed in Spark 1.1.0 does not have an "Message" property.
+ // FetchFailed in Spark 1.1.0 does not have a "Message" property.
val fetchFailed = FetchFailed(BlockManagerId("With or", "without you", 15), 17, 18, 19,
"ignored")
val oldEvent = JsonProtocol.taskEndReasonToJson(fetchFailed)