Diffstat (limited to 'core')
4 files changed, 6 insertions, 6 deletions
diff --git a/core/src/main/scala/org/apache/spark/CacheManager.scala b/core/src/main/scala/org/apache/spark/CacheManager.scala
index 923ff411ce..1ec9ba7755 100644
--- a/core/src/main/scala/org/apache/spark/CacheManager.scala
+++ b/core/src/main/scala/org/apache/spark/CacheManager.scala
@@ -120,7 +120,7 @@ private[spark] class CacheManager(blockManager: BlockManager) extends Logging {
    * The effective storage level refers to the level that actually specifies BlockManager put
    * behavior, not the level originally specified by the user. This is mainly for forcing a
    * MEMORY_AND_DISK partition to disk if there is not enough room to unroll the partition,
-   * while preserving the the original semantics of the RDD as specified by the application.
+   * while preserving the original semantics of the RDD as specified by the application.
    */
   private def putInBlockManager[T](
       key: BlockId,
diff --git a/core/src/main/scala/org/apache/spark/rdd/OrderedRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/OrderedRDDFunctions.scala
index d71bb63000..2096a37de9 100644
--- a/core/src/main/scala/org/apache/spark/rdd/OrderedRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/OrderedRDDFunctions.scala
@@ -76,7 +76,7 @@ class OrderedRDDFunctions[K : Ordering : ClassTag,
   }
 
   /**
-   * Returns an RDD containing only the elements in the the inclusive range `lower` to `upper`.
+   * Returns an RDD containing only the elements in the inclusive range `lower` to `upper`.
    * If the RDD has been partitioned using a `RangePartitioner`, then this operation can be
    * performed efficiently by only scanning the partitions that might contain matching elements.
    * Otherwise, a standard `filter` is applied to all partitions.
diff --git a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
index 379dc14ad7..ba773e1e7b 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
@@ -655,7 +655,7 @@ class DAGScheduler(
 
   /**
    * Submit a shuffle map stage to run independently and get a JobWaiter object back. The waiter
-   * can be used to block until the the job finishes executing or can be used to cancel the job.
+   * can be used to block until the job finishes executing or can be used to cancel the job.
    * This method is used for adaptive query planning, to run map stages and look at statistics
    * about their outputs before submitting downstream stages.
    *
diff --git a/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala b/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala
index 4b05469c42..e5cd2eddba 100644
--- a/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala
@@ -47,7 +47,7 @@ import org.apache.spark.util.{ResetSystemProperties, Utils}
 /**
  * A collection of tests against the historyserver, including comparing responses from the json
  * metrics api to a set of known "golden files". If new endpoints / parameters are added,
- * cases should be added to this test suite. The expected outcomes can be genered by running
+ * cases should be added to this test suite. The expected outcomes can be generated by running
 * the HistoryServerSuite.main. Note that this will blindly generate new expectation files matching
 * the current behavior -- the developer must verify that behavior is correct.
 *
@@ -274,12 +274,12 @@ class HistoryServerSuite extends SparkFunSuite with BeforeAndAfter with Matchers
   implicit val webDriver: WebDriver = new HtmlUnitDriver
   implicit val formats = org.json4s.DefaultFormats
 
-  // this test dir is explictly deleted on successful runs; retained for diagnostics when
+  // this test dir is explicitly deleted on successful runs; retained for diagnostics when
   // not
   val logDir = Utils.createDirectory(System.getProperty("java.io.tmpdir", "logs"))
 
   // a new conf is used with the background thread set and running at its fastest
-  // alllowed refresh rate (1Hz)
+  // allowed refresh rate (1Hz)
   val myConf = new SparkConf()
     .set("spark.history.fs.logDirectory", logDir.getAbsolutePath)
     .set("spark.eventLog.dir", logDir.getAbsolutePath)
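
As a side note on the CacheManager comment touched above: the behavior it describes surfaces when an RDD is persisted at MEMORY_AND_DISK, where a partition that cannot be unrolled in memory is written to disk rather than dropped, preserving the caching semantics the application asked for. A minimal sketch, assuming a spark-shell session where `sc` is the usual SparkContext (the data and value names are illustrative, not part of this commit):

import org.apache.spark.storage.StorageLevel

// Persist at MEMORY_AND_DISK: partitions that do not fit in memory while
// being unrolled are spilled to disk instead of being dropped.
val cached = sc.parallelize(1 to 1000000)
  .map(i => (i, i.toString))
  .persist(StorageLevel.MEMORY_AND_DISK)
cached.count()  // materializes the RDD and exercises the caching path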
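
Likewise, the OrderedRDDFunctions hunk documents `filterByRange`; a short usage sketch under the same spark-shell assumption:

// sortByKey installs a RangePartitioner, so filterByRange can skip
// partitions whose key range cannot overlap the requested bounds;
// without such a partitioner it falls back to a plain filter.
val pairs = sc.parallelize((1 to 100).map(i => (i, i.toString))).sortByKey()
val inRange = pairs.filterByRange(20, 30)  // inclusive range [20, 30]
inRange.keys.collect()  // keys 20 through 30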