-rw-r--r--  core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala                2
-rw-r--r--  core/src/test/scala/org/apache/sparktest/ImplicitSuite.scala                     4
-rwxr-xr-x  dev/run-tests.py                                                                 2
-rw-r--r--  examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java 2
-rw-r--r--  examples/src/main/python/mllib/naive_bayes_example.py                            2
-rw-r--r--  examples/src/main/python/mllib/ranking_metrics_example.py                        2
-rw-r--r--  examples/src/main/python/mllib/word2vec.py                                       2
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala              2
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala              2
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/SparkLR.scala                  2
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala          4
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala 2
-rw-r--r--  graphx/src/test/scala/org/apache/spark/graphx/EdgeSuite.scala                    2
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala                 2
-rw-r--r--  project/SparkBuild.scala                                                         2
-rw-r--r--  python/pyspark/mllib/fpm.py                                                       2
-rw-r--r--  repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkIMain.scala            4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala            2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/streaming/OffsetSuite.scala         2
19 files changed, 22 insertions, 22 deletions
diff --git a/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala b/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
index f157a451ef..fa078ee25a 100644
--- a/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
+++ b/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
@@ -28,7 +28,7 @@ import org.apache.spark._
* of them will be combined together, showed in one line.
*/
private[spark] class ConsoleProgressBar(sc: SparkContext) extends Logging {
- // Carrige return
+ // Carriage return
val CR = '\r'
// Update period of progress bar, in milliseconds
val UPDATE_PERIOD = 200L
diff --git a/core/src/test/scala/org/apache/sparktest/ImplicitSuite.scala b/core/src/test/scala/org/apache/sparktest/ImplicitSuite.scala
index daa795a043..2fb09ead4b 100644
--- a/core/src/test/scala/org/apache/sparktest/ImplicitSuite.scala
+++ b/core/src/test/scala/org/apache/sparktest/ImplicitSuite.scala
@@ -26,11 +26,11 @@ package org.apache.sparktest
*/
class ImplicitSuite {
- // We only want to test if `implict` works well with the compiler, so we don't need a real
+ // We only want to test if `implicit` works well with the compiler, so we don't need a real
// SparkContext.
def mockSparkContext[T]: org.apache.spark.SparkContext = null
- // We only want to test if `implict` works well with the compiler, so we don't need a real RDD.
+ // We only want to test if `implicit` works well with the compiler, so we don't need a real RDD.
def mockRDD[T]: org.apache.spark.rdd.RDD[T] = null
def testRddToPairRDDFunctions(): Unit = {
diff --git a/dev/run-tests.py b/dev/run-tests.py
index b65d1a309c..aa6af564be 100755
--- a/dev/run-tests.py
+++ b/dev/run-tests.py
@@ -563,7 +563,7 @@ def main():
# backwards compatibility checks
if build_tool == "sbt":
- # Note: compatiblity tests only supported in sbt for now
+ # Note: compatibility tests only supported in sbt for now
detect_binary_inop_with_mima()
# run the test suites
diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
index b9dd3ad957..da2012ad51 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
@@ -228,7 +228,7 @@ class MyJavaLogisticRegressionModel
* Create a copy of the model.
* The copy is shallow, except for the embedded paramMap, which gets a deep copy.
* <p>
- * This is used for the defaul implementation of [[transform()]].
+ * This is used for the default implementation of [[transform()]].
*
* In Java, we have to make this method public since Java does not understand Scala's protected
* modifier.
diff --git a/examples/src/main/python/mllib/naive_bayes_example.py b/examples/src/main/python/mllib/naive_bayes_example.py
index e7d5893d67..35724f7d6a 100644
--- a/examples/src/main/python/mllib/naive_bayes_example.py
+++ b/examples/src/main/python/mllib/naive_bayes_example.py
@@ -47,7 +47,7 @@ if __name__ == "__main__":
# $example on$
data = sc.textFile('data/mllib/sample_naive_bayes_data.txt').map(parseLine)
- # Split data aproximately into training (60%) and test (40%)
+ # Split data approximately into training (60%) and test (40%)
training, test = data.randomSplit([0.6, 0.4], seed=0)
# Train a naive Bayes model.
diff --git a/examples/src/main/python/mllib/ranking_metrics_example.py b/examples/src/main/python/mllib/ranking_metrics_example.py
index 327791966c..21333deded 100644
--- a/examples/src/main/python/mllib/ranking_metrics_example.py
+++ b/examples/src/main/python/mllib/ranking_metrics_example.py
@@ -47,7 +47,7 @@ if __name__ == "__main__":
# Instantiate regression metrics to compare predicted and actual ratings
metrics = RegressionMetrics(scoreAndLabels)
- # Root mean sqaured error
+ # Root mean squared error
print("RMSE = %s" % metrics.rootMeanSquaredError)
# R-squared
diff --git a/examples/src/main/python/mllib/word2vec.py b/examples/src/main/python/mllib/word2vec.py
index 40d1b88792..4e7d4f7610 100644
--- a/examples/src/main/python/mllib/word2vec.py
+++ b/examples/src/main/python/mllib/word2vec.py
@@ -16,7 +16,7 @@
#
# This example uses text8 file from http://mattmahoney.net/dc/text8.zip
-# The file was downloadded, unziped and split into multiple lines using
+# The file was downloaded, unzipped and split into multiple lines using
#
# wget http://mattmahoney.net/dc/text8.zip
# unzip text8.zip
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala b/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
index a3901850f2..f2e4c96fa5 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
@@ -30,7 +30,7 @@ import breeze.linalg.{DenseVector, Vector}
* org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
*/
object LocalFileLR {
- val D = 10 // Numer of dimensions
+ val D = 10 // Number of dimensions
val rand = new Random(42)
case class DataPoint(x: Vector[Double], y: Double)
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
index e4486b949f..f7eb9e9936 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
@@ -35,7 +35,7 @@ import org.apache.spark._
* org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
*/
object SparkHdfsLR {
- val D = 10 // Numer of dimensions
+ val D = 10 // Number of dimensions
val rand = new Random(42)
case class DataPoint(x: Vector[Double], y: Double)
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
index 132800e6e4..036e3d24c9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
@@ -36,7 +36,7 @@ import org.apache.spark._
*/
object SparkLR {
val N = 10000 // Number of data points
- val D = 10 // Numer of dimensions
+ val D = 10 // Number of dimensions
val R = 0.7 // Scaling factor
val ITERATIONS = 5
val rand = new Random(42)
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
index 620ff07631..94b67cb29b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
@@ -60,10 +60,10 @@ object RDDRelation {
// Write out an RDD as a parquet file with overwrite mode.
df.write.mode(SaveMode.Overwrite).parquet("pair.parquet")
- // Read in parquet file. Parquet files are self-describing so the schmema is preserved.
+ // Read in parquet file. Parquet files are self-describing so the schema is preserved.
val parquetFile = sqlContext.read.parquet("pair.parquet")
- // Queries can be run using the DSL on parequet files just like the original RDD.
+ // Queries can be run using the DSL on parquet files just like the original RDD.
parquetFile.where($"key" === 1).select($"value".as("a")).collect().foreach(println)
// These files can also be registered as tables.
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
index c386e39d52..5b69963cc8 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
@@ -44,7 +44,7 @@ object TwitterPopularTags {
val filters = args.takeRight(args.length - 4)
// Set the system properties so that Twitter4j library used by twitter stream
- // can use them to generat OAuth credentials
+ // can use them to generate OAuth credentials
System.setProperty("twitter4j.oauth.consumerKey", consumerKey)
System.setProperty("twitter4j.oauth.consumerSecret", consumerSecret)
System.setProperty("twitter4j.oauth.accessToken", accessToken)
diff --git a/graphx/src/test/scala/org/apache/spark/graphx/EdgeSuite.scala b/graphx/src/test/scala/org/apache/spark/graphx/EdgeSuite.scala
index 094a63472e..4d6b899c83 100644
--- a/graphx/src/test/scala/org/apache/spark/graphx/EdgeSuite.scala
+++ b/graphx/src/test/scala/org/apache/spark/graphx/EdgeSuite.scala
@@ -21,7 +21,7 @@ import org.apache.spark.SparkFunSuite
class EdgeSuite extends SparkFunSuite {
test ("compare") {
- // decending order
+ // descending order
val testEdges: Array[Edge[Int]] = Array(
Edge(0x7FEDCBA987654321L, -0x7FEDCBA987654321L, 1),
Edge(0x2345L, 0x1234L, 1),
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala b/mllib/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala
index 94a24b527b..fd2f8d3875 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/fpm/PrefixSpan.scala
@@ -541,7 +541,7 @@ object PrefixSpan extends Logging {
}
/**
- * Represents a frequence sequence.
+ * Represents a frequent sequence.
* @param sequence a sequence of itemsets stored as an Array of Arrays
* @param freq frequency
* @tparam Item item type
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 1ba6a07513..a380c4cca2 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -688,7 +688,7 @@ object Unidoc {
"-noqualifier", "java.lang"
),
- // Use GitHub repository for Scaladoc source linke
+ // Use GitHub repository for Scaladoc source links
unidocSourceBase := s"https://github.com/apache/spark/tree/v${version.value}",
scalacOptions in (ScalaUnidoc, unidoc) ++= Seq(
diff --git a/python/pyspark/mllib/fpm.py b/python/pyspark/mllib/fpm.py
index 5c9706cb8c..f339e50891 100644
--- a/python/pyspark/mllib/fpm.py
+++ b/python/pyspark/mllib/fpm.py
@@ -127,7 +127,7 @@ class PrefixSpanModel(JavaModelWrapper):
@since("1.6.0")
def freqSequences(self):
- """Gets frequence sequences"""
+ """Gets frequent sequences"""
return self.call("getFreqSequences").map(lambda x: PrefixSpan.FreqSequence(x[0], x[1]))
diff --git a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkIMain.scala b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkIMain.scala
index 7fcb423575..fc260c0310 100644
--- a/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkIMain.scala
+++ b/repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkIMain.scala
@@ -72,7 +72,7 @@ import org.apache.spark.annotation.DeveloperApi
* all variables defined by that code. To extract the result of an
* interpreted line to show the user, a second "result object" is created
* which imports the variables exported by the above object and then
- * exports members called "$eval" and "$print". To accomodate user expressions
+ * exports members called "$eval" and "$print". To accommodate user expressions
* that read from variables or methods defined in previous statements, "import"
* statements are used.
*
@@ -1515,7 +1515,7 @@ import org.apache.spark.annotation.DeveloperApi
exprTyper.symbolOfLine(code)
/**
- * Constucts type information based on the provided expression's final
+ * Constructs type information based on the provided expression's final
* result or the definition provided.
*
* @param expr The expression or definition
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala
index 8e432e8f3d..46b3877a7c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala
@@ -29,7 +29,7 @@ private case object OracleDialect extends JdbcDialect {
override def getCatalystType(
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
// Handle NUMBER fields that have no precision/scale in special way
- // because JDBC ResultSetMetaData converts this to 0 procision and -127 scale
+ // because JDBC ResultSetMetaData converts this to 0 precision and -127 scale
// For more details, please see
// https://github.com/apache/spark/pull/8780#issuecomment-145598968
// and
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/OffsetSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/OffsetSuite.scala
index 989465826d..9590af4e77 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/OffsetSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/OffsetSuite.scala
@@ -23,7 +23,7 @@ import org.apache.spark.sql.execution.streaming.{CompositeOffset, LongOffset, Of
trait OffsetSuite extends SparkFunSuite {
/** Creates test to check all the comparisons of offsets given a `one` that is less than `two`. */
def compare(one: Offset, two: Offset): Unit = {
- test(s"comparision $one <=> $two") {
+ test(s"comparison $one <=> $two") {
assert(one < two)
assert(one <= two)
assert(one <= one)