From 941b270b706d3b4aea73dbf102cfb6eee0beff63 Mon Sep 17 00:00:00 2001
From: Dongjoon Hyun
Date: Thu, 3 Mar 2016 22:42:12 +0000
Subject: [MINOR] Fix typos in comments and testcase name of code

## What changes were proposed in this pull request?

This PR fixes typos in comments and testcase name of code.

## How was this patch tested?

manual.

Author: Dongjoon Hyun

Closes #11481 from dongjoon-hyun/minor_fix_typos_in_code.
---
 .../java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java  | 2 +-
 examples/src/main/python/mllib/naive_bayes_example.py               | 2 +-
 examples/src/main/python/mllib/ranking_metrics_example.py           | 2 +-
 examples/src/main/python/mllib/word2vec.py                          | 2 +-
 examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala | 2 +-
 examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala | 2 +-
 examples/src/main/scala/org/apache/spark/examples/SparkLR.scala     | 2 +-
 .../src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala  | 4 ++--
 .../org/apache/spark/examples/streaming/TwitterPopularTags.scala    | 2 +-
 9 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
index b9dd3ad957..da2012ad51 100644
--- a/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
+++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaDeveloperApiExample.java
@@ -228,7 +228,7 @@ class MyJavaLogisticRegressionModel
    * Create a copy of the model.
    * The copy is shallow, except for the embedded paramMap, which gets a deep copy.
    * <p>
-   * This is used for the defaul implementation of [[transform()]].
+   * This is used for the default implementation of [[transform()]].
    *
    * In Java, we have to make this method public since Java does not understand Scala's protected
    * modifier.
diff --git a/examples/src/main/python/mllib/naive_bayes_example.py b/examples/src/main/python/mllib/naive_bayes_example.py
index e7d5893d67..35724f7d6a 100644
--- a/examples/src/main/python/mllib/naive_bayes_example.py
+++ b/examples/src/main/python/mllib/naive_bayes_example.py
@@ -47,7 +47,7 @@ if __name__ == "__main__":
     # $example on$
     data = sc.textFile('data/mllib/sample_naive_bayes_data.txt').map(parseLine)

-    # Split data aproximately into training (60%) and test (40%)
+    # Split data approximately into training (60%) and test (40%)
     training, test = data.randomSplit([0.6, 0.4], seed=0)

     # Train a naive Bayes model.
diff --git a/examples/src/main/python/mllib/ranking_metrics_example.py b/examples/src/main/python/mllib/ranking_metrics_example.py
index 327791966c..21333deded 100644
--- a/examples/src/main/python/mllib/ranking_metrics_example.py
+++ b/examples/src/main/python/mllib/ranking_metrics_example.py
@@ -47,7 +47,7 @@ if __name__ == "__main__":
     # Instantiate regression metrics to compare predicted and actual ratings
     metrics = RegressionMetrics(scoreAndLabels)

-    # Root mean sqaured error
+    # Root mean squared error
     print("RMSE = %s" % metrics.rootMeanSquaredError)

     # R-squared
diff --git a/examples/src/main/python/mllib/word2vec.py b/examples/src/main/python/mllib/word2vec.py
index 40d1b88792..4e7d4f7610 100644
--- a/examples/src/main/python/mllib/word2vec.py
+++ b/examples/src/main/python/mllib/word2vec.py
@@ -16,7 +16,7 @@
 #

 # This example uses text8 file from http://mattmahoney.net/dc/text8.zip
-# The file was downloadded, unziped and split into multiple lines using
+# The file was downloaded, unzipped and split into multiple lines using
 #
 # wget http://mattmahoney.net/dc/text8.zip
 # unzip text8.zip
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala b/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
index a3901850f2..f2e4c96fa5 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalFileLR.scala
@@ -30,7 +30,7 @@ import breeze.linalg.{DenseVector, Vector}
  * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
  */
 object LocalFileLR {
-  val D = 10   // Numer of dimensions
+  val D = 10   // Number of dimensions
   val rand = new Random(42)

   case class DataPoint(x: Vector[Double], y: Double)
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
index e4486b949f..f7eb9e9936 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
@@ -35,7 +35,7 @@ import org.apache.spark._
  * org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS based on your needs.
  */
 object SparkHdfsLR {
-  val D = 10   // Numer of dimensions
+  val D = 10   // Number of dimensions
   val rand = new Random(42)

   case class DataPoint(x: Vector[Double], y: Double)
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
index 132800e6e4..036e3d24c9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
@@ -36,7 +36,7 @@ import org.apache.spark._
  */
 object SparkLR {
   val N = 10000  // Number of data points
-  val D = 10   // Numer of dimensions
+  val D = 10   // Number of dimensions
   val R = 0.7  // Scaling factor
   val ITERATIONS = 5
   val rand = new Random(42)
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
index 620ff07631..94b67cb29b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
@@ -60,10 +60,10 @@ object RDDRelation {
     // Write out an RDD as a parquet file with overwrite mode.
     df.write.mode(SaveMode.Overwrite).parquet("pair.parquet")

-    // Read in parquet file. Parquet files are self-describing so the schmema is preserved.
+    // Read in parquet file. Parquet files are self-describing so the schema is preserved.
     val parquetFile = sqlContext.read.parquet("pair.parquet")

-    // Queries can be run using the DSL on parequet files just like the original RDD.
+    // Queries can be run using the DSL on parquet files just like the original RDD.
     parquetFile.where($"key" === 1).select($"value".as("a")).collect().foreach(println)

     // These files can also be registered as tables.
diff --git a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
index c386e39d52..5b69963cc8 100644
--- a/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/streaming/TwitterPopularTags.scala
@@ -44,7 +44,7 @@ object TwitterPopularTags {
     val filters = args.takeRight(args.length - 4)

     // Set the system properties so that Twitter4j library used by twitter stream
-    // can use them to generat OAuth credentials
+    // can use them to generate OAuth credentials
     System.setProperty("twitter4j.oauth.consumerKey", consumerKey)
     System.setProperty("twitter4j.oauth.consumerSecret", consumerSecret)
     System.setProperty("twitter4j.oauth.accessToken", accessToken)