author    lfzCarlosC <lfz.carlos@gmail.com>  2016-05-25 10:53:53 -0700
committer Andrew Or <andrew@databricks.com>  2016-05-25 10:53:57 -0700
commit 02c8072eea72425e89256347e1f373a3e76e6eba (patch)
tree   1e30843ada001df76e3fc472bf7011165ac1e7ae
parent d6d3e50719b01005aa0e77349fc9a6ff88fecce3 (diff)
[MINOR][MLLIB][STREAMING][SQL] Fix typos
Fixed typos in source code for the [mllib], [streaming], and [SQL] components. The changes are comment-only and obvious, so no new tests were added.

Author: lfzCarlosC <lfz.carlos@gmail.com>

Closes #13298 from lfzCarlosC/master.
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala  2
-rw-r--r--  mllib/src/main/scala/org/apache/spark/mllib/stat/test/KolmogorovSmirnovTest.scala  2
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala  2
-rw-r--r--  sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVector.java  2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala  2
-rw-r--r--  sql/hive-thriftserver/if/TCLIService.thrift  4
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceStateChangeListener.java  2
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java  2
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java  2
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala  2
-rw-r--r--  streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLogRecordHandle.java  2
11 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala
index 9457c6e9e3..bb4b37ef21 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/IDF.scala
@@ -204,7 +204,7 @@ private object IDFModel {
* Transforms a term frequency (TF) vector to a TF-IDF vector with a IDF vector
*
* @param idf an IDF vector
- * @param v a term frequence vector
+ * @param v a term frequency vector
* @return a TF-IDF vector
*/
def transform(idf: Vector, v: Vector): Vector = {
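
For context, IDFModel.transform backs the public spark.mllib TF-IDF pipeline. A minimal usage sketch follows; it assumes an existing SparkContext `sc`, and the file name and data are purely illustrative:

import org.apache.spark.mllib.feature.{HashingTF, IDF}
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.rdd.RDD

// Hash each document's terms into a term frequency (TF) vector.
val documents: RDD[Seq[String]] = sc.textFile("data.txt").map(_.split(" ").toSeq)
val tf: RDD[Vector] = new HashingTF().transform(documents)

// Fit IDF over the corpus, then rescale each TF vector to TF-IDF.
tf.cache()
val idfModel = new IDF(minDocFreq = 2).fit(tf)
val tfidf: RDD[Vector] = idfModel.transform(tf)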
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/test/KolmogorovSmirnovTest.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/test/KolmogorovSmirnovTest.scala
index 9748fbf2c9..c3de5d75f4 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/stat/test/KolmogorovSmirnovTest.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/test/KolmogorovSmirnovTest.scala
@@ -45,7 +45,7 @@ import org.apache.spark.rdd.RDD
* many elements are in each partition. Once these three values have been returned for every
* partition, we can collect and operate locally. Locally, we can now adjust each distance by the
* appropriate constant (the cumulative sum of number of elements in the prior partitions divided by
- * thedata set size). Finally, we take the maximum absolute value, and this is the statistic.
+ * the data set size). Finally, we take the maximum absolute value, and this is the statistic.
*/
private[stat] object KolmogorovSmirnovTest extends Logging {
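
The comment above describes the distributed computation; the user-facing entry point is Statistics.kolmogorovSmirnovTest. A short sketch of a one-sample test against a standard normal, with illustrative data and an assumed SparkContext `sc`:

import org.apache.spark.mllib.stat.Statistics
import org.apache.spark.rdd.RDD

// Test whether the sample plausibly comes from N(0, 1).
val sample: RDD[Double] = sc.parallelize(Seq(0.1, 0.15, -0.2, 0.3, -0.25))
val result = Statistics.kolmogorovSmirnovTest(sample, "norm", 0.0, 1.0)
println(result) // reports the KS statistic, p-value, and null-hypothesis verdict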
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
index 0a9250b71f..8b7e21b679 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
@@ -591,7 +591,7 @@ case class Expand(
}
// This operator can reuse attributes (for example making them null when doing a roll up) so
- // the contraints of the child may no longer be valid.
+ // the constraints of the child may no longer be valid.
override protected def validConstraints: Set[Expression] = Set.empty[Expression]
}
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVector.java b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVector.java
index e7dccd1b22..3f94255256 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVector.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/ColumnVector.java
@@ -504,7 +504,7 @@ public abstract class ColumnVector implements AutoCloseable {
/**
* Returns a utility object to get structs.
- * provided to keep API compabilitity with InternalRow for code generation
+ * provided to keep API compatibility with InternalRow for code generation
*/
public ColumnarBatch.Row getStruct(int rowId, int size) {
resultStruct.rowId = rowId;
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala
index f8f8bc7d6f..984b84fd13 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala
@@ -188,7 +188,7 @@ class StateStoreSuite extends SparkFunSuite with BeforeAndAfter with PrivateMeth
provider.getStore(-1)
}
- // Prepare some data in the stoer
+ // Prepare some data in the store
val store = provider.getStore(0)
put(store, "a", 1)
assert(store.commit() === 1)
diff --git a/sql/hive-thriftserver/if/TCLIService.thrift b/sql/hive-thriftserver/if/TCLIService.thrift
index baf583fb3e..7cd6fa37ce 100644
--- a/sql/hive-thriftserver/if/TCLIService.thrift
+++ b/sql/hive-thriftserver/if/TCLIService.thrift
@@ -661,7 +661,7 @@ union TGetInfoValue {
// The function returns general information about the data source
// using the same keys as ODBC.
struct TGetInfoReq {
- // The sesssion to run this request against
+ // The session to run this request against
1: required TSessionHandle sessionHandle
2: required TGetInfoType infoType
@@ -1032,7 +1032,7 @@ enum TFetchOrientation {
FETCH_PRIOR,
// Return the rowset at the given fetch offset relative
- // to the curren rowset.
+ // to the current rowset.
// NOT SUPPORTED
FETCH_RELATIVE,
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceStateChangeListener.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceStateChangeListener.java
index 16ad9a991e..d1aadad04c 100644
--- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceStateChangeListener.java
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceStateChangeListener.java
@@ -29,7 +29,7 @@ public interface ServiceStateChangeListener {
* have changed state before this callback is invoked.
*
* This operation is invoked on the thread that initiated the state change,
- * while the service itself in in a sychronized section.
+ * while the service itself is in a synchronized section.
* <ol>
* <li>Any long-lived operation here will prevent the service state
* change from completing in a timely manner.</li>
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
index a35405484a..5014cedd87 100644
--- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -456,7 +456,7 @@ public class SQLOperation extends ExecuteStatementOperation {
private HiveConf getConfigForOperation() throws HiveSQLException {
HiveConf sqlOperationConf = getParentSession().getHiveConf();
if (!getConfOverlay().isEmpty() || shouldRunAsync()) {
- // clone the partent session config for this query
+ // clone the parent session config for this query
sqlOperationConf = new HiveConf(sqlOperationConf);
// apply overlay query specific settings, if any
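
The surrounding method clones the parent session's HiveConf so query-local overrides do not leak into the shared session state. A hedged sketch of the same clone-then-overlay pattern (the helper name, the `sessionConf` value, and the overlay entry are illustrative):

import org.apache.hadoop.hive.conf.HiveConf

// Copy the parent config first; mutate only the copy.
def configForQuery(parent: HiveConf, overlay: Map[String, String]): HiveConf = {
  val queryConf = new HiveConf(parent) // HiveConf's copy constructor
  overlay.foreach { case (key, value) => queryConf.set(key, value) }
  queryConf
}

val queryConf = configForQuery(sessionConf, Map("hive.exec.parallel" -> "true"))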
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java
index 0a10dba8b4..c56a107d42 100644
--- a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
/**
* HiveSessionHookContext.
* Interface passed to the HiveServer2 session hook execution. This enables
- * the hook implementation to accesss session config, user and session handle
+ * the hook implementation to access session config, user and session handle
*/
public interface HiveSessionHookContext {
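
For illustration, a hook implementation consumes this context roughly as below. The class is hypothetical; in HiveServer2, such hooks are registered through the hive.server2.session.hook setting:

import org.apache.hive.service.cli.session.{HiveSessionHook, HiveSessionHookContext}

// Hypothetical hook: record the connecting user in the session config.
class AuditSessionHook extends HiveSessionHook {
  override def run(ctx: HiveSessionHookContext): Unit = {
    ctx.getSessionConf.set("example.audit.user", ctx.getSessionUser)
  }
}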
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
index 46579ecd85..081d85acb9 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
@@ -143,7 +143,7 @@ private[hive] class HiveSessionState(sparkSession: SparkSession)
* converted to a data source table, using the data source set by spark.sql.sources.default.
* The table in CTAS statement will be converted when it meets any of the following conditions:
* - The CTAS does not specify any of a SerDe (ROW FORMAT SERDE), a File Format (STORED AS), or
- * a Storage Hanlder (STORED BY), and the value of hive.default.fileformat in hive-site.xml
+ * a Storage Handler (STORED BY), and the value of hive.default.fileformat in hive-site.xml
* is either TextFile or SequenceFile.
* - The CTAS statement specifies TextFile (STORED AS TEXTFILE) as the file format and no SerDe
* is specified (no ROW FORMAT SERDE clause).
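
To make the conditions concrete, here is a hedged pair of CTAS statements (table and column names are illustrative; assumes a SparkSession `spark` with Hive support and the conversion flag enabled):

// Eligible for conversion: TEXTFILE is named and there is no ROW FORMAT SERDE clause.
spark.sql("CREATE TABLE t_text STORED AS TEXTFILE AS SELECT id, name FROM src")

// Not converted: an explicit SerDe opts the table out.
spark.sql(
  "CREATE TABLE t_serde " +
  "ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' " +
  "STORED AS TEXTFILE " +
  "AS SELECT id, name FROM src")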
diff --git a/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLogRecordHandle.java b/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLogRecordHandle.java
index 662889e779..3c5cc7e2ca 100644
--- a/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLogRecordHandle.java
+++ b/streaming/src/main/java/org/apache/spark/streaming/util/WriteAheadLogRecordHandle.java
@@ -23,7 +23,7 @@ package org.apache.spark.streaming.util;
* This abstract class represents a handle that refers to a record written in a
* {@link org.apache.spark.streaming.util.WriteAheadLog WriteAheadLog}.
* It must contain all the information necessary for the record to be read and returned by
- * an implemenation of the WriteAheadLog class.
+ * an implementation of the WriteAheadLog class.
*
* @see org.apache.spark.streaming.util.WriteAheadLog
*/
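
Concretely, a custom WriteAheadLog pairs each write with a handle carrying whatever its read path needs. A minimal sketch (the class and field names are illustrative; Spark's own file-based WAL uses a similar path/offset/length segment):

import org.apache.spark.streaming.util.WriteAheadLogRecordHandle

// Everything needed to locate and read the record back later.
case class MyWALRecordHandle(path: String, offset: Long, length: Int)
  extends WriteAheadLogRecordHandle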