author    Jacek Laskowski <jacek@japila.pl>  2016-04-02 08:12:04 -0700
committer Sean Owen <sowen@cloudera.com>     2016-04-02 08:12:04 -0700
commit    06694f1c68cb752ea311144f0dbe50e92e1393cf (patch)
tree      3e4de2ebd92f4909b3245f6686a400aefe56eae1 /sql
parent    67d753516da9b6318cd4001bb7ae91703aaf098d (diff)
[MINOR] Typo fixes
## What changes were proposed in this pull request?

Typo fixes. No functional changes.

## How was this patch tested?

Built the sources and ran with samples.

Author: Jacek Laskowski <jacek@japila.pl>

Closes #11802 from jaceklaskowski/typo-fixes.
Diffstat (limited to 'sql')
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala |  4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala                    |  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoin.scala      |  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/functions.scala                              | 12
4 files changed, 10 insertions, 10 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
index ecf4285c46..aceeb8aadc 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/LogicalPlan.scala
@@ -79,13 +79,13 @@ abstract class LogicalPlan extends QueryPlan[LogicalPlan] with Logging {
/**
* Computes [[Statistics]] for this plan. The default implementation assumes the output
- * cardinality is the product of of all child plan's cardinality, i.e. applies in the case
+ * cardinality is the product of all child plan's cardinality, i.e. applies in the case
* of cartesian joins.
*
* [[LeafNode]]s must override this.
*/
def statistics: Statistics = {
- if (children.size == 0) {
+ if (children.isEmpty) {
throw new UnsupportedOperationException(s"LeafNode $nodeName must implement statistics.")
}
Statistics(sizeInBytes = children.map(_.statistics.sizeInBytes).product)
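For context on the hunk above: the default statistics multiply the size estimates of all children, so a leaf node, which has no children, must supply its own figure or the isEmpty check throws. A minimal sketch of such an override, using a hypothetical MyRelation leaf with a known size (not part of this commit):

    import org.apache.spark.sql.catalyst.expressions.Attribute
    import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, Statistics}

    // Hypothetical leaf backed by a source whose byte size is known up front.
    case class MyRelation(output: Seq[Attribute], sizeHint: BigInt) extends LeafNode {
      // Without this override, LogicalPlan.statistics would throw
      // UnsupportedOperationException: a leaf has no children to multiply.
      override def statistics: Statistics = Statistics(sizeInBytes = sizeHint)
    }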
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala b/sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala
index d7cd84fd24..c5df028485 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala
@@ -37,7 +37,7 @@ class ExperimentalMethods private[sql]() {
/**
* Allows extra strategies to be injected into the query planner at runtime. Note this API
- * should be consider experimental and is not intended to be stable across releases.
+ * should be considered experimental and is not intended to be stable across releases.
*
* @since 1.3.0
*/
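As a usage sketch of the hook this scaladoc describes (MyStrategy and the in-scope sqlContext are illustrative assumptions, not part of this commit):

    import org.apache.spark.sql.Strategy
    import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
    import org.apache.spark.sql.execution.SparkPlan

    // Hypothetical strategy that declines every plan; a real one would
    // pattern-match on logical operators it knows how to plan.
    object MyStrategy extends Strategy {
      def apply(plan: LogicalPlan): Seq[SparkPlan] = Nil
    }

    // Injected at runtime, with the caveat above: experimental, and not
    // guaranteed stable across releases.
    sqlContext.experimental.extraStrategies = Seq(MyStrategy)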
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoin.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoin.scala
index f5b083c216..0ed1ed41b0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoin.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoin.scala
@@ -32,7 +32,7 @@ import org.apache.spark.util.collection.CompactBuffer
/**
* Performs an inner hash join of two child relations. When the output RDD of this operator is
* being constructed, a Spark job is asynchronously started to calculate the values for the
- * broadcasted relation. This data is then placed in a Spark broadcast variable. The streamed
+ * broadcast relation. This data is then placed in a Spark broadcast variable. The streamed
* relation is not shuffled.
*/
case class BroadcastHashJoin(
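One way to steer the planner toward this operator (assuming two hypothetical DataFrames, large and small) is the broadcast hint from org.apache.spark.sql.functions:

    import org.apache.spark.sql.functions.broadcast

    // `small` is shipped to every executor in a Spark broadcast variable;
    // `large` streams through unshuffled, as the scaladoc above describes.
    val joined = large.join(broadcast(small), "id")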
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 74906050ac..baf947d037 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -2232,7 +2232,7 @@ object functions {
/**
* Splits str around pattern (pattern is a regular expression).
- * NOTE: pattern is a string represent the regular expression.
+ * NOTE: pattern is a string representation of the regular expression.
*
* @group string_funcs
* @since 1.5.0
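For illustration of the regex semantics the corrected note stresses (df and the column name csv are assumed):

    import org.apache.spark.sql.functions.{col, split}

    // The second argument is a regular expression, not a literal delimiter,
    // so "a,b,,c" yields Array("a", "b", "", "c").
    val parts = df.select(split(col("csv"), ",").as("parts"))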
@@ -2267,9 +2267,9 @@ object functions {
/**
* Translate any character in the src by a character in replaceString.
- * The characters in replaceString is corresponding to the characters in matchingString.
- * The translate will happen when any character in the string matching with the character
- * in the matchingString.
+ * The characters in replaceString correspond to the characters in matchingString.
+ * The translate will happen when any character in the string matches the character
+ * in the `matchingString`.
*
* @group string_funcs
* @since 1.5.0
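A quick sketch of the positional mapping the reworded scaladoc describes (df and the column word are assumed):

    import org.apache.spark.sql.functions.{col, translate}

    // Each character of "aeiou" found in the source is replaced by the
    // character at the same position in "AEIOU": "spark" becomes "spArk".
    val shouted = df.select(translate(col("word"), "aeiou", "AEIOU"))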
@@ -2692,7 +2692,7 @@ object functions {
//////////////////////////////////////////////////////////////////////////////////////////////
/**
- * Returns true if the array contain the value
+ * Returns true if the array contains `value`
* @group collection_funcs
* @since 1.5.0
*/
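For illustration (df and its array column tags are assumed):

    import org.apache.spark.sql.functions.{array_contains, col}

    // Keeps rows whose array column `tags` contains the literal "sql".
    val tagged = df.filter(array_contains(col("tags"), "sql"))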
@@ -2920,7 +2920,7 @@ object functions {
/**
* Defines a user-defined function (UDF) using a Scala closure. For this variant, the caller must
- * specifcy the output data type, and there is no automatic input type coercion.
+ * specify the output data type, and there is no automatic input type coercion.
*
* @param f A closure in Scala
* @param dataType The output data type of the UDF
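A minimal sketch of this variant (df and its integer column n are assumed):

    import org.apache.spark.sql.functions.{col, udf}
    import org.apache.spark.sql.types.IntegerType

    // The output type is given explicitly, and inputs are not coerced,
    // so the column passed in must already be an Int.
    val plusOne = udf((x: Int) => x + 1, IntegerType)
    val bumped = df.select(plusOne(col("n")))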