aboutsummaryrefslogtreecommitdiff
path: root/sql/core
diff options
context:
space:
mode:
authorSean Zhong <seanzhong@databricks.com>2016-08-22 13:31:38 -0700
committerYin Huai <yhuai@databricks.com>2016-08-22 13:31:38 -0700
commit929cb8beed9b7014231580cc002853236a5337d6 (patch)
tree36bdbf83517f6c766324c2ca0920915c2bcbecf8 /sql/core
parent6f3cd36f93c11265449fdce3323e139fec8ab22d (diff)
downloadspark-929cb8beed9b7014231580cc002853236a5337d6.tar.gz
spark-929cb8beed9b7014231580cc002853236a5337d6.tar.bz2
spark-929cb8beed9b7014231580cc002853236a5337d6.zip
[MINOR][SQL] Fix some typos in comments and test hints
## What changes were proposed in this pull request?

Fix some typos in comments and test hints

## How was this patch tested?

N/A.

Author: Sean Zhong <seanzhong@databricks.com>

Closes #14755 from clockfly/fix_minor_typo.
Diffstat (limited to 'sql/core')
-rw-r--r-- sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java | 2
-rw-r--r-- sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala | 6
-rw-r--r-- sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala | 6
3 files changed, 7 insertions, 7 deletions
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java b/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
index eb105bd09a..0d51dc9ff8 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
@@ -99,7 +99,7 @@ public final class UnsafeKVExternalSorter {
// The array will be used to do in-place sort, which require half of the space to be empty.
assert(map.numKeys() <= map.getArray().size() / 2);
// During spilling, the array in map will not be used, so we can borrow that and use it
- // as the underline array for in-memory sorter (it's always large enough).
+ // as the underlying array for in-memory sorter (it's always large enough).
// Since we will not grow the array, it's fine to pass `null` as consumer.
final UnsafeInMemorySorter inMemSorter = new UnsafeInMemorySorter(
null, taskMemoryManager, recordComparator, prefixComparator, map.getArray(),
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala
index 4b8adf5230..4e072a92cc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala
@@ -32,9 +32,9 @@ import org.apache.spark.unsafe.KVIterator
* An iterator used to evaluate aggregate functions. It operates on [[UnsafeRow]]s.
*
* This iterator first uses hash-based aggregation to process input rows. It uses
- * a hash map to store groups and their corresponding aggregation buffers. If we
- * this map cannot allocate memory from memory manager, it spill the map into disk
- * and create a new one. After processed all the input, then merge all the spills
+ * a hash map to store groups and their corresponding aggregation buffers. If
+ * this map cannot allocate memory from memory manager, it spills the map into disk
+ * and creates a new one. After processed all the input, then merge all the spills
* together using external sorter, and do sort-based aggregation.
*
* The process has the following step:
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
index 484e438033..c7af40227d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
@@ -358,11 +358,11 @@ abstract class QueryTest extends PlanTest {
*/
def assertEmptyMissingInput(query: Dataset[_]): Unit = {
assert(query.queryExecution.analyzed.missingInput.isEmpty,
- s"The analyzed logical plan has missing inputs: ${query.queryExecution.analyzed}")
+ s"The analyzed logical plan has missing inputs:\n${query.queryExecution.analyzed}")
assert(query.queryExecution.optimizedPlan.missingInput.isEmpty,
- s"The optimized logical plan has missing inputs: ${query.queryExecution.optimizedPlan}")
+ s"The optimized logical plan has missing inputs:\n${query.queryExecution.optimizedPlan}")
assert(query.queryExecution.executedPlan.missingInput.isEmpty,
- s"The physical plan has missing inputs: ${query.queryExecution.executedPlan}")
+ s"The physical plan has missing inputs:\n${query.queryExecution.executedPlan}")
}
}