Diffstat (limited to 'sql/core')
3 files changed, 7 insertions, 7 deletions
diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java b/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
index eb105bd09a..0d51dc9ff8 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java
@@ -99,7 +99,7 @@ public final class UnsafeKVExternalSorter {
       // The array will be used to do in-place sort, which require half of the space to be empty.
       assert(map.numKeys() <= map.getArray().size() / 2);
       // During spilling, the array in map will not be used, so we can borrow that and use it
-      // as the underline array for in-memory sorter (it's always large enough).
+      // as the underlying array for in-memory sorter (it's always large enough).
       // Since we will not grow the array, it's fine to pass `null` as consumer.
       final UnsafeInMemorySorter inMemSorter = new UnsafeInMemorySorter(
         null, taskMemoryManager, recordComparator, prefixComparator, map.getArray(),
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala
index 4b8adf5230..4e072a92cc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/TungstenAggregationIterator.scala
@@ -32,9 +32,9 @@ import org.apache.spark.unsafe.KVIterator
  * An iterator used to evaluate aggregate functions. It operates on [[UnsafeRow]]s.
  *
  * This iterator first uses hash-based aggregation to process input rows. It uses
- * a hash map to store groups and their corresponding aggregation buffers. If we
- * this map cannot allocate memory from memory manager, it spill the map into disk
- * and create a new one. After processed all the input, then merge all the spills
+ * a hash map to store groups and their corresponding aggregation buffers. If
+ * this map cannot allocate memory from memory manager, it spills the map into disk
+ * and creates a new one. After processed all the input, then merge all the spills
  * together using external sorter, and do sort-based aggregation.
  *
  * The process has the following step:
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
index 484e438033..c7af40227d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
@@ -358,11 +358,11 @@ abstract class QueryTest extends PlanTest {
    */
   def assertEmptyMissingInput(query: Dataset[_]): Unit = {
     assert(query.queryExecution.analyzed.missingInput.isEmpty,
-      s"The analyzed logical plan has missing inputs: ${query.queryExecution.analyzed}")
+      s"The analyzed logical plan has missing inputs:\n${query.queryExecution.analyzed}")
     assert(query.queryExecution.optimizedPlan.missingInput.isEmpty,
-      s"The optimized logical plan has missing inputs: ${query.queryExecution.optimizedPlan}")
+      s"The optimized logical plan has missing inputs:\n${query.queryExecution.optimizedPlan}")
     assert(query.queryExecution.executedPlan.missingInput.isEmpty,
-      s"The physical plan has missing inputs: ${query.queryExecution.executedPlan}")
+      s"The physical plan has missing inputs:\n${query.queryExecution.executedPlan}")
   }
 }
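
The UnsafeKVExternalSorter hunk only fixes a comment, but the surrounding code carries the interesting constraint: the map's array is borrowed as the sorter's backing array, guarded by an assert that at most half of it is occupied. As a hedged illustration of why an "in-place" sort can demand a half-empty array (this is not Spark's UnsafeInMemorySorter; BorrowedArraySort and sortFrontHalf are hypothetical), a merge sort can use the free back half of the same array as its scratch buffer and so needs no extra allocation:

import java.lang.System

// A sketch of an in-place sort that requires half of the array to be free.
// If the n occupied slots sit in the front of the borrowed array, the back
// half can serve as the scratch buffer for a bottom-up merge sort.
object BorrowedArraySort {
  def sortFrontHalf(array: Array[Long], n: Int): Unit = {
    require(n <= array.length / 2, "the in-place merge needs half the array free")
    val scratch = array.length - n // start of the guaranteed-free back half
    var width = 1
    while (width < n) {
      var lo = 0
      while (lo < n - width) {
        val mid = lo + width
        val hi = math.min(lo + 2 * width, n)
        // Copy the left run into the free region, then merge it with the
        // right run back into array(lo until hi).
        System.arraycopy(array, lo, array, scratch, mid - lo)
        var i = 0; var j = mid; var k = lo
        while (i < mid - lo && j < hi) {
          if (array(scratch + i) <= array(j)) { array(k) = array(scratch + i); i += 1 }
          else { array(k) = array(j); j += 1 }
          k += 1
        }
        while (i < mid - lo) { array(k) = array(scratch + i); i += 1; k += 1 }
        // Any remaining right-run elements are already in place.
        lo += 2 * width
      }
      width *= 2
    }
  }
}

With map.getArray() standing in for array, the assert map.numKeys() <= map.getArray().size() / 2 in the hunk above mirrors the require in this sketch.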
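
The Scaladoc corrected in the TungstenAggregationIterator hunk describes a fallback strategy: aggregate in a hash map, spill the map as a sorted run and start a fresh one whenever memory runs out, then merge the runs and finish with sort-based aggregation. Below is a minimal Scala sketch of that shape, under loud assumptions: a toy sum aggregate, maxEntries standing in for the memory manager, and an in-memory sorted Vector standing in for an on-disk spill file. None of this is Spark's implementation.

import scala.collection.mutable

object SpillingAggregation {
  def aggregate(input: Iterator[(String, Long)], maxEntries: Int): Iterator[(String, Long)] = {
    var map = mutable.HashMap.empty[String, Long]
    val spills = mutable.ArrayBuffer.empty[Vector[(String, Long)]]

    // Phase 1: hash-based aggregation. When the map "cannot allocate memory"
    // for a new group, spill it as a run sorted by key and start a new map.
    for ((key, value) <- input) {
      if (!map.contains(key) && map.size >= maxEntries) {
        spills += map.toVector.sortBy(_._1)
        map = mutable.HashMap.empty[String, Long]
      }
      map.update(key, map.getOrElse(key, 0L) + value)
    }
    spills += map.toVector.sortBy(_._1)

    // Phase 2: sort-based aggregation. Merge the sorted runs (here by a
    // flatten-and-sort, where Spark uses an external sorter) and fold
    // adjacent entries with equal keys into one final aggregation buffer.
    val merged = spills.flatten.sortBy(_._1)
    merged.iterator
      .foldLeft(List.empty[(String, Long)]) {
        case ((k, sum) :: rest, (key, v)) if k == key => (k, sum + v) :: rest
        case (acc, (key, v)) => (key, v) :: acc
      }
      .reverseIterator
  }
}

For example, aggregate(Iterator("a" -> 1L, "b" -> 2L, "a" -> 3L), maxEntries = 1) spills twice and still yields ("a", 4) and ("b", 2).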
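
The QueryTest hunk only moves each plan onto its own line in the assertion messages. A small illustration of the effect on failure output, with a made-up plan string as the stand-in:

// Query plans render as multi-line trees, so a newline keeps the message
// label and the tree visually separate. The plan below is hypothetical.
val plan = "Project [id]\n+- Filter (id > 1)\n   +- Range (0, 10)"

// Before: the first line of the tree runs into the message text.
println(s"The physical plan has missing inputs: $plan")
// After: the whole tree starts on a fresh line under the message.
println(s"The physical plan has missing inputs:\n$plan")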