Diffstat (limited to 'sql/core/src')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala  14
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala  14
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortBasedAggregationIterator.scala  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/udaf.scala  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/GenerateColumnAccessor.scala  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonParser.scala  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystRowConverter.scala  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ExchangeCoordinator.scala  10
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/r/MapPartitionsRWrapper.scala  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/expressions/udaf.scala  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/streaming/ContinuousQuery.scala  2
17 files changed, 33 insertions, 33 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index 88fa5cd21d..b248583d79 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -314,7 +314,7 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
def json(paths: String*): DataFrame = format("json").load(paths : _*)
/**
- * Loads an `JavaRDD[String]` storing JSON objects (one object per record) and
+ * Loads a `JavaRDD[String]` storing JSON objects (one object per record) and
* returns the result as a [[DataFrame]].
*
* Unless the schema is specified using [[schema]] function, this function goes through the
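A minimal usage sketch of the json readers whose doc comments are touched above (illustrative only, not part of the patch; the path, app name, and column are invented):

    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder().master("local[*]").appName("json-sketch").getOrCreate()

    // Without an explicit .schema(...), the reader scans the input once to infer the schema.
    val events = spark.read.json("/tmp/events.json")
    events.printSchema()
    events.select("id").show()

The JavaRDD[String] overload fixed in this hunk behaves the same way, except the JSON objects (one per record) come from an RDD of strings instead of files.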
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index 2e14c5d486..0fb2400d1b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -370,7 +370,7 @@ class SQLContext private[sql](val sparkSession: SparkSession)
/**
* :: DeveloperApi ::
- * Creates a [[DataFrame]] from an [[JavaRDD]] containing [[Row]]s using the given schema.
+ * Creates a [[DataFrame]] from a [[JavaRDD]] containing [[Row]]s using the given schema.
* It is important to make sure that the structure of every [[Row]] of the provided RDD matches
* the provided schema. Otherwise, there will be runtime exception.
*
@@ -384,7 +384,7 @@ class SQLContext private[sql](val sparkSession: SparkSession)
/**
* :: DeveloperApi ::
- * Creates a [[DataFrame]] from an [[java.util.List]] containing [[Row]]s using the given schema.
+ * Creates a [[DataFrame]] from a [[java.util.List]] containing [[Row]]s using the given schema.
* It is important to make sure that the structure of every [[Row]] of the provided List matches
* the provided schema. Otherwise, there will be runtime exception.
*
@@ -421,7 +421,7 @@ class SQLContext private[sql](val sparkSession: SparkSession)
}
/**
- * Applies a schema to an List of Java Beans.
+ * Applies a schema to a List of Java Beans.
*
* WARNING: Since there is no guaranteed ordering for fields in a Java Bean,
* SELECT * queries will return the columns in an undefined order.
@@ -552,7 +552,7 @@ class SQLContext private[sql](val sparkSession: SparkSession)
/**
* :: Experimental ::
* Creates a [[Dataset]] with a single [[LongType]] column named `id`, containing elements
- * in an range from 0 to `end` (exclusive) with step value 1.
+ * in a range from 0 to `end` (exclusive) with step value 1.
*
* @since 2.0.0
* @group dataset
@@ -563,7 +563,7 @@ class SQLContext private[sql](val sparkSession: SparkSession)
/**
* :: Experimental ::
* Creates a [[Dataset]] with a single [[LongType]] column named `id`, containing elements
- * in an range from `start` to `end` (exclusive) with step value 1.
+ * in a range from `start` to `end` (exclusive) with step value 1.
*
* @since 2.0.0
* @group dataset
@@ -574,7 +574,7 @@ class SQLContext private[sql](val sparkSession: SparkSession)
/**
* :: Experimental ::
* Creates a [[Dataset]] with a single [[LongType]] column named `id`, containing elements
- * in an range from `start` to `end` (exclusive) with an step value.
+ * in a range from `start` to `end` (exclusive) with a step value.
*
* @since 2.0.0
* @group dataset
@@ -587,7 +587,7 @@ class SQLContext private[sql](val sparkSession: SparkSession)
/**
* :: Experimental ::
* Creates a [[Dataset]] with a single [[LongType]] column named `id`, containing elements
- * in an range from `start` to `end` (exclusive) with an step value, with partition number
+ * in a range from `start` to `end` (exclusive) with a step value, with partition number
* specified.
*
* @since 2.0.0
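The createDataFrame variants fixed above all take an explicit schema; a sketch of the java.util.List overload (assuming an existing SQLContext named `sqlContext`; the data and field names are invented, and SparkSession exposes the same signatures):

    import org.apache.spark.sql.Row
    import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

    val schema = StructType(Seq(
      StructField("id", IntegerType, nullable = false),
      StructField("name", StringType, nullable = true)))

    // Each Row must line up with the schema; a mismatch surfaces as a runtime
    // exception when the query is executed, not when the DataFrame is created.
    val rows = java.util.Arrays.asList(Row(1, "alpha"), Row(2, "beta"))
    val people = sqlContext.createDataFrame(rows, schema)
    people.show()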
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
index b7ea2a8917..440952572d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
@@ -33,7 +33,7 @@ abstract class SQLImplicits {
protected def _sqlContext: SQLContext
/**
- * Converts $"col name" into an [[Column]].
+ * Converts $"col name" into a [[Column]].
*
* @since 2.0.0
*/
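The $-string interpolator described in that hunk comes from importing the session's implicits; a sketch (the DataFrame `people` and its columns are assumed):

    import spark.implicits._

    val adults = people.filter($"age" > 21).select($"name", $"age")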
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala b/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
index 7d7fd0399d..f5b16d07ad 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SparkSession.scala
@@ -305,7 +305,7 @@ class SparkSession private(
/**
* :: DeveloperApi ::
- * Creates a [[DataFrame]] from an [[JavaRDD]] containing [[Row]]s using the given schema.
+ * Creates a [[DataFrame]] from a [[JavaRDD]] containing [[Row]]s using the given schema.
* It is important to make sure that the structure of every [[Row]] of the provided RDD matches
* the provided schema. Otherwise, there will be runtime exception.
*
@@ -319,7 +319,7 @@ class SparkSession private(
/**
* :: DeveloperApi ::
- * Creates a [[DataFrame]] from an [[java.util.List]] containing [[Row]]s using the given schema.
+ * Creates a [[DataFrame]] from a [[java.util.List]] containing [[Row]]s using the given schema.
* It is important to make sure that the structure of every [[Row]] of the provided List matches
* the provided schema. Otherwise, there will be runtime exception.
*
@@ -365,7 +365,7 @@ class SparkSession private(
}
/**
- * Applies a schema to an List of Java Beans.
+ * Applies a schema to a List of Java Beans.
*
* WARNING: Since there is no guaranteed ordering for fields in a Java Bean,
* SELECT * queries will return the columns in an undefined order.
@@ -475,7 +475,7 @@ class SparkSession private(
/**
* :: Experimental ::
* Creates a [[Dataset]] with a single [[LongType]] column named `id`, containing elements
- * in an range from 0 to `end` (exclusive) with step value 1.
+ * in a range from 0 to `end` (exclusive) with step value 1.
*
* @since 2.0.0
* @group dataset
@@ -486,7 +486,7 @@ class SparkSession private(
/**
* :: Experimental ::
* Creates a [[Dataset]] with a single [[LongType]] column named `id`, containing elements
- * in an range from `start` to `end` (exclusive) with step value 1.
+ * in a range from `start` to `end` (exclusive) with step value 1.
*
* @since 2.0.0
* @group dataset
@@ -499,7 +499,7 @@ class SparkSession private(
/**
* :: Experimental ::
* Creates a [[Dataset]] with a single [[LongType]] column named `id`, containing elements
- * in an range from `start` to `end` (exclusive) with an step value.
+ * in a range from `start` to `end` (exclusive) with a step value.
*
* @since 2.0.0
* @group dataset
@@ -512,7 +512,7 @@ class SparkSession private(
/**
* :: Experimental ::
* Creates a [[Dataset]] with a single [[LongType]] column named `id`, containing elements
- * in an range from `start` to `end` (exclusive) with an step value, with partition number
+ * in a range from `start` to `end` (exclusive) with a step value, with partition number
* specified.
*
* @since 2.0.0
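The four range overloads whose doc comments are fixed above differ only in which of start, step, and numPartitions they default; a sketch with arbitrary values (assuming a SparkSession named `spark`):

    val a = spark.range(1000)            // 0 until 1000, step 1
    val b = spark.range(10, 20)          // 10 until 20, step 1
    val c = spark.range(0, 100, 5)       // 0 until 100, step 5
    val d = spark.range(0, 100, 5, 8)    // same range and step, 8 partitions

    println(d.rdd.getNumPartitions)      // 8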
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala
index 9dc367920e..a8cc72f2e7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala
@@ -274,7 +274,7 @@ class SQLBuilder(logicalPlan: LogicalPlan) extends Logging {
// 5. the table alias for output columns of generator.
// 6. the AS keyword
// 7. the column alias, can be more than one, e.g. AS key, value
- // An concrete example: "tbl LATERAL VIEW EXPLODE(map_col) sub_q AS key, value", and the builder
+ // A concrete example: "tbl LATERAL VIEW EXPLODE(map_col) sub_q AS key, value", and the builder
// will put it in FROM clause later.
build(
childSQL,
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortBasedAggregationIterator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortBasedAggregationIterator.scala
index f392b135ce..3f7f849885 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortBasedAggregationIterator.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/SortBasedAggregationIterator.scala
@@ -86,7 +86,7 @@ class SortBasedAggregationIterator(
// The aggregation buffer used by the sort-based aggregation.
private[this] val sortBasedAggregationBuffer: MutableRow = newBuffer
- // An SafeProjection to turn UnsafeRow into GenericInternalRow, because UnsafeRow can't be
+ // A SafeProjection to turn UnsafeRow into GenericInternalRow, because UnsafeRow can't be
// compared to MutableRow (aggregation buffer) directly.
private[this] val safeProj: Projection = FromUnsafeProjection(valueAttributes.map(_.dataType))
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/udaf.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/udaf.scala
index 4ceb710f4b..b047bc0641 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/udaf.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/udaf.scala
@@ -202,7 +202,7 @@ sealed trait BufferSetterGetterUtils {
}
/**
- * A Mutable [[Row]] representing an mutable aggregation buffer.
+ * A Mutable [[Row]] representing a mutable aggregation buffer.
*/
private[sql] class MutableAggregationBufferImpl (
schema: StructType,
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/GenerateColumnAccessor.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/GenerateColumnAccessor.scala
index 1041bab9d5..7a14879b8b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/GenerateColumnAccessor.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/GenerateColumnAccessor.scala
@@ -58,7 +58,7 @@ class MutableUnsafeRow(val writer: UnsafeRowWriter) extends GenericMutableRow(nu
}
/**
- * Generates bytecode for an [[ColumnarIterator]] for columnar cache.
+ * Generates bytecode for a [[ColumnarIterator]] for columnar cache.
*/
object GenerateColumnAccessor extends CodeGenerator[Seq[DataType], ColumnarIterator] with Logging {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala
index 350508c1d9..7503285ee2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala
@@ -41,7 +41,7 @@ import org.apache.spark.sql.execution.SparkPlan
* is only done on top level columns, but formats should support pruning of nested columns as
* well.
* - Construct a reader function by passing filters and the schema into the FileFormat.
- * - Using an partition pruning predicates, enumerate the list of files that should be read.
+ * - Using a partition pruning predicates, enumerate the list of files that should be read.
* - Split the files into tasks and construct a FileScanRDD.
* - Add any projection or filters that must be evaluated after the scan.
*
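At the user level, the partition-pruning step in that list is what makes a filter on a partition column cheap; a sketch (the path and the `date` partition column are invented):

    import org.apache.spark.sql.functions.col

    val sales = spark.read.parquet("/data/sales")            // laid out as /data/sales/date=.../
    val oneDay = sales.filter(col("date") === "2016-05-01")  // only matching directories are listed and read
    oneDay.explain()                                         // inspect the planned file scan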
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonParser.scala
index aeee2600a1..733fcbfea1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonParser.scala
@@ -50,7 +50,7 @@ object JacksonParser extends Logging {
/**
* Parse the current token (and related children) according to a desired schema
- * This is an wrapper for the method `convertField()` to handle a row wrapped
+ * This is a wrapper for the method `convertField()` to handle a row wrapped
* with an array.
*/
def convertRootField(
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystRowConverter.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystRowConverter.scala
index 6bf82bee67..85b0bc17ed 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystRowConverter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystRowConverter.scala
@@ -68,7 +68,7 @@ private[parquet] trait HasParentContainerUpdater {
}
/**
- * A convenient converter class for Parquet group types with an [[HasParentContainerUpdater]].
+ * A convenient converter class for Parquet group types with a [[HasParentContainerUpdater]].
*/
private[parquet] abstract class CatalystGroupConverter(val updater: ParentContainerUpdater)
extends GroupConverter with HasParentContainerUpdater
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ExchangeCoordinator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ExchangeCoordinator.scala
index fb60d68f98..2ea6ee38a9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ExchangeCoordinator.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ExchangeCoordinator.scala
@@ -47,10 +47,10 @@ import org.apache.spark.sql.execution.{ShuffledRowRDD, SparkPlan}
* partitions.
*
* The workflow of this coordinator is described as follows:
- * - Before the execution of a [[SparkPlan]], for an [[ShuffleExchange]] operator,
+ * - Before the execution of a [[SparkPlan]], for a [[ShuffleExchange]] operator,
* if an [[ExchangeCoordinator]] is assigned to it, it registers itself to this coordinator.
* This happens in the `doPrepare` method.
- * - Once we start to execute a physical plan, an [[ShuffleExchange]] registered to this
+ * - Once we start to execute a physical plan, a [[ShuffleExchange]] registered to this
* coordinator will call `postShuffleRDD` to get its corresponding post-shuffle
* [[ShuffledRowRDD]].
* If this coordinator has made the decision on how to shuffle data, this [[ShuffleExchange]]
@@ -61,7 +61,7 @@ import org.apache.spark.sql.execution.{ShuffledRowRDD, SparkPlan}
* post-shuffle partitions and pack multiple pre-shuffle partitions with continuous indices
* to a single post-shuffle partition whenever necessary.
* - Finally, this coordinator will create post-shuffle [[ShuffledRowRDD]]s for all registered
- * [[ShuffleExchange]]s. So, when an [[ShuffleExchange]] calls `postShuffleRDD`, this coordinator
+ * [[ShuffleExchange]]s. So, when a [[ShuffleExchange]] calls `postShuffleRDD`, this coordinator
* can lookup the corresponding [[RDD]].
*
* The strategy used to determine the number of post-shuffle partitions is described as follows.
@@ -98,8 +98,8 @@ private[sql] class ExchangeCoordinator(
@volatile private[this] var estimated: Boolean = false
/**
- * Registers an [[ShuffleExchange]] operator to this coordinator. This method is only allowed to
- * be called in the `doPrepare` method of an [[ShuffleExchange]] operator.
+ * Registers a [[ShuffleExchange]] operator to this coordinator. This method is only allowed to
+ * be called in the `doPrepare` method of a [[ShuffleExchange]] operator.
*/
@GuardedBy("this")
def registerExchange(exchange: ShuffleExchange): Unit = synchronized {
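A sketch of how this coordinator is switched on from the user side (config names as of this era's SQLConf; verify against the version in use, and the target size is arbitrary):

    import org.apache.spark.sql.functions.col

    spark.conf.set("spark.sql.adaptive.enabled", "true")
    spark.conf.set("spark.sql.adaptive.shuffle.targetPostShuffleInputSize", (64 * 1024 * 1024).toString)

    // Any shuffle-producing query can then have its post-shuffle partition count
    // decided at runtime from the map output statistics described above.
    val counts = spark.range(0, 1000000).groupBy((col("id") % 10).as("bucket")).count()
    counts.show()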
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala
index f0efa52c3d..32f0bc5bf9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala
@@ -30,7 +30,7 @@ import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
import org.apache.spark.util.collection.BitSet
/**
- * Performs an sort merge join of two child relations.
+ * Performs a sort merge join of two child relations.
*/
case class SortMergeJoinExec(
leftKeys: Seq[Expression],
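A sketch of a query that is typically planned as a sort merge join once both sides exceed the broadcast threshold (sizes are arbitrary); explain() shows which physical join operator was chosen:

    val left  = spark.range(0, 10000000).withColumnRenamed("id", "k")
    val right = spark.range(0, 10000000).withColumnRenamed("id", "k")

    val joined = left.join(right, "k")   // equi-join on a sortable key
    joined.explain()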
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/r/MapPartitionsRWrapper.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/r/MapPartitionsRWrapper.scala
index dc6f2ef371..6c76328c74 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/r/MapPartitionsRWrapper.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/r/MapPartitionsRWrapper.scala
@@ -40,7 +40,7 @@ private[sql] case class MapPartitionsRWrapper(
val (newIter, deserializer, colNames) =
if (!isSerializedRData) {
- // Serialize each row into an byte array that can be deserialized in the R worker
+ // Serialize each row into a byte array that can be deserialized in the R worker
(iter.asInstanceOf[Iterator[Row]].map {row => rowToRBytes(row)},
SerializationFormats.ROW, inputSchema.fieldNames)
} else {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/expressions/udaf.scala b/sql/core/src/main/scala/org/apache/spark/sql/expressions/udaf.scala
index 48925910ac..eac658c617 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/expressions/udaf.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/expressions/udaf.scala
@@ -133,7 +133,7 @@ abstract class UserDefinedAggregateFunction extends Serializable {
/**
* :: Experimental ::
- * A [[Row]] representing an mutable aggregation buffer.
+ * A [[Row]] representing a mutable aggregation buffer.
*
* This is not meant to be extended outside of Spark.
*/
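A minimal UDAF sketch showing how the buffer is mutated through this interface (the aggregate itself, a plain long sum, is invented for illustration):

    import org.apache.spark.sql.Row
    import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
    import org.apache.spark.sql.types._

    object LongSum extends UserDefinedAggregateFunction {
      def inputSchema: StructType = StructType(StructField("value", LongType) :: Nil)
      def bufferSchema: StructType = StructType(StructField("sum", LongType) :: Nil)
      def dataType: DataType = LongType
      def deterministic: Boolean = true

      def initialize(buffer: MutableAggregationBuffer): Unit = buffer(0) = 0L
      def update(buffer: MutableAggregationBuffer, input: Row): Unit =
        if (!input.isNullAt(0)) buffer(0) = buffer.getLong(0) + input.getLong(0)
      def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit =
        buffer1(0) = buffer1.getLong(0) + buffer2.getLong(0)
      def evaluate(buffer: Row): Any = buffer.getLong(0)
    }

    // Usage: df.agg(LongSum(col("value"))) -- UserDefinedAggregateFunction.apply returns a Column.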
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala
index 9f6137d6e3..0d6f98416b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala
@@ -52,7 +52,7 @@ private[sql] class SharedState(val sparkContext: SparkContext) {
org.apache.spark.util.Utils.getContextOrSparkClassLoader)
/**
- * Create a SQLListener then add it into SparkContext, and create an SQLTab if there is SparkUI.
+ * Create a SQLListener then add it into SparkContext, and create a SQLTab if there is SparkUI.
*/
private def createListenerAndUI(sc: SparkContext): SQLListener = {
if (SparkSession.sqlListener.get() == null) {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/ContinuousQuery.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/ContinuousQuery.scala
index 451cfd85e3..3bbb0b8a88 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/ContinuousQuery.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/ContinuousQuery.scala
@@ -93,7 +93,7 @@ trait ContinuousQuery {
def awaitTermination(timeoutMs: Long): Boolean
/**
- * Blocks until all available data in the source has been processed an committed to the sink.
+ * Blocks until all available data in the source has been processed and committed to the sink.
* This method is intended for testing. Note that in the case of continually arriving data, this
* method may block forever. Additionally, this method is only guaranteed to block until data that
* has been synchronously appended data to a [[org.apache.spark.sql.execution.streaming.Source]]