author     Kousuke Saruta <sarutak@oss.nttdata.co.jp>    2014-09-04 15:06:08 -0700
committer  Michael Armbrust <michael@databricks.com>     2014-09-04 15:06:08 -0700
commit     dc1ba9e9fc169962a9282ea6644dce09281ff598 (patch)
tree       de2535f9752894949ee890404ac6787c6e7f7e16
parent     4feb46c5feca8d48ec340dc9c8d0eccbcd41f505 (diff)
[SPARK-3378] [DOCS] Replace the word "SparkSQL" with right word "Spark SQL"
Author: Kousuke Saruta <sarutak@oss.nttdata.co.jp>

Closes #2251 from sarutak/SPARK-3378 and squashes the following commits:

0bfe234 [Kousuke Saruta] Merge branch 'master' of git://git.apache.org/spark into SPARK-3378
bb5938f [Kousuke Saruta] Replaced rest of "SparkSQL" with "Spark SQL"
6df66de [Kousuke Saruta] Replaced "SparkSQL" with "Spark SQL"
-rwxr-xr-x  dev/run-tests                                                                       2
-rw-r--r--  docs/programming-guide.md                                                           2
-rw-r--r--  python/pyspark/sql.py                                                               6
-rwxr-xr-x  python/run-tests                                                                    2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/api/java/Row.scala                     2
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala    2
6 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/dev/run-tests b/dev/run-tests
index d751961605..90a8ce16f0 100755
--- a/dev/run-tests
+++ b/dev/run-tests
@@ -89,7 +89,7 @@ echo "========================================================================="
echo "Running Spark unit tests"
echo "========================================================================="
-# Build Spark; we always build with Hive because the PySpark SparkSQL tests need it.
+# Build Spark; we always build with Hive because the PySpark Spark SQL tests need it.
# echo "q" is needed because sbt on encountering a build file with failure
# (either resolution or compilation) prompts the user for input either q, r,
# etc to quit or retry. This echo is there to make it not block.
diff --git a/docs/programming-guide.md b/docs/programming-guide.md
index 6ae780d940..624cc744df 100644
--- a/docs/programming-guide.md
+++ b/docs/programming-guide.md
@@ -385,7 +385,7 @@ Apart from text files, Spark's Python API also supports several other data forma
* SequenceFile and Hadoop Input/Output Formats
-**Note** this feature is currently marked ```Experimental``` and is intended for advanced users. It may be replaced in future with read/write support based on SparkSQL, in which case SparkSQL is the preferred approach.
+**Note** this feature is currently marked ```Experimental``` and is intended for advanced users. It may be replaced in future with read/write support based on Spark SQL, in which case Spark SQL is the preferred approach.
**Writable Support**
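For context on the note changed above: a minimal PySpark sketch of the Experimental SequenceFile read/write path that the guide says may later be superseded by Spark SQL. The output path is a placeholder, and the API names reflect the 1.x-era PySpark shown in this diff.

```python
from pyspark import SparkContext

sc = SparkContext(appName="SequenceFileSketch")

# Write an RDD of (key, value) pairs as a SequenceFile; Writable conversion is
# handled automatically for common types such as ints and strings.
pairs = sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
pairs.saveAsSequenceFile("/tmp/seqfile-demo")  # placeholder path

# Read it back as an RDD of (key, value) pairs.
print(sorted(sc.sequenceFile("/tmp/seqfile-demo").collect()))
```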
diff --git a/python/pyspark/sql.py b/python/pyspark/sql.py
index aaa35dadc2..e7f573cf6d 100644
--- a/python/pyspark/sql.py
+++ b/python/pyspark/sql.py
@@ -900,7 +900,7 @@ def _create_cls(dataType):
class SQLContext:
- """Main entry point for SparkSQL functionality.
+ """Main entry point for Spark SQL functionality.
A SQLContext can be used create L{SchemaRDD}s, register L{SchemaRDD}s as
tables, execute SQL over tables, cache tables, and read parquet files.
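The SQLContext docstring touched above describes the Spark SQL entry point; a minimal sketch of that flow, assuming the 1.x-era PySpark API (`inferSchema`, `registerTempTable`) and an illustrative table name:

```python
from pyspark import SparkContext
from pyspark.sql import SQLContext, Row

sc = SparkContext(appName="SparkSQLSketch")
sqlContext = SQLContext(sc)

# Turn an RDD of Row objects into a SchemaRDD by inferring the schema.
people = sc.parallelize([Row(name="Alice", age=34), Row(name="Bob", age=16)])
schema_people = sqlContext.inferSchema(people)

# Register it as a table and run SQL over it.
schema_people.registerTempTable("people")
adults = sqlContext.sql("SELECT name FROM people WHERE age >= 18")
print(adults.collect())
```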
@@ -946,7 +946,7 @@ class SQLContext:
@property
def _ssql_ctx(self):
- """Accessor for the JVM SparkSQL context.
+ """Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
@@ -1507,7 +1507,7 @@ class SchemaRDD(RDD):
"""An RDD of L{Row} objects that has an associated schema.
The underlying JVM object is a SchemaRDD, not a PythonRDD, so we can
- utilize the relational query api exposed by SparkSQL.
+ utilize the relational query api exposed by Spark SQL.
For normal L{pyspark.rdd.RDD} operations (map, count, etc.) the
L{SchemaRDD} is not operated on directly, as it's underlying
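As the SchemaRDD docstring above notes, a SchemaRDD answers relational queries through Spark SQL while still supporting ordinary RDD operations; a small sketch continuing the hypothetical `people` table from the previous example:

```python
# SQL queries return SchemaRDDs...
teenagers = sqlContext.sql("SELECT name FROM people WHERE age BETWEEN 13 AND 19")

# ...which still behave like normal RDDs of Row objects for map/count/etc.
print(teenagers.map(lambda row: "Name: " + row.name).collect())

# They can also be saved in Spark SQL's native Parquet format and reloaded later.
teenagers.saveAsParquetFile("/tmp/teenagers.parquet")  # placeholder path
```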
diff --git a/python/run-tests b/python/run-tests
index d671da4003..f2a80b4f18 100755
--- a/python/run-tests
+++ b/python/run-tests
@@ -28,7 +28,7 @@ FAILED=0
rm -f unit-tests.log
-# Remove the metastore and warehouse directory created by the HiveContext tests in SparkSQL
+# Remove the metastore and warehouse directory created by the HiveContext tests in Spark SQL
rm -rf metastore warehouse
function run_test() {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/api/java/Row.scala b/sql/core/src/main/scala/org/apache/spark/sql/api/java/Row.scala
index 6c67934bda..e9d04ce7aa 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/api/java/Row.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/api/java/Row.scala
@@ -25,7 +25,7 @@ import scala.math.BigDecimal
import org.apache.spark.sql.catalyst.expressions.{Row => ScalaRow}
/**
- * A result row from a SparkSQL query.
+ * A result row from a Spark SQL query.
*/
class Row(private[spark] val row: ScalaRow) extends Serializable {
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala
index 544abfc324..abed299cd9 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/parquet/FakeParquetSerDe.scala
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector
import org.apache.hadoop.io.Writable
/**
- * A placeholder that allows SparkSQL users to create metastore tables that are stored as
+ * A placeholder that allows Spark SQL users to create metastore tables that are stored as
* parquet files. It is only intended to pass the checks that the serde is valid and exists
* when a CREATE TABLE is run. The actual work of decoding will be done by ParquetTableScan
* when "spark.sql.hive.convertMetastoreParquet" is set to true.