author     Kousuke Saruta <sarutak@oss.nttdata.co.jp>   2014-09-04 15:06:08 -0700
committer  Michael Armbrust <michael@databricks.com>    2014-09-04 15:06:08 -0700
commit     dc1ba9e9fc169962a9282ea6644dce09281ff598 (patch)
tree       de2535f9752894949ee890404ac6787c6e7f7e16 /python/pyspark
parent     4feb46c5feca8d48ec340dc9c8d0eccbcd41f505 (diff)
[SPARK-3378] [DOCS] Replace the word "SparkSQL" with right word "Spark SQL"
Author: Kousuke Saruta <sarutak@oss.nttdata.co.jp>

Closes #2251 from sarutak/SPARK-3378 and squashes the following commits:

0bfe234 [Kousuke Saruta] Merge branch 'master' of git://git.apache.org/spark into SPARK-3378
bb5938f [Kousuke Saruta] Replaced rest of "SparkSQL" with "Spark SQL"
6df66de [Kousuke Saruta] Replaced "SparkSQL" with "Spark SQL"
Diffstat (limited to 'python/pyspark')
-rw-r--r--  python/pyspark/sql.py  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/python/pyspark/sql.py b/python/pyspark/sql.py
index aaa35dadc2..e7f573cf6d 100644
--- a/python/pyspark/sql.py
+++ b/python/pyspark/sql.py
@@ -900,7 +900,7 @@ def _create_cls(dataType):
class SQLContext:
- """Main entry point for SparkSQL functionality.
+ """Main entry point for Spark SQL functionality.
A SQLContext can be used to create L{SchemaRDD}s, register L{SchemaRDD}s as
tables, execute SQL over tables, cache tables, and read parquet files.
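
For context, the docstring above describes the basic Spark SQL workflow in the Python API of this era. The following is a minimal sketch of that flow, not part of this commit; the app name and the sample rows are illustrative, and the calls (inferSchema, registerTempTable, sql) are the Spark 1.x API this file documents.

    from pyspark import SparkContext
    from pyspark.sql import SQLContext, Row

    sc = SparkContext(appName="SparkSQLExample")   # hypothetical app name
    sqlCtx = SQLContext(sc)                        # main Spark SQL entry point

    # Turn an ordinary RDD of Row objects into a SchemaRDD, register it as a
    # table, then run SQL over it.
    people = sc.parallelize([Row(name="Alice", age=1), Row(name="Bob", age=5)])
    schemaPeople = sqlCtx.inferSchema(people)
    schemaPeople.registerTempTable("people")
    adults = sqlCtx.sql("SELECT name FROM people WHERE age >= 5")
    print(adults.collect())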
@@ -946,7 +946,7 @@ class SQLContext:
@property
def _ssql_ctx(self):
- """Accessor for the JVM SparkSQL context.
+ """Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
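
As a rough illustration of what "provide their own JVM Contexts" means, the sketch below shows a hypothetical subclass overriding _ssql_ctx; it is not part of this change, and assumes the _jvm/_jsc attributes that SQLContext sets up in this module (HiveContext is the real in-tree example of this pattern).

    class MyContext(SQLContext):
        """Hypothetical subclass that backs the Python API with its own JVM context."""

        @property
        def _ssql_ctx(self):
            # Lazily construct and cache a JVM-side SQLContext; a subclass such as
            # HiveContext would instantiate a different JVM class here instead.
            if not hasattr(self, "_my_scala_ctx"):
                self._my_scala_ctx = self._jvm.SQLContext(self._jsc.sc())
            return self._my_scala_ctx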
@@ -1507,7 +1507,7 @@ class SchemaRDD(RDD):
"""An RDD of L{Row} objects that has an associated schema.
The underlying JVM object is a SchemaRDD, not a PythonRDD, so we can
- utilize the relational query api exposed by SparkSQL.
+ utilize the relational query api exposed by Spark SQL.
For normal L{pyspark.rdd.RDD} operations (map, count, etc.) the
L{SchemaRDD} is not operated on directly, as its underlying