about summary refs log tree commit diff
path: root/examples/src/main/python
diff options
context:
space:
mode:
author: Davies Liu <davies@databricks.com> 2015-04-08 13:31:45 -0700
committer: Reynold Xin <rxin@databricks.com> 2015-04-08 13:31:45 -0700
commit6ada4f6f52cf1d992c7ab0c32318790cf08b0a0d (patch)
tree495c9bb86bb98de40365538bebcf9144547d8cce /examples/src/main/python
parent66159c35010af35098dd1ec75475bb5d4d0fd6ca (diff)
downloadspark-6ada4f6f52cf1d992c7ab0c32318790cf08b0a0d.tar.gz
spark-6ada4f6f52cf1d992c7ab0c32318790cf08b0a0d.tar.bz2
spark-6ada4f6f52cf1d992c7ab0c32318790cf08b0a0d.zip
[SPARK-6781] [SQL] use sqlContext in python shell
Use `sqlContext` in PySpark shell, make it consistent with SQL programming guide. `sqlCtx` is also kept for compatibility. Author: Davies Liu <davies@databricks.com> Closes #5425 from davies/sqlCtx and squashes the following commits: af67340 [Davies Liu] sqlCtx -> sqlContext 15a278f [Davies Liu] use sqlContext in python shell
Diffstat (limited to 'examples/src/main/python')
-rw-r--r--examples/src/main/python/ml/simple_text_classification_pipeline.py2
-rw-r--r--examples/src/main/python/mllib/dataset_example.py6
2 files changed, 4 insertions, 4 deletions
diff --git a/examples/src/main/python/ml/simple_text_classification_pipeline.py b/examples/src/main/python/ml/simple_text_classification_pipeline.py
index d281f4fa44..c73edb7fd6 100644
--- a/examples/src/main/python/ml/simple_text_classification_pipeline.py
+++ b/examples/src/main/python/ml/simple_text_classification_pipeline.py
@@ -33,7 +33,7 @@ pipeline in Python. Run with:
if __name__ == "__main__":
sc = SparkContext(appName="SimpleTextClassificationPipeline")
- sqlCtx = SQLContext(sc)
+ sqlContext = SQLContext(sc)
# Prepare training documents, which are labeled.
LabeledDocument = Row("id", "text", "label")
diff --git a/examples/src/main/python/mllib/dataset_example.py b/examples/src/main/python/mllib/dataset_example.py
index b5a70db2b9..fcbf56cbf0 100644
--- a/examples/src/main/python/mllib/dataset_example.py
+++ b/examples/src/main/python/mllib/dataset_example.py
@@ -44,19 +44,19 @@ if __name__ == "__main__":
print >> sys.stderr, "Usage: dataset_example.py <libsvm file>"
exit(-1)
sc = SparkContext(appName="DatasetExample")
- sqlCtx = SQLContext(sc)
+ sqlContext = SQLContext(sc)
if len(sys.argv) == 2:
input = sys.argv[1]
else:
input = "data/mllib/sample_libsvm_data.txt"
points = MLUtils.loadLibSVMFile(sc, input)
- dataset0 = sqlCtx.inferSchema(points).setName("dataset0").cache()
+ dataset0 = sqlContext.inferSchema(points).setName("dataset0").cache()
summarize(dataset0)
tempdir = tempfile.NamedTemporaryFile(delete=False).name
os.unlink(tempdir)
print "Save dataset as a Parquet file to %s." % tempdir
dataset0.saveAsParquetFile(tempdir)
print "Load it back and summarize it again."
- dataset1 = sqlCtx.parquetFile(tempdir).setName("dataset1").cache()
+ dataset1 = sqlContext.parquetFile(tempdir).setName("dataset1").cache()
summarize(dataset1)
shutil.rmtree(tempdir)