 python/docs/Makefile            | 8 ++++----
 python/pyspark/ml/regression.py | 2 +-
 python/pyspark/sql/context.py   | 2 +-
 python/pyspark/sql/dataframe.py | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/python/docs/Makefile b/python/docs/Makefile
index 903009790b..905e0215c2 100644
--- a/python/docs/Makefile
+++ b/python/docs/Makefile
@@ -2,10 +2,10 @@
#

# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = sphinx-build
-PAPER =
-BUILDDIR = _build
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+PAPER ?=
+BUILDDIR ?= _build

export PYTHONPATH=$(realpath ..):$(realpath ../lib/py4j-0.9.2-src.zip)
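The switch from "=" to "?=" (GNU Make's conditional assignment) makes the four Sphinx settings overridable: a value already set in the environment or on the make command line now wins, and the Makefile default applies only when the variable is unset. A minimal Python analogy of the new lookup behaviour (the plain "=" form always discarded an inherited environment value):

import os

# Rough analogy for "SPHINXBUILD ?= sphinx-build": take a value
# inherited from the environment if present, otherwise fall back
# to the Makefile default.
SPHINXBUILD = os.environ.get("SPHINXBUILD", "sphinx-build")
BUILDDIR = os.environ.get("BUILDDIR", "_build")
print(SPHINXBUILD, BUILDDIR)

With this change, something like "SPHINXBUILD=sphinx-build2 make html" can build the docs with an alternate Sphinx binary.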
diff --git a/python/pyspark/ml/regression.py b/python/pyspark/ml/regression.py
index 316d7e30bc..c064fe500c 100644
--- a/python/pyspark/ml/regression.py
+++ b/python/pyspark/ml/regression.py
@@ -28,7 +28,7 @@ from pyspark.sql import DataFrame
__all__ = ['AFTSurvivalRegression', 'AFTSurvivalRegressionModel',
'DecisionTreeRegressor', 'DecisionTreeRegressionModel',
'GBTRegressor', 'GBTRegressionModel',
- 'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel'
+ 'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel',
'IsotonicRegression', 'IsotonicRegressionModel',
'LinearRegression', 'LinearRegressionModel',
'LinearRegressionSummary', 'LinearRegressionTrainingSummary',
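The substantive fix here is the trailing comma: without it, Python's implicit concatenation of adjacent string literals silently fuses the two names into one bogus entry, which breaks "from pyspark.ml.regression import *" outright, since the fused name does not exist in the module. A standalone sketch of the failure mode:

# Adjacent string literals are concatenated at compile time, so the
# missing comma produces a single nonexistent name in __all__.
__all__ = ['GeneralizedLinearRegressionModel'
           'IsotonicRegression']
print(__all__)  # ['GeneralizedLinearRegressionModelIsotonicRegression']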
diff --git a/python/pyspark/sql/context.py b/python/pyspark/sql/context.py
index 4008332c84..11dfcfe13e 100644
--- a/python/pyspark/sql/context.py
+++ b/python/pyspark/sql/context.py
@@ -405,7 +405,7 @@ class SQLContext(object):
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
- Py4JJavaError:...
+ Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
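This doctest change is purely cosmetic: the example runs with +IGNORE_EXCEPTION_DETAIL, which matches only the exception type and ignores everything after the colon, so the added space simply mirrors how Python formats a raised exception ("Type: message"). A self-contained sketch of the same pattern, with a hypothetical function name:

import doctest

def fail():
    """
    >>> fail()  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError: ...
    """
    raise ValueError("the detail text is ignored by the directive")

doctest.testmod()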
diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py
index d473d6b534..b4fa836893 100644
--- a/python/pyspark/sql/dataframe.py
+++ b/python/pyspark/sql/dataframe.py
@@ -60,7 +60,7 @@ class DataFrame(object):
people = sqlContext.read.parquet("...")
department = sqlContext.read.parquet("...")

- people.filter(people.age > 30).join(department, people.deptId == department.id)) \
+ people.filter(people.age > 30).join(department, people.deptId == department.id)\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})

.. note:: Experimental
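The removed ")" was unbalanced, so the docstring example was a SyntaxError if pasted into a shell; the trailing backslash now correctly continues the chained expression onto the .groupBy(...) line. A runnable sketch of the corrected chain, using hypothetical in-memory stand-ins for the parquet inputs elided with "..." (the SQLContext API matches the surrounding docstring):

from pyspark import SparkContext
from pyspark.sql import Row, SQLContext

sc = SparkContext("local[1]", "dataframe-docstring-sketch")
sqlContext = SQLContext(sc)

# Hypothetical stand-ins for the elided parquet data.
people = sqlContext.createDataFrame(
    [Row(age=35, deptId=1, gender="m", salary=100.0)])
department = sqlContext.createDataFrame([Row(id=1, name="eng")])

# The corrected chain; wrapping it in parentheses is an equivalent
# alternative to the trailing-backslash continuation in the docstring.
result = (people.filter(people.age > 30)
          .join(department, people.deptId == department.id)
          .groupBy(department.name, "gender")
          .agg({"salary": "avg", "age": "max"}))
result.show()
sc.stop()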