Diffstat (limited to 'python/pyspark/sql')
-rw-r--r--	python/pyspark/sql/column.py	4
-rw-r--r--	python/pyspark/sql/dataframe.py	20
2 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/python/pyspark/sql/column.py b/python/pyspark/sql/column.py
index 3866a49c0b..19ec6fcc5d 100644
--- a/python/pyspark/sql/column.py
+++ b/python/pyspark/sql/column.py
@@ -22,7 +22,7 @@ if sys.version >= '3':
     basestring = str
     long = int
 
-from pyspark import since
+from pyspark import copy_func, since
 from pyspark.context import SparkContext
 from pyspark.rdd import ignore_unicode_prefix
 from pyspark.sql.types import *
@@ -337,7 +337,7 @@ class Column(object):
             raise TypeError("unexpected type: %s" % type(dataType))
         return Column(jc)
 
-    astype = cast
+    astype = copy_func(cast, sinceversion=1.4, doc=":func:`astype` is an alias for :func:`cast`.")
 
     @since(1.3)
     def between(self, lowerBound, upperBound):
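
The key to this change is `copy_func`, a helper exposed from the `pyspark` package that clones a function object so each alias can carry its own docstring and `@since` version annotation instead of sharing the original's. A minimal sketch of such a helper, assuming PySpark's `since` decorator is importable; the real implementation lives in pyspark/__init__.py and may differ in detail:

    import types

    from pyspark import since

    def copy_func(f, name=None, sinceversion=None, doc=None):
        # Clone f: same code, globals, defaults and closure, optionally renamed.
        fn = types.FunctionType(f.__code__, f.__globals__, name or f.__name__,
                                f.__defaults__, f.__closure__)
        fn.__dict__.update(f.__dict__)  # shallow-copy any attributes set on f
        if doc is not None:
            fn.__doc__ = doc  # give the alias its own docstring
        if sinceversion is not None:
            fn = since(sinceversion)(fn)  # append a ".. versionadded::" note
        return fn

A plain `astype = cast` assignment binds both names to the same function object, so documenting `astype` separately (or versioning it differently) is impossible; copying the function decouples the two.
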
diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py
index 7008e8fadf..7e1854c43b 100644
--- a/python/pyspark/sql/dataframe.py
+++ b/python/pyspark/sql/dataframe.py
@@ -26,7 +26,7 @@ if sys.version >= '3':
 else:
     from itertools import imap as map
 
-from pyspark import since
+from pyspark import copy_func, since
 from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
 from pyspark.serializers import BatchedSerializer, PickleSerializer, UTF8Deserializer
 from pyspark.storagelevel import StorageLevel
@@ -829,8 +829,6 @@ class DataFrame(object):
             raise TypeError("condition should be string or Column")
         return DataFrame(jdf, self.sql_ctx)
 
-    where = filter
-
     @ignore_unicode_prefix
     @since(1.3)
     def groupBy(self, *cols):
@@ -1361,8 +1359,20 @@ class DataFrame(object):
     # Pandas compatibility
     ##########################################################################################
 
-    groupby = groupBy
-    drop_duplicates = dropDuplicates
+    groupby = copy_func(
+        groupBy,
+        sinceversion=1.4,
+        doc=":func:`groupby` is an alias for :func:`groupBy`.")
+
+    drop_duplicates = copy_func(
+        dropDuplicates,
+        sinceversion=1.4,
+        doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
+
+    where = copy_func(
+        filter,
+        sinceversion=1.3,
+        doc=":func:`where` is an alias for :func:`filter`.")
 
 
 def _to_scala_map(sc, jm):
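
After this change each alias reports its own documentation and "versionadded" marker rather than the wrapped method's. A quick check, assuming a working PySpark installation:

    from pyspark.sql import Column, DataFrame

    # Each alias now has a distinct docstring (plus the note appended by @since):
    print(Column.astype.__doc__)              # :func:`astype` is an alias for :func:`cast`. ...
    print(DataFrame.where.__doc__)            # :func:`where` is an alias for :func:`filter`. ...
    print(DataFrame.groupby.__doc__)          # :func:`groupby` is an alias for :func:`groupBy`. ...
    print(DataFrame.drop_duplicates.__doc__)  # :func:`drop_duplicates` is an alias for :func:`dropDuplicates`. ...
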