From 8e0b030606927741f91317660cd14a8a5ed6e5f9 Mon Sep 17 00:00:00 2001
From: Reynold Xin
Date: Mon, 14 Mar 2016 19:25:49 -0700
Subject: [SPARK-10380][SQL] Fix confusing documentation examples for
 astype/drop_duplicates.

## What changes were proposed in this pull request?

We have seen users getting confused by the documentation for astype and
drop_duplicates, because the examples in them do not use these functions
(but do use their aliases). This patch simply removes all examples for
these functions and states that they are aliases.

## How was this patch tested?

Existing PySpark unit tests.

Closes #11543.

Author: Reynold Xin

Closes #11698 from rxin/SPARK-10380.
---
 python/pyspark/sql/column.py    |  4 ++--
 python/pyspark/sql/dataframe.py | 20 +++++++++++++++-----
 2 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/python/pyspark/sql/column.py b/python/pyspark/sql/column.py
index 3866a49c0b..19ec6fcc5d 100644
--- a/python/pyspark/sql/column.py
+++ b/python/pyspark/sql/column.py
@@ -22,7 +22,7 @@ if sys.version >= '3':
     basestring = str
     long = int
 
-from pyspark import since
+from pyspark import copy_func, since
 from pyspark.context import SparkContext
 from pyspark.rdd import ignore_unicode_prefix
 from pyspark.sql.types import *
@@ -337,7 +337,7 @@ class Column(object):
             raise TypeError("unexpected type: %s" % type(dataType))
         return Column(jc)
 
-    astype = cast
+    astype = copy_func(cast, sinceversion=1.4, doc=":func:`astype` is an alias for :func:`cast`.")
 
     @since(1.3)
     def between(self, lowerBound, upperBound):
diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py
index 7008e8fadf..7e1854c43b 100644
--- a/python/pyspark/sql/dataframe.py
+++ b/python/pyspark/sql/dataframe.py
@@ -26,7 +26,7 @@ if sys.version >= '3':
 else:
     from itertools import imap as map
 
-from pyspark import since
+from pyspark import copy_func, since
 from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
 from pyspark.serializers import BatchedSerializer, PickleSerializer, UTF8Deserializer
 from pyspark.storagelevel import StorageLevel
@@ -829,8 +829,6 @@ class DataFrame(object):
             raise TypeError("condition should be string or Column")
         return DataFrame(jdf, self.sql_ctx)
 
-    where = filter
-
     @ignore_unicode_prefix
     @since(1.3)
     def groupBy(self, *cols):
@@ -1361,8 +1359,20 @@ class DataFrame(object):
     # Pandas compatibility
     ##########################################################################################
 
-    groupby = groupBy
-    drop_duplicates = dropDuplicates
+    groupby = copy_func(
+        groupBy,
+        sinceversion=1.4,
+        doc=":func:`groupby` is an alias for :func:`groupBy`.")
+
+    drop_duplicates = copy_func(
+        dropDuplicates,
+        sinceversion=1.4,
+        doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
+
+    where = copy_func(
+        filter,
+        sinceversion=1.3,
+        doc=":func:`where` is an alias for :func:`filter`.")
 
 
 def _to_scala_map(sc, jm):
--
cgit v1.2.3
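
The patch only imports `copy_func` from `pyspark`; its body is not part of this diff. Below is a minimal sketch of such a helper, assuming it clones the function object and replaces the docstring so each alias can document itself without mutating the original. The `versionadded` handling and the `Frame` class are assumptions for illustration; the real helper may instead reuse PySpark's `since` decorator.

```python
import types


def copy_func(f, sinceversion=None, doc=None):
    """Sketch of a copy_func helper (assumed behavior, not necessarily
    the actual pyspark implementation): return a copy of ``f`` that can
    carry its own docstring, leaving ``f`` untouched."""
    fn = types.FunctionType(f.__code__, f.__globals__, f.__name__,
                            f.__defaults__, f.__closure__)
    fn.__dict__.update(f.__dict__)  # keep any attributes set on f
    if doc is not None:
        fn.__doc__ = doc
    if sinceversion is not None:
        # Assumption: append a Sphinx versionadded note, mirroring what
        # pyspark's @since decorator does for regular methods.
        fn.__doc__ = (fn.__doc__ or "") + ("\n\n.. versionadded:: %s" % sinceversion)
    return fn


class Frame(object):  # hypothetical stand-in for pyspark's DataFrame
    def dropDuplicates(self):
        """Return a new frame with duplicate rows removed.

        >>> frame.drop_duplicates()  # example mentions only the alias
        """

    drop_duplicates = copy_func(
        dropDuplicates,
        sinceversion=1.4,
        doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
```

With a plain `drop_duplicates = dropDuplicates` binding, both names share one function object, so Sphinx renders the alias with the original's docstring and examples, which is exactly the confusion this patch removes; copying the function lets each name carry its own doc.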