Diffstat (limited to 'python/pyspark/sql/functions.py')
-rw-r--r--  python/pyspark/sql/functions.py  | 24
1 file changed, 0 insertions(+), 24 deletions(-)
diff --git a/python/pyspark/sql/functions.py b/python/pyspark/sql/functions.py
index 25594d79c2..7c15e38458 100644
--- a/python/pyspark/sql/functions.py
+++ b/python/pyspark/sql/functions.py
@@ -149,12 +149,8 @@ _binary_mathfunctions = {
}
_window_functions = {
- 'rowNumber':
- """.. note:: Deprecated in 1.6, use row_number instead.""",
'row_number':
"""returns a sequential number starting at 1 within a window partition.""",
- 'denseRank':
- """.. note:: Deprecated in 1.6, use dense_rank instead.""",
'dense_rank':
"""returns the rank of rows within a window partition, without any gaps.
@@ -171,13 +167,9 @@ _window_functions = {
place and that the next person came in third.
This is equivalent to the RANK function in SQL.""",
- 'cumeDist':
- """.. note:: Deprecated in 1.6, use cume_dist instead.""",
'cume_dist':
"""returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.""",
- 'percentRank':
- """.. note:: Deprecated in 1.6, use percent_rank instead.""",
'percent_rank':
"""returns the relative rank (i.e. percentile) of rows within a window partition.""",
}
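
The hunks above drop the deprecated camelCase aliases (rowNumber, denseRank, cumeDist, percentRank) and keep only the snake_case window functions. A minimal usage sketch of the surviving functions follows; the DataFrame, column names, and SparkSession setup are illustrative only and not part of this patch.

# Sketch: applying the remaining snake_case window functions over a window spec.
from pyspark.sql import SparkSession, Window
from pyspark.sql import functions as F

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame(
    [("a", 1), ("a", 2), ("a", 2), ("b", 5)], ["key", "value"])  # illustrative data

w = Window.partitionBy("key").orderBy("value")
df.select(
    "key", "value",
    F.row_number().over(w).alias("row_number"),      # sequential number within the partition
    F.dense_rank().over(w).alias("dense_rank"),      # rank without gaps
    F.percent_rank().over(w).alias("percent_rank"),  # relative rank (percentile)
    F.cume_dist().over(w).alias("cume_dist"),        # cumulative distribution
).show()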
@@ -318,14 +310,6 @@ def isnull(col):
return Column(sc._jvm.functions.isnull(_to_java_column(col)))
-@since(1.4)
-def monotonicallyIncreasingId():
- """
- .. note:: Deprecated in 1.6, use monotonically_increasing_id instead.
- """
- return monotonically_increasing_id()
-
-
@since(1.6)
def monotonically_increasing_id():
"""A column that generates monotonically increasing 64-bit integers.
@@ -434,14 +418,6 @@ def shiftRightUnsigned(col, numBits):
return Column(jc)
-@since(1.4)
-def sparkPartitionId():
- """
- .. note:: Deprecated in 1.6, use spark_partition_id instead.
- """
- return spark_partition_id()
-
-
@since(1.6)
def spark_partition_id():
"""A column for partition ID of the Spark task.