about summary refs log tree commit diff
path: root/python
diff options
context:
space:
mode:
authorYanbo Liang <ybliang8@gmail.com>2016-02-12 12:43:13 -0800
committerDavies Liu <davies.liu@gmail.com>2016-02-12 12:43:13 -0800
commit90de6b2fae71d05415610be70300625c409f6092 (patch)
tree471eadb784d56770c3a7ee570237b1d4e1a36333 /python
parentac7d6af1cafc6b159d1df6cf349bb0c7ffca01cd (diff)
download spark-90de6b2fae71d05415610be70300625c409f6092.tar.gz
spark-90de6b2fae71d05415610be70300625c409f6092.tar.bz2
spark-90de6b2fae71d05415610be70300625c409f6092.zip
[SPARK-12962] [SQL] [PySpark] PySpark support covar_samp and covar_pop
PySpark support ```covar_samp``` and ```covar_pop```. cc rxin davies marmbrus Author: Yanbo Liang <ybliang8@gmail.com> Closes #10876 from yanboliang/spark-12962.
Diffstat (limited to 'python')
-rw-r--r--python/pyspark/sql/functions.py41
1 file changed, 35 insertions, 6 deletions
diff --git a/python/pyspark/sql/functions.py b/python/pyspark/sql/functions.py
index 680493e0e6..416d722bba 100644
--- a/python/pyspark/sql/functions.py
+++ b/python/pyspark/sql/functions.py
@@ -250,17 +250,46 @@ def corr(col1, col2):
"""Returns a new :class:`Column` for the Pearson Correlation Coefficient for ``col1``
and ``col2``.
- >>> a = [x * x - 2 * x + 3.5 for x in range(20)]
- >>> b = range(20)
- >>> corrDf = sqlContext.createDataFrame(zip(a, b))
- >>> corrDf = corrDf.agg(corr(corrDf._1, corrDf._2).alias('c'))
- >>> corrDf.selectExpr('abs(c - 0.9572339139475857) < 1e-16 as t').collect()
- [Row(t=True)]
+ >>> a = range(20)
+ >>> b = [2 * x for x in range(20)]
+ >>> df = sqlContext.createDataFrame(zip(a, b), ["a", "b"])
+ >>> df.agg(corr("a", "b").alias('c')).collect()
+ [Row(c=1.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2)))
+@since(2.0)
+def covar_pop(col1, col2):
+ """Returns a new :class:`Column` for the population covariance of ``col1``
+ and ``col2``.
+
+ >>> a = [1] * 10
+ >>> b = [1] * 10
+ >>> df = sqlContext.createDataFrame(zip(a, b), ["a", "b"])
+ >>> df.agg(covar_pop("a", "b").alias('c')).collect()
+ [Row(c=0.0)]
+ """
+ sc = SparkContext._active_spark_context
+ return Column(sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2)))
+
+
+@since(2.0)
+def covar_samp(col1, col2):
+ """Returns a new :class:`Column` for the sample covariance of ``col1``
+ and ``col2``.
+
+ >>> a = [1] * 10
+ >>> b = [1] * 10
+ >>> df = sqlContext.createDataFrame(zip(a, b), ["a", "b"])
+ >>> df.agg(covar_samp("a", "b").alias('c')).collect()
+ [Row(c=0.0)]
+ """
+ sc = SparkContext._active_spark_context
+ return Column(sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2)))
+
+
@since(1.3)
def countDistinct(col, *cols):
"""Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.