about summary refs log tree commit diff
path: root/python/pyspark
diff options
context:
space:
mode:
authorDavies Liu <davies@databricks.com>2015-03-26 00:01:24 -0700
committerReynold Xin <rxin@databricks.com>2015-03-26 00:01:32 -0700
commit0ba759985288f5df6940c37f5f401bc31de53a1c (patch)
treedd309547ba0eeb0498dac156b83e2d1e2da18815 /python/pyspark
parent9edb34fc38147bd150340e006f05d346b4a40f8c (diff)
downloadspark-0ba759985288f5df6940c37f5f401bc31de53a1c.tar.gz
spark-0ba759985288f5df6940c37f5f401bc31de53a1c.tar.bz2
spark-0ba759985288f5df6940c37f5f401bc31de53a1c.zip
[SPARK-6536] [PySpark] Column.inSet() in Python
``` >>> df[df.name.inSet("Bob", "Mike")].collect() [Row(age=5, name=u'Bob')] >>> df[df.age.inSet([1, 2, 3])].collect() [Row(age=2, name=u'Alice')] ``` Author: Davies Liu <davies@databricks.com> Closes #5190 from davies/in and squashes the following commits: 6b73a47 [Davies Liu] Column.inSet() in Python (cherry picked from commit f535802977c5a3ce45894d89fdf59f8723f023c8) Signed-off-by: Reynold Xin <rxin@databricks.com>
Diffstat (limited to 'python/pyspark')
-rw-r--r--python/pyspark/sql/dataframe.py17
1 file changed, 17 insertions, 0 deletions
diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py
index 5cb89da7a8..bf7c47b726 100644
--- a/python/pyspark/sql/dataframe.py
+++ b/python/pyspark/sql/dataframe.py
@@ -985,6 +985,23 @@ class Column(object):
__getslice__ = substr
+ def inSet(self, *cols):
+ """ A boolean expression that is evaluated to true if the value of this
+ expression is contained by the evaluated values of the arguments.
+
+ >>> df[df.name.inSet("Bob", "Mike")].collect()
+ [Row(age=5, name=u'Bob')]
+ >>> df[df.age.inSet([1, 2, 3])].collect()
+ [Row(age=2, name=u'Alice')]
+ """
+ if len(cols) == 1 and isinstance(cols[0], (list, set)):
+ cols = cols[0]
+ cols = [c._jc if isinstance(c, Column) else _create_column_from_literal(c) for c in cols]
+ sc = SparkContext._active_spark_context
+ jcols = ListConverter().convert(cols, sc._gateway._gateway_client)
+ jc = getattr(self._jc, "in")(sc._jvm.PythonUtils.toSeq(jcols))
+ return Column(jc)
+
# order
asc = _unary_op("asc", "Returns a sort expression based on the"
" ascending order of the given column name.")