aboutsummaryrefslogtreecommitdiff
path: root/python
diff options
context:
space:
mode:
authorDavies Liu <davies@databricks.com>2015-06-23 15:51:16 -0700
committerDavies Liu <davies@databricks.com>2015-06-23 15:51:16 -0700
commit7fb5ae5024284593204779ff463bfbdb4d1c6da5 (patch)
treeac48f191ea7cdbf4db05cbc67039b7e3602830cf /python
parentd96d7b55746cf034e3935ec4b22614a99e48c498 (diff)
downloadspark-7fb5ae5024284593204779ff463bfbdb4d1c6da5.tar.gz
spark-7fb5ae5024284593204779ff463bfbdb4d1c6da5.tar.bz2
spark-7fb5ae5024284593204779ff463bfbdb4d1c6da5.zip
[SPARK-8573] [SPARK-8568] [SQL] [PYSPARK] raise Exception if column is used in boolean expression
It's a common mistake that a user will put a Column in a boolean expression (together with `and`, `or`), which does not work as expected; we should raise an exception in that case and suggest the user use `&`, `|` instead. Author: Davies Liu <davies@databricks.com> Closes #6961 from davies/column_bool and squashes the following commits: 9f19beb [Davies Liu] update message af74bd6 [Davies Liu] fix tests 07dff84 [Davies Liu] address comments, fix tests f70c08e [Davies Liu] raise Exception if column is used in boolean expression
Diffstat (limited to 'python')
-rw-r--r--python/pyspark/sql/column.py5
-rw-r--r--python/pyspark/sql/tests.py10
2 files changed, 14 insertions, 1 deletion
diff --git a/python/pyspark/sql/column.py b/python/pyspark/sql/column.py
index 1ecec5b126..0a85da7443 100644
--- a/python/pyspark/sql/column.py
+++ b/python/pyspark/sql/column.py
@@ -396,6 +396,11 @@ class Column(object):
jc = self._jc.over(window._jspec)
return Column(jc)
+ def __nonzero__(self):
+ raise ValueError("Cannot convert column into bool: please use '&' for 'and', '|' for 'or', "
+ "'~' for 'not' when building DataFrame boolean expressions.")
+ __bool__ = __nonzero__
+
def __repr__(self):
return 'Column<%s>' % self._jc.toString().encode('utf8')
diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py
index 13f4556943..e6a434e4b2 100644
--- a/python/pyspark/sql/tests.py
+++ b/python/pyspark/sql/tests.py
@@ -164,6 +164,14 @@ class SQLTests(ReusedPySparkTestCase):
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
+ def test_and_in_expression(self):
+ self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
+ self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
+ self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
+ self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
+ self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
+ self.assertRaises(ValueError, lambda: not self.df.key == 1)
+
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
@@ -408,7 +416,7 @@ class SQLTests(ReusedPySparkTestCase):
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
- cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7, ci and cs, ci or cs]
+ cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))