author    zero323 <zero323@users.noreply.github.com>  2017-02-13 15:23:56 -0800
committer Holden Karau <holden@us.ibm.com>  2017-02-13 15:23:56 -0800
commit    e02ac303c6356cdf7fffec7361311d828a723afe (patch)
tree      a9546d0c1f69c2325b23ffbf06c9e7b28ae916fd /python
parent    0169360ef58891ca10a8d64d1c8637c7b873cbdd (diff)
[SPARK-19429][PYTHON][SQL] Support slice arguments in Column.__getitem__
## What changes were proposed in this pull request?

- Add support for `slice` arguments in `Column.__getitem__`.
- Remove obsolete `__getslice__` bindings.

## How was this patch tested?

Existing unit tests, additional tests covering `[]` with `slice`.

Author: zero323 <zero323@users.noreply.github.com>

Closes #16771 from zero323/SPARK-19429.
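Usage sketch (not part of the patch): with this change applied, slicing a string column delegates to `Column.substr(startPos, length)`, while a slice with a step raises `ValueError`. The session, DataFrame, and column names below are illustrative assumptions.

```python
from pyspark.sql import SparkSession
from pyspark.sql.functions import col

# Illustrative local session and data; names are assumptions, not part of the patch.
spark = SparkSession.builder.master("local[1]").appName("column-slice-demo").getOrCreate()
df = spark.createDataFrame([("Alice",), ("Bob",)], ["name"])

# name[1:3] delegates to substr(1, 3): start at position 1, take 3 characters -> 'Ali'.
df.select(col("name")[1:3].alias("prefix")).show()

# A slice with a step is rejected by the new __getitem__.
try:
    col("name")[0:10:2]
except ValueError as e:
    print(e)  # slice with step is not supported.

spark.stop()
```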
Diffstat (limited to 'python')
-rw-r--r--  python/pyspark/sql/column.py  11
-rw-r--r--  python/pyspark/sql/tests.py   8
2 files changed, 16 insertions, 3 deletions
diff --git a/python/pyspark/sql/column.py b/python/pyspark/sql/column.py
index ec059d6258..73c8672eff 100644
--- a/python/pyspark/sql/column.py
+++ b/python/pyspark/sql/column.py
@@ -180,7 +180,6 @@ class Column(object):
    # container operators
    __contains__ = _bin_op("contains")
-    __getitem__ = _bin_op("apply")
    # bitwise operators
    bitwiseOR = _bin_op("bitwiseOR")
@@ -236,6 +235,14 @@ class Column(object):
            raise AttributeError(item)
        return self.getField(item)
+    def __getitem__(self, k):
+        if isinstance(k, slice):
+            if k.step is not None:
+                raise ValueError("slice with step is not supported.")
+            return self.substr(k.start, k.stop)
+        else:
+            return _bin_op("apply")(self, k)
+
    def __iter__(self):
        raise TypeError("Column is not iterable")
@@ -267,8 +274,6 @@ class Column(object):
raise TypeError("Unexpected type: %s" % type(startPos))
return Column(jc)
- __getslice__ = substr
-
@ignore_unicode_prefix
@since(1.5)
def isin(self, *cols):
diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py
index ab9d3f6c94..d9d03337ff 100644
--- a/python/pyspark/sql/tests.py
+++ b/python/pyspark/sql/tests.py
@@ -874,6 +874,14 @@ class SQLTests(ReusedPySparkTestCase):
        self.assertTrue(all(isinstance(c, Column) for c in css))
        self.assertTrue(isinstance(ci.cast(LongType()), Column))
+    def test_column_getitem(self):
+        from pyspark.sql.functions import col
+
+        self.assertIsInstance(col("foo")[1:3], Column)
+        self.assertIsInstance(col("foo")[0], Column)
+        self.assertIsInstance(col("foo")["bar"], Column)
+        self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
+
    def test_column_select(self):
        df = self.df
        self.assertEqual(self.testData, df.select("*").collect())
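Usage note (not part of the patch): the non-slice branch still routes through the JVM `apply` call, so integer keys index array columns and string keys look up map values or struct fields. A minimal sketch, assuming an active SparkSession named `spark`; the column names are illustrative.

```python
from pyspark.sql import Row

# Illustrative data: an array column and a map column.
df = spark.createDataFrame([Row(xs=["a", "b", "c"], m={"bar": 1})])

# Integer key -> array element; string key -> map value. Both return Column objects.
df.select(df.xs[0], df.m["bar"]).show()
```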