about summary refs log tree commit diff
path: root/python/pyspark/sql/types.py
diff options
context:
space:
mode:
Diffstat (limited to 'python/pyspark/sql/types.py')
-rw-r--r--  python/pyspark/sql/types.py  22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/python/pyspark/sql/types.py b/python/pyspark/sql/types.py
index f7cd4b80ca..30ab130f29 100644
--- a/python/pyspark/sql/types.py
+++ b/python/pyspark/sql/types.py
@@ -1359,7 +1359,13 @@ def _create_row(fields, values):
class Row(tuple):
"""
- A row in L{DataFrame}. The fields in it can be accessed like attributes.
+ A row in L{DataFrame}.
+ The fields in it can be accessed:
+
+ * like attributes (``row.key``)
+ * like dictionary values (``row[key]``)
+
+ ``key in row`` will search through row keys.
Row can be used to create a row object by using named arguments,
the fields will be sorted by names.
@@ -1371,6 +1377,10 @@ class Row(tuple):
('Alice', 11)
>>> row.name, row.age
('Alice', 11)
+ >>> 'name' in row
+ True
+ >>> 'wrong_key' in row
+ False
Row also can be used to create another Row like class, then it
could be used to create Row objects, such as
@@ -1378,6 +1388,10 @@ class Row(tuple):
>>> Person = Row("name", "age")
>>> Person
<Row(name, age)>
+ >>> 'name' in Person
+ True
+ >>> 'wrong_key' in Person
+ False
>>> Person("Alice", 11)
Row(name='Alice', age=11)
"""
@@ -1431,6 +1445,12 @@ class Row(tuple):
else:
return dict(zip(self.__fields__, self))
+ def __contains__(self, item):
+ if hasattr(self, "__fields__"):
+ return item in self.__fields__
+ else:
+ return super(Row, self).__contains__(item)
+
# let object acts like class
def __call__(self, *args):
"""create new Row object"""