author     Josh Rosen <joshrosen@databricks.com>   2015-01-29 16:23:20 -0800
committer  Reynold Xin <rxin@databricks.com>       2015-01-29 16:23:20 -0800
commit     0bb15f22d1694d3ac0476eb14142b1b1cc781690 (patch)
tree       4a2fbac49ea829e1005632645aeaa35c80d4e9bd /python
parent     c00d517d660ddc3c7b4302651e5567534a819905 (diff)
[SPARK-5464] Fix help() for Python DataFrame instances
This fixes an exception that prevented users from calling `help()` on Python DataFrame instances.

Author: Josh Rosen <joshrosen@databricks.com>

Closes #4278 from JoshRosen/SPARK-5464-python-dataframe-help-command and squashes the following commits:

08f95f7 [Josh Rosen] Fix exception when calling help() on Python DataFrame instances
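The failure mode comes from how pydoc probes objects: `help(df)` ends up doing lookups such as `getattr(df, '__name__', None)`, and the default value only kicks in when `__getattr__` raises `AttributeError`. The old `__getattr__` forwarded every missing name to the JVM, so those dunder probes surfaced a Py4J error instead. A minimal sketch, independent of Spark (the proxy classes and the `RuntimeError` standing in for the Py4J failure are illustrative, not part of the patch):

    import pydoc


    class BrokenProxy(object):
        """Mimics the pre-patch __getattr__: every missing attribute is
        forwarded, so pydoc's lookup of names like '__name__' escapes with
        a non-AttributeError exception (a RuntimeError here stands in for
        the Py4J error a real DataFrame would raise)."""

        def __getattr__(self, name):
            raise RuntimeError("no such column: %s" % name)


    class FixedProxy(object):
        """Mimics the patched __getattr__: missing dunder names raise
        AttributeError, so getattr(obj, '__name__', None) falls back to
        its default and pydoc/help() can document the object normally."""

        def __getattr__(self, name):
            if name.startswith("__"):
                raise AttributeError(name)
            return "column:%s" % name  # stand-in for Column(self._jdf.apply(name))


    print(pydoc.render_doc(FixedProxy())[:40])   # works after the fix
    try:
        pydoc.render_doc(BrokenProxy())          # fails the way SPARK-5464 describes
    except RuntimeError as e:
        print("help() would die with:", e)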
Diffstat (limited to 'python')
-rw-r--r--  python/pyspark/sql.py     6
-rw-r--r--  python/pyspark/tests.py  10
2 files changed, 13 insertions, 3 deletions
diff --git a/python/pyspark/sql.py b/python/pyspark/sql.py
index e636f992ec..3f2d7ac825 100644
--- a/python/pyspark/sql.py
+++ b/python/pyspark/sql.py
@@ -2136,9 +2136,9 @@ class DataFrame(object):
 
     def __getattr__(self, name):
         """ Return the column by given name """
-        if isinstance(name, basestring):
-            return Column(self._jdf.apply(name))
-        raise AttributeError
+        if name.startswith("__"):
+            raise AttributeError(name)
+        return Column(self._jdf.apply(name))
 
     def alias(self, name):
         """ Alias the current DataFrame """
diff --git a/python/pyspark/tests.py b/python/pyspark/tests.py
index 081a77fbb0..bec1961f26 100644
--- a/python/pyspark/tests.py
+++ b/python/pyspark/tests.py
@@ -23,6 +23,7 @@ from array import array
 from fileinput import input
 from glob import glob
 import os
+import pydoc
 import re
 import shutil
 import subprocess
@@ -1032,6 +1033,15 @@ class SQLTests(ReusedPySparkTestCase):
         from pyspark.sql import Aggregator as Agg
         # self.assertEqual((0, '100'), tuple(g.agg(Agg.first(df.key), Agg.last(df.value)).first()))
+    def test_help_command(self):
+        # Regression test for SPARK-5464
+        rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
+        df = self.sqlCtx.jsonRDD(rdd)
+        # render_doc() reproduces the help() exception without printing output
+        pydoc.render_doc(df)
+        pydoc.render_doc(df.foo)
+        pydoc.render_doc(df.take(1))
+
 class InputFormatTests(ReusedPySparkTestCase):
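As the in-test comment notes, pydoc.render_doc() builds the same documentation that help() would show but returns it as a string rather than paging it, so the test exercises the failing introspection path without producing output. A quick illustration in plain Python:

    import pydoc

    text = pydoc.render_doc(dict)
    print(text.splitlines()[0])  # "Python Library Documentation: class dict"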