diff options
author | Yijie Shen <henry.yijieshen@gmail.com> | 2015-08-05 17:28:23 -0700 |
---|---|---|
committer | Davies Liu <davies.liu@gmail.com> | 2015-08-05 17:28:23 -0700 |
commit | 8c320e45b5c9ffd7f0e35c1c7e6b5fc355377ea6 (patch) | |
tree | b2d46b66383a5a6b263024b055134a1c9a96cf3f /python/pyspark | |
parent | a018b85716fd510ae95a3c66d676bbdb90f8d4e7 (diff) | |
download | spark-8c320e45b5c9ffd7f0e35c1c7e6b5fc355377ea6.tar.gz spark-8c320e45b5c9ffd7f0e35c1c7e6b5fc355377ea6.tar.bz2 spark-8c320e45b5c9ffd7f0e35c1c7e6b5fc355377ea6.zip |
[SPARK-6591] [SQL] Python data source load options should auto convert common types into strings
JIRA: https://issues.apache.org/jira/browse/SPARK-6591
Author: Yijie Shen <henry.yijieshen@gmail.com>
Closes #7926 from yjshen/py_dsload_opt and squashes the following commits:
b207832 [Yijie Shen] fix style
efdf834 [Yijie Shen] resolve comment
7a8f6a2 [Yijie Shen] lowercase
822e769 [Yijie Shen] convert load opts to string
Diffstat (limited to 'python/pyspark')
-rw-r--r-- | python/pyspark/sql/readwriter.py | 17 |
1 file changed, 14 insertions, 3 deletions
diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py index dea8bad79e..bf6ac084bb 100644 --- a/python/pyspark/sql/readwriter.py +++ b/python/pyspark/sql/readwriter.py @@ -24,6 +24,16 @@ from pyspark.sql.types import * __all__ = ["DataFrameReader", "DataFrameWriter"] +def to_str(value): + """ + A wrapper over str(), but convert bool values to lower case string + """ + if isinstance(value, bool): + return str(value).lower() + else: + return str(value) + + class DataFrameReader(object): """ Interface used to load a :class:`DataFrame` from external storage systems @@ -77,7 +87,7 @@ class DataFrameReader(object): def option(self, key, value): """Adds an input option for the underlying data source. """ - self._jreader = self._jreader.option(key, value) + self._jreader = self._jreader.option(key, to_str(value)) return self @since(1.4) @@ -85,7 +95,7 @@ class DataFrameReader(object): """Adds input options for the underlying data source. """ for k in options: - self._jreader = self._jreader.option(k, options[k]) + self._jreader = self._jreader.option(k, to_str(options[k])) return self @since(1.4) @@ -97,7 +107,8 @@ class DataFrameReader(object): :param schema: optional :class:`StructType` for the input schema. :param options: all other string options - >>> df = sqlContext.read.load('python/test_support/sql/parquet_partitioned') + >>> df = sqlContext.read.load('python/test_support/sql/parquet_partitioned', opt1=True, + ... opt2=1, opt3='str') >>> df.dtypes [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')] """ |