author    Yin Huai <yhuai@databricks.com>    2015-06-22 13:51:23 -0700
committer Yin Huai <yhuai@databricks.com>    2015-06-22 13:51:23 -0700
commit    5ab9fcfb01a0ad2f6c103f67c1a785d3b49e33f0 (patch)
tree      150b4f251fdaa2cd07e0350ba2078503238c9e2f /python/pyspark/sql/readwriter.py
parent    da7bbb9435dae9a3bedad578599d96ea858f349e (diff)
[SPARK-8532] [SQL] In Python's DataFrameWriter, save/saveAsTable/json/parquet/jdbc always override mode
https://issues.apache.org/jira/browse/SPARK-8532

This PR has two changes. First, it fixes the bug that save actions (i.e. `save/saveAsTable/json/parquet/jdbc`) always override mode. Second, it adds the input argument `partitionBy` to `save/saveAsTable/parquet`.

Author: Yin Huai <yhuai@databricks.com>

Closes #6937 from yhuai/SPARK-8532 and squashes the following commits:

f972d5d [Yin Huai] davies's comment.
d37abd2 [Yin Huai] style.
d21290a [Yin Huai] Python doc.
889eb25 [Yin Huai] Minor refactoring and add partitionBy to save, saveAsTable, and parquet.
7fbc24b [Yin Huai] Use None instead of "error" as the default value of mode since the JVM side already uses "error" as the default value.
d696dff [Yin Huai] Python style.
88eb6c4 [Yin Huai] If mode is "error", do not call mode method.
c40c461 [Yin Huai] Regression test.
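For illustration, a short sketch of the fixed behavior, in the doctest style of this module (assuming, as the module's doctests do, an existing DataFrame `df` and writable temp paths). Before this patch, the explicit `mode('append')` below was silently clobbered by `parquet()`'s old `mode="error"` default, so the second write raised:

>>> import os, tempfile
>>> path = os.path.join(tempfile.mkdtemp(), 'data')
>>> df.write.mode('append').parquet(path)  # creates the data
>>> df.write.mode('append').parquet(path)  # used to raise; now appends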
Diffstat (limited to 'python/pyspark/sql/readwriter.py')
-rw-r--r--  python/pyspark/sql/readwriter.py | 30
1 file changed, 19 insertions, 11 deletions
diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py
index f036644acc..1b7bc0f9a1 100644
--- a/python/pyspark/sql/readwriter.py
+++ b/python/pyspark/sql/readwriter.py
@@ -218,7 +218,10 @@ class DataFrameWriter(object):
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
- self._jwrite = self._jwrite.mode(saveMode)
+ # At the JVM side, the default value of mode is already set to "error".
+ # So, if the given saveMode is None, we will not call JVM-side's mode method.
+ if saveMode is not None:
+ self._jwrite = self._jwrite.mode(saveMode)
return self
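The idea behind the new guard is that None means "defer to the JVM-side default". A self-contained sketch of the pattern, with a hypothetical _FakeJWrite standing in for the Py4J handle (not part of the patch):

class _FakeJWrite(object):
    """Hypothetical stand-in for the Py4J DataFrameWriter handle."""
    def __init__(self):
        self.save_mode = "error"  # the JVM-side default

    def mode(self, saveMode):
        self.save_mode = saveMode
        return self

class Writer(object):
    def __init__(self):
        self._jwrite = _FakeJWrite()

    def mode(self, saveMode=None):
        # Only forward an explicit mode; None leaves the JVM default alone.
        if saveMode is not None:
            self._jwrite = self._jwrite.mode(saveMode)
        return self

assert Writer().mode('append')._jwrite.save_mode == 'append'
assert Writer().mode()._jwrite.save_mode == 'error'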
@since(1.4)
@@ -253,11 +256,12 @@ class DataFrameWriter(object):
"""
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
- self._jwrite = self._jwrite.partitionBy(_to_seq(self._sqlContext._sc, cols))
+ if len(cols) > 0:
+ self._jwrite = self._jwrite.partitionBy(_to_seq(self._sqlContext._sc, cols))
return self
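partitionBy() accepts either varargs or a single list/tuple, and now skips the Py4J call entirely when no columns are given; that is what makes the empty-tuple defaults added below to save/saveAsTable/parquet harmless no-ops. A sketch of the call forms, with `df` assumed as above and hypothetical `year`/`month` columns:

>>> w = df.write.partitionBy('year', 'month')    # varargs
>>> w = df.write.partitionBy(['year', 'month'])  # a single list is unpacked
>>> w = df.write.partitionBy()                   # no columns: JVM side untouched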
@since(1.4)
- def save(self, path=None, format=None, mode="error", **options):
+ def save(self, path=None, format=None, mode=None, partitionBy=(), **options):
"""Saves the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
@@ -272,11 +276,12 @@ class DataFrameWriter(object):
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
+ :param partitionBy: names of partitioning columns
:param options: all other string options
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
- self.mode(mode).options(**options)
+ self.partitionBy(partitionBy).mode(mode).options(**options)
if format is not None:
self.format(format)
if path is None:
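With both changes, save() forwards the partitioning columns first and only overrides the mode when one was given. A usage sketch under the same assumptions (`df`, temp paths):

>>> out = os.path.join(tempfile.mkdtemp(), 'data')
>>> df.write.save(out, format='parquet', mode='overwrite', partitionBy=['age'])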
@@ -296,7 +301,7 @@ class DataFrameWriter(object):
self._jwrite.mode("overwrite" if overwrite else "append").insertInto(tableName)
@since(1.4)
- def saveAsTable(self, name, format=None, mode="error", **options):
+ def saveAsTable(self, name, format=None, mode=None, partitionBy=(), **options):
"""Saves the content of the :class:`DataFrame` as the specified table.
In the case the table already exists, behavior of this function depends on the
@@ -312,15 +317,16 @@ class DataFrameWriter(object):
:param name: the table name
:param format: the format used to save
:param mode: one of `append`, `overwrite`, `error`, `ignore` (default: error)
+ :param partitionBy: names of partitioning columns
:param options: all other string options
"""
- self.mode(mode).options(**options)
+ self.partitionBy(partitionBy).mode(mode).options(**options)
if format is not None:
self.format(format)
self._jwrite.saveAsTable(name)
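A saveAsTable() sketch under the same assumptions; the table name `people_by_age` is hypothetical, and persisting a managed table assumes a Hive-enabled SQLContext:

>>> df.write.saveAsTable('people_by_age', format='parquet',
...                      mode='overwrite', partitionBy=['age'])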
@since(1.4)
- def json(self, path, mode="error"):
+ def json(self, path, mode=None):
"""Saves the content of the :class:`DataFrame` in JSON format at the specified path.
:param path: the path in any Hadoop supported file system
@@ -333,10 +339,10 @@ class DataFrameWriter(object):
>>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
"""
- self._jwrite.mode(mode).json(path)
+ self.mode(mode)._jwrite.json(path)
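json() now routes through the Python-level mode() wrapper, so a None mode no longer clobbers the JVM default. A sketch, with `df` assumed as above:

>>> p = os.path.join(tempfile.mkdtemp(), 'data')
>>> df.write.json(p)                    # no mode given: JVM default ('error') applies
>>> df.write.mode('overwrite').json(p)  # an explicit mode still wins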
@since(1.4)
- def parquet(self, path, mode="error"):
+ def parquet(self, path, mode=None, partitionBy=()):
"""Saves the content of the :class:`DataFrame` in Parquet format at the specified path.
:param path: the path in any Hadoop supported file system
@@ -346,13 +352,15 @@ class DataFrameWriter(object):
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
+ :param partitionBy: names of partitioning columns
>>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
- self._jwrite.mode(mode).parquet(path)
+ self.partitionBy(partitionBy).mode(mode)
+ self._jwrite.parquet(path)
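Partitioned Parquet output places each distinct value of a partition column in its own subdirectory. A sketch, assuming `df` has an `age` column as in the module's doctests; the directory listing in the comment is illustrative, not real output:

>>> out = os.path.join(tempfile.mkdtemp(), 'data')
>>> df.write.parquet(out, mode='overwrite', partitionBy=['age'])
>>> sorted(os.listdir(out))  # e.g. ['_SUCCESS', 'age=2', 'age=5']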
@since(1.4)
- def jdbc(self, url, table, mode="error", properties={}):
+ def jdbc(self, url, table, mode=None, properties={}):
"""Saves the content of the :class:`DataFrame` to a external database table via JDBC.
.. note:: Don't create too many partitions in parallel on a large cluster;\