author    madhukar <phatak.dev@gmail.com>    2015-05-11 17:04:11 -0700
committer Reynold Xin <rxin@databricks.com>  2015-05-11 17:04:11 -0700
commit    57255dcd794222f4db5df1e549ebc7b896cebfdc (patch)
tree      edede56e7c8f7a379f07e64a63ca64d4801eca0c /sql
parent    4f4dbb030c208caba18f314a1ef1751696627d26 (diff)
[SPARK-7084] improve saveAsTable documentation
Author: madhukar <phatak.dev@gmail.com>

Closes #5654 from phatak-dev/master and squashes the following commits:

386f407 [madhukar] #5654 updated for all the methods
2c997c5 [madhukar] Merge branch 'master' of https://github.com/apache/spark
00bc819 [madhukar] Merge branch 'master' of https://github.com/apache/spark
2a802c6 [madhukar] #5654 updated the doc according to comments
866e8df [madhukar] [SPARK-7084] improve saveAsTable documentation
Diffstat (limited to 'sql')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala  18
1 file changed, 18 insertions, 0 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index c92ca607fb..729cfc1da2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -1192,6 +1192,9 @@ class DataFrame private[sql](
* there is no notion of a persisted catalog in a standard SQL context. Instead you can write
* an RDD out to a parquet file, and then register that file as a table. This "table" can then
* be the target of an `insertInto`.
+ *
+ * Also note that while this function can persist the table metadata into Hive's metastore,
+ * the table will NOT be accessible from Hive.
* @group output
*/
@Experimental
@@ -1208,6 +1211,9 @@ class DataFrame private[sql](
* there is no notion of a persisted catalog in a standard SQL context. Instead you can write
* an RDD out to a parquet file, and then register that file as a table. This "table" can then
* be the target of an `insertInto`.
+ *
+ * Also note that while this function can persist the table metadata into Hive's metastore,
+ * the table will NOT be accessible from Hive.
* @group output
*/
@Experimental
@@ -1232,6 +1238,9 @@ class DataFrame private[sql](
* there is no notion of a persisted catalog in a standard SQL context. Instead you can write
* an RDD out to a parquet file, and then register that file as a table. This "table" can then
* be the target of an `insertInto`.
+ *
+ * Also note that while this function can persist the table metadata into Hive's metastore,
+ * the table will NOT be accessible from Hive.
* @group output
*/
@Experimental
@@ -1248,6 +1257,9 @@ class DataFrame private[sql](
* there is no notion of a persisted catalog in a standard SQL context. Instead you can write
* an RDD out to a parquet file, and then register that file as a table. This "table" can then
* be the target of an `insertInto`.
+ *
+ * Also note that while this function can persist the table metadata into Hive's metastore,
+ * the table will NOT be accessible from Hive.
* @group output
*/
@Experimental
@@ -1264,6 +1276,9 @@ class DataFrame private[sql](
* there is no notion of a persisted catalog in a standard SQL context. Instead you can write
* an RDD out to a parquet file, and then register that file as a table. This "table" can then
* be the target of an `insertInto`.
+ *
+ * Also note that while this function can persist the table metadata into Hive's metastore,
+ * the table will NOT be accessible from Hive.
* @group output
*/
@Experimental
@@ -1285,6 +1300,9 @@ class DataFrame private[sql](
* there is no notion of a persisted catalog in a standard SQL context. Instead you can write
* an RDD out to a parquet file, and then register that file as a table. This "table" can then
* be the target of an `insertInto`.
+ *
+ * Also note that while this function can persist the table metadata into Hive's metastore,
+ * the table will NOT be accessible from Hive.
* @group output
*/
@Experimental
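
For context, the workaround the amended Scaladoc describes for a plain SQLContext (which has no persistent catalog) looks roughly like the sketch below. It is not part of this patch; the file paths, table names, and input data are hypothetical, and it uses the 1.x-era API as it existed around this commit (jsonFile, saveAsParquetFile, parquetFile, registerTempTable, insertInto).

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

val sc = new SparkContext(new SparkConf().setAppName("saveAsTable-workaround").setMaster("local[*]"))
val sqlContext = new SQLContext(sc)

// Hypothetical input data.
val people = sqlContext.jsonFile("/tmp/people.json")

// With a plain SQLContext there is no persisted catalog, so instead of
// saveAsTable, write the data out as a Parquet file...
people.saveAsParquetFile("/tmp/people.parquet")

// ...register that file as a table...
sqlContext.parquetFile("/tmp/people.parquet").registerTempTable("people")

// ...and that "table" can then be the target of an insertInto.
val morePeople = sqlContext.jsonFile("/tmp/more_people.json")
morePeople.insertInto("people")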