author    Wenchen Fan <cloud0fan@163.com>    2015-10-08 12:42:10 -0700
committer Yin Huai <yhuai@databricks.com>    2015-10-08 12:42:10 -0700
commit    af2a5544875b23b3b62fb6d4f3bf432828720008 (patch)
tree      d8282c85ee2989d8e8d6e5f4216934d126eca20c /sql/core
parent    82d275f27c3e9211ce69c5c8685a0fe90c0be26f (diff)
[SPARK-10337] [SQL] fix hive views on non-hive-compatible tables.
Add a new internal config, `spark.sql.canonicalizeView`, to handle this special case.

Author: Wenchen Fan <cloud0fan@163.com>

Closes #8990 from cloud-fan/view-master.
Diffstat (limited to 'sql/core')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
index e7bbc7d5db..8f0f8910b3 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
@@ -319,6 +319,15 @@ private[spark] object SQLConf {
doc = "When true, some predicates will be pushed down into the Hive metastore so that " +
"unmatching partitions can be eliminated earlier.")
+ val CANONICALIZE_VIEW = booleanConf("spark.sql.canonicalizeView",
+ defaultValue = Some(false),
+ doc = "When true, CREATE VIEW will be handled by Spark SQL instead of Hive native commands. " +
+ "Note that this function is experimental and should ony be used when you are using " +
+ "non-hive-compatible tables written by Spark SQL. The SQL string used to create " +
+ "view should be fully qualified, i.e. use `tbl1`.`col1` instead of `*` whenever " +
+ "possible, or you may get wrong result.",
+ isPublic = false)
+
val COLUMN_NAME_OF_CORRUPT_RECORD = stringConf("spark.sql.columnNameOfCorruptRecord",
defaultValue = Some("_corrupt_record"),
doc = "<TODO>")
@@ -362,7 +371,7 @@ private[spark] object SQLConf {
val PARTITION_DISCOVERY_ENABLED = booleanConf("spark.sql.sources.partitionDiscovery.enabled",
defaultValue = Some(true),
- doc = "When true, automtically discover data partitions.")
+ doc = "When true, automatically discover data partitions.")
val PARTITION_COLUMN_TYPE_INFERENCE =
booleanConf("spark.sql.sources.partitionColumnTypeInference.enabled",
@@ -372,7 +381,7 @@ private[spark] object SQLConf {
val PARTITION_MAX_FILES =
intConf("spark.sql.sources.maxConcurrentWrites",
defaultValue = Some(5),
- doc = "The maximum number of concurent files to open before falling back on sorting when " +
+ doc = "The maximum number of concurrent files to open before falling back on sorting when " +
"writing out files using dynamic partitioning.")
// The output committer class used by HadoopFsRelation. The specified class needs to be a
@@ -471,6 +480,8 @@ private[sql] class SQLConf extends Serializable with CatalystConf {
private[spark] def metastorePartitionPruning: Boolean = getConf(HIVE_METASTORE_PARTITION_PRUNING)
+ private[spark] def canonicalizeView: Boolean = getConf(CANONICALIZE_VIEW)
+
private[spark] def sortMergeJoinEnabled: Boolean = getConf(SORTMERGE_JOIN)
private[spark] def codegenEnabled: Boolean = getConf(CODEGEN_ENABLED, getConf(TUNGSTEN_ENABLED))
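
As a usage illustration (not part of this commit), here is a minimal sketch of how the new flag might be exercised from a HiveContext in a Spark 1.5-era build that includes this patch; the table and view names (tbl1, v1) are hypothetical:

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext

val sc = new SparkContext(new SparkConf().setAppName("canonicalize-view-demo"))
val hiveContext = new HiveContext(sc)

// Enable the new internal flag so CREATE VIEW is handled by Spark SQL
// instead of being passed through to Hive's native DDL commands.
hiveContext.setConf("spark.sql.canonicalizeView", "true")

// A table written through Spark SQL's data source API may not be
// Hive-compatible; with the flag on, a view over it can still be created.
hiveContext.range(10).write.saveAsTable("tbl1")

// Per the config doc: qualify columns explicitly rather than using `*`.
hiveContext.sql("CREATE VIEW v1 AS SELECT `tbl1`.`id` FROM `tbl1`")
hiveContext.sql("SELECT id FROM v1").show()

Since the config is registered with isPublic = false, it is excluded from user-facing documentation, but it can still be set programmatically via setConf as above or with a SET command.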