author     Cheng Lian <lian@databricks.com>    2016-03-29 20:56:01 +0800
committer  Cheng Lian <lian@databricks.com>    2016-03-29 20:56:01 +0800
commit     a632bb56f8867df39a78d7f01fb870f548b09815 (patch)
tree       a1a1c3a74ad8b07c33e9d593b8c54cbbbfcd026f /sql/core
parent     425bcf6d6844732fe402af05472ad87b4e032cb6 (diff)
[SPARK-14208][SQL] Renames spark.sql.parquet.fileScan
## What changes were proposed in this pull request?

Renames the SQL option `spark.sql.parquet.fileScan` to `spark.sql.sources.fileScan`, since all `HadoopFsRelation` based data sources are now being migrated to the `FileScanRDD` code path.

## How was this patch tested?

None.

Author: Cheng Lian <lian@databricks.com>

Closes #12003 from liancheng/spark-14208-option-renaming.
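For context, a minimal usage sketch of the renamed flag. The option is internal (`isPublic = false`), so this is illustrative only; `sqlContext` is an assumed existing `SQLContext`, and the path is hypothetical:

    // Illustrative only: the flag defaults to true, so the new FileScanRDD
    // path is already on; this shows how it could be switched off.
    sqlContext.setConf("spark.sql.sources.fileScan", "false") // was spark.sql.parquet.fileScan
    val df = sqlContext.read.parquet("/path/to/data")         // no longer planned via FileScanRDD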
Diffstat (limited to 'sql/core')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala |  2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala                         |  8
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala
index 76a724e51e..20fda95154 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala
@@ -60,7 +60,7 @@ private[sql] object FileSourceStrategy extends Strategy with Logging {
files.fileFormat.isInstanceOf[parquet.DefaultSource] ||
files.fileFormat.toString == "ORC" ||
files.fileFormat.isInstanceOf[json.DefaultSource]) &&
- files.sqlContext.conf.parquetFileScan =>
+ files.sqlContext.conf.useFileScan =>
// Filters on this relation fall into four categories based on where we can use them to avoid
// reading unneeded data:
// - partition keys only - used to prune directories to read
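For readers skimming the planner change: the strategy only fires when the relation's file format is one of the supported types and the (renamed) flag is set. Below is a toy, self-contained sketch of that match-on-type, guard-on-flag shape, using hypothetical stand-in types rather than Spark's planner classes:

    // Toy stand-ins for the real planner types; only the guard shape matters here.
    sealed trait FileFormat
    case object Parquet extends FileFormat
    case object Orc extends FileFormat
    case object Csv extends FileFormat

    case class Relation(fileFormat: FileFormat, useFileScan: Boolean)

    object GuardDemo extends App {
      def planScan(r: Relation): String = r match {
        // Fire only for supported formats AND when the conf flag is enabled.
        case Relation(Parquet | Orc, _) if r.useFileScan => "FileScanRDD path"
        case _                                           => "legacy scan path"
      }

      println(planScan(Relation(Parquet, useFileScan = true)))  // FileScanRDD path
      println(planScan(Relation(Csv, useFileScan = true)))      // legacy scan path
    }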
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 77af0e000b..ca6ba4c643 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -288,9 +288,9 @@ object SQLConf {
defaultValue = Some(true),
doc = "Whether the query analyzer should be case sensitive or not.")
- val PARQUET_FILE_SCAN = booleanConf("spark.sql.parquet.fileScan",
+ val USE_FILE_SCAN = booleanConf("spark.sql.sources.fileScan",
defaultValue = Some(true),
- doc = "Use the new FileScanRDD path for reading parquet data.",
+ doc = "Use the new FileScanRDD path for reading HDSF based data sources.",
isPublic = false)
val PARQUET_SCHEMA_MERGING_ENABLED = booleanConf("spark.sql.parquet.mergeSchema",
@@ -583,9 +583,9 @@ class SQLConf extends Serializable with CatalystConf with ParserConf with Logging {
def useCompression: Boolean = getConf(COMPRESS_CACHED)
- def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION)
+ def useFileScan: Boolean = getConf(USE_FILE_SCAN)
- def parquetFileScan: Boolean = getConf(PARQUET_FILE_SCAN)
+ def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION)
def parquetCacheMetadata: Boolean = getConf(PARQUET_CACHE_METADATA)
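A self-contained sketch of the `booleanConf` + typed-accessor pattern this hunk follows. This is a toy registry written for illustration, not Spark's actual `SQLConf` (which adds documentation, visibility, and validation):

    import scala.collection.mutable

    // Toy config registry illustrating the entry/accessor pattern; not Spark's SQLConf.
    object MiniConf {
      final case class BooleanConfEntry(key: String, defaultValue: Boolean, doc: String)

      def booleanConf(key: String, defaultValue: Boolean, doc: String): BooleanConfEntry =
        BooleanConfEntry(key, defaultValue, doc)

      val USE_FILE_SCAN = booleanConf(
        "spark.sql.sources.fileScan",
        defaultValue = true,
        doc = "Use the new FileScanRDD path for reading HDFS based data sources.")

      private val settings = mutable.Map.empty[String, String]

      def setConf(key: String, value: String): Unit = settings(key) = value

      def getConf(entry: BooleanConfEntry): Boolean =
        settings.get(entry.key).map(_.toBoolean).getOrElse(entry.defaultValue)

      // Typed accessor, mirroring `def useFileScan: Boolean = getConf(USE_FILE_SCAN)`.
      def useFileScan: Boolean = getConf(USE_FILE_SCAN)
    }

    object MiniConfDemo extends App {
      println(MiniConf.useFileScan)                           // true (default)
      MiniConf.setConf("spark.sql.sources.fileScan", "false")
      println(MiniConf.useFileScan)                           // false
    }

Keeping a typed accessor per entry (rather than scattering string keys through the planner) is what lets this commit rename `parquetFileScan` to `useFileScan` in exactly one place besides the key itself.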