Diffstat (limited to 'sql')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala | 19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
index 2f3826f72b..63dc1fd71e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
@@ -127,17 +127,16 @@ case class DataSource(
   }
 
   private def inferFileFormatSchema(format: FileFormat): StructType = {
-    val caseInsensitiveOptions = new CaseInsensitiveMap(options)
-    val allPaths = caseInsensitiveOptions.get("path")
-    val globbedPaths = allPaths.toSeq.flatMap { path =>
-      val hdfsPath = new Path(path)
-      val fs = hdfsPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
-      val qualified = hdfsPath.makeQualified(fs.getUri, fs.getWorkingDirectory)
-      SparkHadoopUtil.get.globPathIfNecessary(qualified)
-    }.toArray
-
-    val fileCatalog: FileCatalog = new HDFSFileCatalog(sparkSession, options, globbedPaths, None)
     userSpecifiedSchema.orElse {
+      val caseInsensitiveOptions = new CaseInsensitiveMap(options)
+      val allPaths = caseInsensitiveOptions.get("path")
+      val globbedPaths = allPaths.toSeq.flatMap { path =>
+        val hdfsPath = new Path(path)
+        val fs = hdfsPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
+        val qualified = hdfsPath.makeQualified(fs.getUri, fs.getWorkingDirectory)
+        SparkHadoopUtil.get.globPathIfNecessary(qualified)
+      }.toArray
+      val fileCatalog: FileCatalog = new HDFSFileCatalog(sparkSession, options, globbedPaths, None)
       format.inferSchema(
         sparkSession,
         caseInsensitiveOptions,
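
The point of the move is that Scala's Option.orElse takes its alternative by name, so the relocated block only runs when userSpecifiedSchema is empty; when a schema was supplied, the path globbing and HDFSFileCatalog construction are skipped entirely. A minimal standalone sketch of that evaluation behavior (the expensiveInference helper and the printed strings are illustrative placeholders, not Spark code):

// Option.orElse(alternative: => Option[B]) evaluates its argument by name,
// so expensive work placed inside the block runs only when the Option is empty.
object OrElseLaziness {
  def expensiveInference(): Option[String] = {
    println("globbing paths and listing files...")  // stands in for the HDFS calls
    Some("inferred-schema")
  }

  def main(args: Array[String]): Unit = {
    // A user-provided schema: orElse never evaluates its argument,
    // so expensiveInference() is not called and nothing is printed.
    val withUserSchema: Option[String] = Some("user-schema").orElse(expensiveInference())
    println(withUserSchema)                         // Some(user-schema)

    // No user schema: the expensive block runs and its result is used.
    val inferred: Option[String] = None.orElse(expensiveInference())
    println(inferred)                               // Some(inferred-schema)
  }
}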