author     Jacek Laskowski <jacek@japila.pl>              2016-08-23 12:59:25 +0200
committer  Herman van Hovell <hvanhovell@databricks.com>  2016-08-23 12:59:25 +0200
commit     9d376ad76ca702ae3fc6ffd0567e7590d9a8daf3 (patch)
tree       b57f2b071d3e199618536a850a3c37fdc2b49ca3 /sql
parent     cc33460a51d2890fe8f50f5b6b87003d6d210f04 (diff)
[SPARK-17199] Use CatalystConf.resolver for case-sensitivity comparison
## What changes were proposed in this pull request?

Use `CatalystConf.resolver` consistently for case-sensitivity comparison, removing the duplicated definitions.

## How was this patch tested?

Local build. Waiting for Jenkins to confirm a clean build and test run.

Author: Jacek Laskowski <jacek@japila.pl>

Closes #14771 from jaceklaskowski/17199-catalystconf-resolver.
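For context, here is a minimal sketch of the consolidated helper the change leans on, reconstructed from the duplicated blocks removed in the diff below. The trait name `ResolverConf` is illustrative, not Spark's actual source; the exact definition in `CatalystConf` may differ.

```scala
import org.apache.spark.sql.catalyst.analysis.{caseInsensitiveResolution, caseSensitiveResolution, Resolver}

// Sketch of the consolidated helper; this standalone trait stands in
// for CatalystConf.
trait ResolverConf {
  def caseSensitiveAnalysis: Boolean

  // One definition of the name-comparison function; a Resolver is just
  // (String, String) => Boolean. This single method replaces the four
  // copies of the same if/else that the diff below removes.
  def resolver: Resolver = {
    if (caseSensitiveAnalysis) caseSensitiveResolution else caseInsensitiveResolution
  }
}
```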
Diffstat (limited to 'sql')
-rw-r--r--  sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala            8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala         10
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala  8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala        6
4 files changed, 5 insertions, 27 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 41e0e6d65e..e559f235c5 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -64,13 +64,7 @@ class Analyzer(
     this(catalog, conf, conf.optimizerMaxIterations)
   }
 
-  def resolver: Resolver = {
-    if (conf.caseSensitiveAnalysis) {
-      caseSensitiveResolution
-    } else {
-      caseInsensitiveResolution
-    }
-  }
+  def resolver: Resolver = conf.resolver
 
   protected val fixedPoint = FixedPoint(maxIterations)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
index 5ad6ae0956..b783d69974 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
@@ -394,13 +394,7 @@ case class DataSource(
           sparkSession, globbedPaths, options, partitionSchema, !checkPathExist)
 
       val dataSchema = userSpecifiedSchema.map { schema =>
-        val equality =
-          if (sparkSession.sessionState.conf.caseSensitiveAnalysis) {
-            org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution
-          } else {
-            org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution
-          }
-
+        val equality = sparkSession.sessionState.conf.resolver
         StructType(schema.filterNot(f => partitionColumns.exists(equality(_, f.name))))
       }.orElse {
         format.inferSchema(
@@ -430,7 +424,7 @@ case class DataSource(
       relation
   }
 
-  /** Writes the give [[DataFrame]] out to this [[DataSource]]. */
+  /** Writes the given [[DataFrame]] out to this [[DataSource]]. */
   def write(
       mode: SaveMode,
       data: DataFrame): BaseRelation = {
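To see what the new one-liner feeds, here is a self-contained sketch of the schema-filtering pattern above, with the resolver fixed to case-insensitive and hypothetical column names for illustration:

```scala
import org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

object PartitionFilterSketch extends App {
  // Stand-in for sparkSession.sessionState.conf.resolver.
  val equality = caseInsensitiveResolution

  val partitionColumns = Seq("YEAR")   // hypothetical partition column
  val schema = StructType(Seq(         // hypothetical user-specified schema
    StructField("year", IntegerType),
    StructField("event", StringType)))

  // Same expression as in the diff: drop any field whose name resolves
  // to a partition column under the configured case sensitivity.
  val dataSchema = StructType(
    schema.filterNot(f => partitionColumns.exists(equality(_, f.name))))

  println(dataSchema.fieldNames.mkString(", "))   // prints: event
}
```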
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
index 5eba7df060..a6621054fc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
@@ -45,13 +45,7 @@ import org.apache.spark.unsafe.types.UTF8String
  */
 case class DataSourceAnalysis(conf: CatalystConf) extends Rule[LogicalPlan] {
 
-  def resolver: Resolver = {
-    if (conf.caseSensitiveAnalysis) {
-      caseSensitiveResolution
-    } else {
-      caseInsensitiveResolution
-    }
-  }
+  def resolver: Resolver = conf.resolver
 
   // Visible for testing.
   def convertStaticPartitions(
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala
index 117d6672ee..0f7d958136 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala
@@ -102,11 +102,7 @@ class FileStreamSinkWriter(
   // Get the actual partition columns as attributes after matching them by name with
   // the given columns names.
   private val partitionColumns = partitionColumnNames.map { col =>
-    val nameEquality = if (data.sparkSession.sessionState.conf.caseSensitiveAnalysis) {
-      org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution
-    } else {
-      org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution
-    }
+    val nameEquality = data.sparkSession.sessionState.conf.resolver
     data.logicalPlan.output.find(f => nameEquality(f.name, col)).getOrElse {
       throw new RuntimeException(s"Partition column $col not found in schema $dataSchema")
     }