aboutsummaryrefslogtreecommitdiff
path: root/sql/hive
diff options
context:
space:
mode:
authorEric Liang <ekl@databricks.com>2016-11-09 15:00:46 +0800
committerWenchen Fan <wenchen@databricks.com>2016-11-09 15:00:46 +0800
commit4afa39e223c70e91b6ee19e9ea76fa9115203d74 (patch)
tree1dfd0300fe96940878c495d209594ac8f8090137 /sql/hive
parent55964c15a7b639f920dfe6c104ae4fdcd673705c (diff)
downloadspark-4afa39e223c70e91b6ee19e9ea76fa9115203d74.tar.gz
spark-4afa39e223c70e91b6ee19e9ea76fa9115203d74.tar.bz2
spark-4afa39e223c70e91b6ee19e9ea76fa9115203d74.zip
[SPARK-18333][SQL] Revert hacks in parquet and orc reader to support case insensitive resolution
## What changes were proposed in this pull request?

These are no longer needed after https://issues.apache.org/jira/browse/SPARK-17183. cc cloud-fan

## How was this patch tested?

Existing parquet and orc tests.

Author: Eric Liang <ekl@databricks.com>

Closes #15799 from ericl/sc-4929.
Diffstat (limited to 'sql/hive')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala  12
1 file changed, 1 insertion, 11 deletions
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
index 7c519a0743..42c92ed5ca 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
@@ -305,17 +305,7 @@ private[orc] object OrcRelation extends HiveInspectors {
def setRequiredColumns(
conf: Configuration, physicalSchema: StructType, requestedSchema: StructType): Unit = {
- val caseInsensitiveFieldMap: Map[String, Int] = physicalSchema.fieldNames
- .zipWithIndex
- .map(f => (f._1.toLowerCase, f._2))
- .toMap
- val ids = requestedSchema.map { a =>
- val exactMatch: Option[Int] = physicalSchema.getFieldIndex(a.name)
- val res = exactMatch.getOrElse(
- caseInsensitiveFieldMap.getOrElse(a.name,
- throw new IllegalArgumentException(s"""Field "$a.name" does not exist.""")))
- res: Integer
- }
+ val ids = requestedSchema.map(a => physicalSchema.fieldIndex(a.name): Integer)
val (sortedIDs, sortedNames) = ids.zip(requestedSchema.fieldNames).sorted.unzip
HiveShim.appendReadColumns(conf, sortedIDs, sortedNames)
}