author     Cheng Lian <lian@databricks.com>    2016-07-20 16:49:46 -0700
committer  Yin Huai <yhuai@databricks.com>     2016-07-20 16:49:46 -0700
commit     e651900bd562cc29a3eb13e92a5147979e347f61 (patch)
tree       c2bf0c7ddcc0d11b5310046c798e0d72baf49966 /sql/core/src/test/scala/org
parent     e3cd5b3050711af69fc1dfc518b11bf1a86b6a4c (diff)
[SPARK-16344][SQL] Decoding Parquet array of struct with a single field named "element"
## What changes were proposed in this pull request?

Due to backward-compatibility reasons, the following Parquet schema is ambiguous:

```
optional group f (LIST) {
  repeated group list {
    optional group element {
      optional int32 element;
    }
  }
}
```

According to the parquet-format spec, when interpreted as a standard 3-level layout, this type is equivalent to the following SQL type:

```
ARRAY<STRUCT<element: INT>>
```

However, when interpreted as a legacy 2-level layout, it's equivalent to

```
ARRAY<STRUCT<element: STRUCT<element: INT>>>
```

Historically, we employed two methods to disambiguate these cases:

- `ParquetSchemaConverter.isElementType()`

  Used to disambiguate the above cases while converting Parquet types to Spark types.

- `ParquetRowConverter.isElementType()`

  Used to disambiguate the above cases while instantiating row converters that convert Parquet records to Spark rows.

Unfortunately, these two methods make different decisions about the problematic Parquet type above, which caused SPARK-16344.

`ParquetRowConverter.isElementType()` was necessary for Spark 1.4 and earlier versions because Parquet requested schemata were directly converted from Spark schemata in those versions. The converted Parquet schemata may be incompatible with the actual schemata of the underlying physical files when the files are written by a system/library whose schema conversion scheme for Parquet LIST and MAP fields differs from Spark's.

In Spark 1.5, Parquet requested schemata are always properly tailored from the schemata of the physical files to be read, so `ParquetRowConverter.isElementType()` is no longer necessary. This PR replaces that method with a simple yet accurate scheme: whenever an ambiguous Parquet type is hit, convert the type in question back to a Spark type using `ParquetSchemaConverter` and check whether it matches the corresponding Spark type.

## How was this patch tested?

New test cases added in `ParquetHiveCompatibilitySuite` and `ParquetQuerySuite`.

Author: Cheng Lian <lian@databricks.com>

Closes #14014 from liancheng/spark-16344-for-master-and-2.0.
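To make the ambiguity above concrete, here is a small illustrative sketch (not part of the patch) that builds the two candidate Spark SQL types with the public `org.apache.spark.sql.types` API and picks whichever one matches an expected Catalyst type. The names `Spark16344Sketch`, `pickInterpretation`, `standardLayout`, and `legacyLayout` are hypothetical; the real change performs the comparison by converting the ambiguous Parquet group back through `ParquetSchemaConverter` rather than comparing pre-built candidates.

```scala
import org.apache.spark.sql.types._

object Spark16344Sketch {
  // Standard 3-level interpretation: ARRAY<STRUCT<element: INT>>
  val standardLayout: DataType =
    ArrayType(StructType(Seq(StructField("element", IntegerType))))

  // Legacy 2-level interpretation: ARRAY<STRUCT<element: STRUCT<element: INT>>>
  val legacyLayout: DataType =
    ArrayType(StructType(Seq(
      StructField("element",
        StructType(Seq(StructField("element", IntegerType)))))))

  // Hypothetical helper showing the disambiguation idea: given the Spark type
  // expected for the field, prefer whichever interpretation matches it
  // structurally. The actual patch obtains the candidate by converting the
  // ambiguous Parquet group with ParquetSchemaConverter; this stand-in only
  // compares pre-built candidates.
  def pickInterpretation(expected: DataType): String =
    if (expected == standardLayout) "standard 3-level layout"
    else if (expected == legacyLayout) "legacy 2-level layout"
    else "unknown"

  def main(args: Array[String]): Unit = {
    println(pickInterpretation(standardLayout)) // standard 3-level layout
    println(pickInterpretation(legacyLayout))   // legacy 2-level layout
  }
}
```

In this scheme the requested (expected) Spark type decides the interpretation, which is the property the PR relies on to keep the schema converter and the row converter consistent.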
Diffstat (limited to 'sql/core/src/test/scala/org')
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala  16
1 file changed, 15 insertions, 1 deletion
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
index 7c394e0b0c..02b94452a1 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala
@@ -26,7 +26,7 @@ import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier}
import org.apache.spark.sql.catalyst.expressions.SpecificMutableRow
import org.apache.spark.sql.execution.BatchedDataSourceScanExec
-import org.apache.spark.sql.execution.datasources.parquet.TestingUDT.{NestedStruct, NestedStructUDT}
+import org.apache.spark.sql.execution.datasources.parquet.TestingUDT.{NestedStruct, NestedStructUDT, SingleElement}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
@@ -668,9 +668,23 @@ class ParquetQuerySuite extends QueryTest with ParquetTest with SharedSQLContext
      }
    }
  }
+
+  test("SPARK-16344: array of struct with a single field named 'element'") {
+    withTempPath { dir =>
+      val path = dir.getCanonicalPath
+      Seq(Tuple1(Array(SingleElement(42)))).toDF("f").write.parquet(path)
+
+      checkAnswer(
+        sqlContext.read.parquet(path),
+        Row(Array(Row(42)))
+      )
+    }
+  }
}

object TestingUDT {
+  case class SingleElement(element: Long)
+
  @SQLUserDefinedType(udt = classOf[NestedStructUDT])
  case class NestedStruct(a: Integer, b: Long, c: Double)