aboutsummaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorDavies Liu <davies@databricks.com>2016-10-10 19:14:01 -0700
committerShixiong Zhu <shixiong@databricks.com>2016-10-10 19:14:01 -0700
commitd5ec4a3e014494a3d991a6350caffbc3b17be0fd (patch)
treeb28f51d02eddff387ed1a343dcb4a3d62ad3f11a /sql
parent03c40202f36ea9fc93071b79fed21ed3f2190ba1 (diff)
downloadspark-d5ec4a3e014494a3d991a6350caffbc3b17be0fd.tar.gz
spark-d5ec4a3e014494a3d991a6350caffbc3b17be0fd.tar.bz2
spark-d5ec4a3e014494a3d991a6350caffbc3b17be0fd.zip
[SPARK-17738][TEST] Fix flaky test in ColumnTypeSuite
## What changes were proposed in this pull request? The default buffer size is not big enough for a randomly generated MapType. ## How was this patch tested? Ran the tests 100 times; they never failed (they failed 8 times before the patch). Author: Davies Liu <davies@databricks.com> Closes #15395 from davies/flaky_map.
Diffstat (limited to 'sql')
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnTypeSuite.scala7
1 files changed, 4 insertions, 3 deletions
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnTypeSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnTypeSuite.scala
index 8bf9f521e2..5f2a3aaff6 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnTypeSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnTypeSuite.scala
@@ -101,14 +101,15 @@ class ColumnTypeSuite extends SparkFunSuite with Logging {
def testColumnType[JvmType](columnType: ColumnType[JvmType]): Unit = {
- val buffer = ByteBuffer.allocate(DEFAULT_BUFFER_SIZE).order(ByteOrder.nativeOrder())
val proj = UnsafeProjection.create(Array[DataType](columnType.dataType))
val converter = CatalystTypeConverters.createToScalaConverter(columnType.dataType)
val seq = (0 until 4).map(_ => proj(makeRandomRow(columnType)).copy())
+ val totalSize = seq.map(_.getSizeInBytes).sum
+ val bufferSize = Math.max(DEFAULT_BUFFER_SIZE, totalSize)
test(s"$columnType append/extract") {
- buffer.rewind()
- seq.foreach(columnType.append(_, 0, buffer))
+ val buffer = ByteBuffer.allocate(bufferSize).order(ByteOrder.nativeOrder())
+ seq.foreach(r => columnType.append(columnType.getField(r, 0), buffer))
buffer.rewind()
seq.foreach { row =>