path: root/sql/catalyst/src/test/java
author      Adam Roberts <aroberts@uk.ibm.com>    2016-09-15 09:37:12 +0100
committer   Sean Owen <sowen@cloudera.com>        2016-09-15 09:37:12 +0100
commit      f893e262500e2f183de88e984300dd5b085e1f71 (patch)
tree        a192608e814f0da441dc05f7f5d87eb727ecff98 /sql/catalyst/src/test/java
parent      d15b4f90e64f7ec5cf14c7c57d2cb4234c3ce677 (diff)
[SPARK-17524][TESTS] Use specified spark.buffer.pageSize
## What changes were proposed in this pull request?

This PR has the appendRowUntilExceedingPageSize test in RowBasedKeyValueBatchSuite use whatever spark.buffer.pageSize value a user has specified, to prevent a test failure for anyone testing Apache Spark on a box with a reduced page size. The test is currently hardcoded to use the default page size, which is 64 MB, so this is a minor test improvement.

## How was this patch tested?

Existing unit tests, run with a 1 MB page size and with the default 64 MB page size.

Author: Adam Roberts <aroberts@uk.ibm.com>

Closes #15079 from a-roberts/patch-5.
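For anyone reproducing the failure mode locally, here is a minimal sketch of forcing a non-default page size through configuration. It assumes the test-scope TestMemoryManager helper this suite already uses, and that spark.buffer.pageSize accepts byte-size strings such as "1m"; the wrapper class PageSizeSketch is illustrative, not part of the patch:

import org.apache.spark.SparkConf;
import org.apache.spark.memory.TestMemoryManager;

public class PageSizeSketch {
  public static void main(String[] args) {
    // MemoryManager.pageSizeBytes() honours spark.buffer.pageSize when it is
    // set in the conf; otherwise it derives a default (commonly 64 MB).
    SparkConf conf = new SparkConf().set("spark.buffer.pageSize", "1m");
    TestMemoryManager memoryManager = new TestMemoryManager(conf);
    long pageSize = memoryManager.pageSizeBytes(); // 1 MB here, not 64 MB
    System.out.println("page size in bytes: " + pageSize);
  }
}

With no spark.buffer.pageSize set, pageSizeBytes() falls back to the derived default, which is why the hardcoded 64 MB constant happened to pass on most machines.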
Diffstat (limited to 'sql/catalyst/src/test/java')
-rw-r--r--   sql/catalyst/src/test/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatchSuite.java   6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/sql/catalyst/src/test/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatchSuite.java b/sql/catalyst/src/test/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatchSuite.java
index 0dd129cea7..fb3dbe8ed1 100644
--- a/sql/catalyst/src/test/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatchSuite.java
+++ b/sql/catalyst/src/test/java/org/apache/spark/sql/catalyst/expressions/RowBasedKeyValueBatchSuite.java
@@ -338,15 +338,17 @@ public class RowBasedKeyValueBatchSuite {
   @Test
   public void appendRowUntilExceedingPageSize() throws Exception {
+    // Use default size or spark.buffer.pageSize if specified
+    int pageSizeToUse = (int) memoryManager.pageSizeBytes();
     RowBasedKeyValueBatch batch = RowBasedKeyValueBatch.allocate(keySchema,
-            valueSchema, taskMemoryManager, 64 * 1024 * 1024); //enough capacity
+            valueSchema, taskMemoryManager, pageSizeToUse); //enough capacity
     try {
       UnsafeRow key = makeKeyRow(1, "A");
       UnsafeRow value = makeValueRow(1, 1);
       int recordLength = 8 + key.getSizeInBytes() + value.getSizeInBytes() + 8;
       int totalSize = 4;
       int numRows = 0;
-      while (totalSize + recordLength < 64 * 1024 * 1024) { // default page size
+      while (totalSize + recordLength < pageSizeToUse) {
         appendRow(batch, key, value);
         totalSize += recordLength;
         numRows++;