author     Davies Liu <davies@databricks.com>  2015-08-06 09:10:57 -0700
committer  Davies Liu <davies.liu@gmail.com>  2015-08-06 09:10:57 -0700
commit     5b965d64ee1687145ba793da749659c8f67384e8 (patch)
tree       a163c8545572b3270fac7159e0d2b6dba5fa4795 /unsafe
parent     aead18ffca36830e854fba32a1cac11a0b2e31d5 (diff)
[SPARK-9644] [SQL] Support updating DecimalType with precision > 18 in UnsafeRow
In order to support updating a variable-length (actually fixed-length) object, its space must be preserved even when the value is null. As a consequence, we can no longer call setNullAt(i) for such a column, because setNullAt(i) removes the offset of the preserved space; callers should call setDecimal(i, null, precision) instead. With this change, hash-based aggregation works on DecimalType with precision > 18. In one test, this reduced the end-to-end run time of an aggregation query from 37 seconds (sort-based) to 24 seconds (hash-based).

cc rxin

Author: Davies Liu <davies@databricks.com>

Closes #7978 from davies/update_decimal and squashes the following commits:

bed8100 [Davies Liu] isSettable -> isMutable
923c9eb [Davies Liu] address comments and fix bug
385891d [Davies Liu] Merge branch 'master' of github.com:apache/spark into update_decimal
36a1872 [Davies Liu] fix tests
cd6c524 [Davies Liu] support set decimal with precision > 18
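To make the new calling convention concrete, here is a minimal sketch of updating a high-precision decimal column. It is illustrative, not part of this commit: the row is assumed to already exist with a DecimalType(20, 2) column at ordinal 0 (UnsafeRow instances are normally produced by a projection), and Decimal.apply(String) is assumed reachable from Java via Scala's static forwarders.

import org.apache.spark.sql.catalyst.expressions.UnsafeRow;
import org.apache.spark.sql.types.Decimal;

public class DecimalUpdateSketch {
  static void clearAndSet(UnsafeRow row) {
    // Wrong after this patch: setNullAt(0) would erase the offset of the
    // space reserved for the decimal, so the column could never be
    // updated to a non-null value again.
    // row.setNullAt(0);

    // Correct: mark the value null while keeping the reserved space intact.
    row.setDecimal(0, null, 20);

    // The same slot can later be updated back to a concrete value.
    row.setDecimal(0, Decimal.apply("12345678901234567890.12"), 20);
  }
}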
Diffstat (limited to 'unsafe')
-rw-r--r--  unsafe/src/main/java/org/apache/spark/unsafe/PlatformDependent.java  26
1 file changed, 26 insertions(+), 0 deletions(-)
diff --git a/unsafe/src/main/java/org/apache/spark/unsafe/PlatformDependent.java b/unsafe/src/main/java/org/apache/spark/unsafe/PlatformDependent.java
index 192c6714b2..b2de2a2590 100644
--- a/unsafe/src/main/java/org/apache/spark/unsafe/PlatformDependent.java
+++ b/unsafe/src/main/java/org/apache/spark/unsafe/PlatformDependent.java
@@ -18,6 +18,7 @@
package org.apache.spark.unsafe;
import java.lang.reflect.Field;
+import java.math.BigInteger;
import sun.misc.Unsafe;
@@ -87,6 +88,14 @@ public final class PlatformDependent {
_UNSAFE.putDouble(object, offset, value);
}
+ public static Object getObjectVolatile(Object object, long offset) {
+ return _UNSAFE.getObjectVolatile(object, offset);
+ }
+
+ public static void putObjectVolatile(Object object, long offset, Object value) {
+ _UNSAFE.putObjectVolatile(object, offset, value);
+ }
+
public static long allocateMemory(long size) {
return _UNSAFE.allocateMemory(size);
}
@@ -107,6 +116,10 @@ public final class PlatformDependent {
public static final int DOUBLE_ARRAY_OFFSET;
+ // Support for resetting final fields while deserializing
+ public static final long BIG_INTEGER_SIGNUM_OFFSET;
+ public static final long BIG_INTEGER_MAG_OFFSET;
+
/**
* Limits the number of bytes to copy per {@link Unsafe#copyMemory(long, long, long)} to
* allow safepoint polling during a large copy.
@@ -129,11 +142,24 @@ public final class PlatformDependent {
INT_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(int[].class);
LONG_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(long[].class);
DOUBLE_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(double[].class);
+
+ long signumOffset = 0;
+ long magOffset = 0;
+ try {
+ signumOffset = _UNSAFE.objectFieldOffset(BigInteger.class.getDeclaredField("signum"));
+ magOffset = _UNSAFE.objectFieldOffset(BigInteger.class.getDeclaredField("mag"));
+ } catch (Exception ex) {
+ // should not happen
+ }
+ BIG_INTEGER_SIGNUM_OFFSET = signumOffset;
+ BIG_INTEGER_MAG_OFFSET = magOffset;
} else {
BYTE_ARRAY_OFFSET = 0;
INT_ARRAY_OFFSET = 0;
LONG_ARRAY_OFFSET = 0;
DOUBLE_ARRAY_OFFSET = 0;
+ BIG_INTEGER_SIGNUM_OFFSET = 0;
+ BIG_INTEGER_MAG_OFFSET = 0;
}
}
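The new constants exist so callers can read, and during deserialization overwrite, BigInteger's final signum and mag fields. A minimal sketch of that technique follows; it is illustrative only, not the actual Spark call site, and it assumes a PlatformDependent.putInt accessor shaped like the putDouble shown above.

import java.math.BigInteger;

import org.apache.spark.unsafe.PlatformDependent;

public class BigIntegerResetSketch {

  // Overwrite the final fields of a BigInteger in place so one instance can
  // be reused across deserialized records instead of allocating per value.
  static void reset(BigInteger target, int signum, int[] mag) {
    PlatformDependent.putInt(
        target, PlatformDependent.BIG_INTEGER_SIGNUM_OFFSET, signum);
    PlatformDependent.putObjectVolatile(
        target, PlatformDependent.BIG_INTEGER_MAG_OFFSET, mag);
  }

  public static void main(String[] args) {
    // Never mutate shared constants such as BigInteger.ZERO; use a fresh copy.
    BigInteger scratch = new BigInteger("0");

    // Borrow the magnitude of an existing value to keep the sketch self-contained.
    BigInteger source = new BigInteger("12345678901234567890");
    int[] mag = (int[]) PlatformDependent.getObjectVolatile(
        source, PlatformDependent.BIG_INTEGER_MAG_OFFSET);

    reset(scratch, source.signum(), mag);
    System.out.println(scratch);  // 12345678901234567890
  }
}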