author    Robert Edmonds <edmonds@debian.org>  2014-09-18 20:36:42 -0400
committer Robert Edmonds <edmonds@debian.org>  2014-09-18 20:36:42 -0400
commit    cc0a047384414991009d95ada9e89b9a92aac531 (patch)
tree      b9ff77603a5e5d289b7d707315e58f1a0f24761e
parent    8d5f1e1efb6c1a99aa4997d519767d5865e06e03 (diff)
generic atomicops: promote Acquire_Store() and Release_Load() to use SEQ_CST fence
__atomic_store_n() cannot take a memory model argument of __ATOMIC_ACQUIRE, and __atomic_load_n() cannot take a memory model argument of __ATOMIC_RELEASE, per the GCC documentation:

    https://gcc.gnu.org/onlinedocs/gcc-4.9.1/gcc/_005f_005fatomic-Builtins.html

On Clang this generates a -Watomic-memory-ordering warning.

Promote the fences in Acquire_Store() and Release_Load() to the stronger __ATOMIC_SEQ_CST memory model, which ought to be safe. Note that there are no actual uses of Acquire_Store() or Release_Load() in protobuf, though.

This follows the TSAN atomicops implementation, which also uses SEQ_CST fences for these functions.

(Fixes #25.)
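For reference, a minimal standalone sketch (not protobuf code; the variable and function names below are illustrative only) of which memory-order arguments the builtins accept, per the GCC documentation linked above:

    #include <cstdint>

    int32_t shared = 0;

    void demo() {
      // Stores accept only __ATOMIC_RELAXED, __ATOMIC_RELEASE, or
      // __ATOMIC_SEQ_CST; passing __ATOMIC_ACQUIRE here is what draws
      // Clang's -Watomic-memory-ordering warning.
      __atomic_store_n(&shared, 1, __ATOMIC_SEQ_CST);

      // Loads accept only __ATOMIC_RELAXED, __ATOMIC_ACQUIRE,
      // __ATOMIC_CONSUME, or __ATOMIC_SEQ_CST; __ATOMIC_RELEASE is
      // invalid for a load.
      int32_t value = __atomic_load_n(&shared, __ATOMIC_SEQ_CST);
      (void)value;
    }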
 src/google/protobuf/stubs/atomicops_internals_generic_gcc.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/google/protobuf/stubs/atomicops_internals_generic_gcc.h b/src/google/protobuf/stubs/atomicops_internals_generic_gcc.h
index e30bb444..dd7abf6f 100644
--- a/src/google/protobuf/stubs/atomicops_internals_generic_gcc.h
+++ b/src/google/protobuf/stubs/atomicops_internals_generic_gcc.h
@@ -83,7 +83,7 @@ inline void MemoryBarrier() {
 }
 
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  __atomic_store_n(ptr, value, __ATOMIC_ACQUIRE);
+  __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST);
 }
 
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -99,7 +99,7 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
 }
 
 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  return __atomic_load_n(ptr, __ATOMIC_RELEASE);
+  return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
 }
 
 #ifdef __LP64__
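As a sanity check, here is a rough C++11 std::atomic rendering of the two patched functions (a sketch for illustration only; protobuf itself uses the raw GCC builtins shown in the diff above). The standard imposes the same restriction the builtins do: memory_order_acquire is not a valid order for a store, and memory_order_release is not a valid order for a load, so memory_order_seq_cst is the nearest strictly stronger choice:

    #include <atomic>
    #include <cstdint>

    // memory_order_acquire is invalid for a store, so the patched code
    // uses the strictly stronger memory_order_seq_cst instead.
    inline void Acquire_Store(volatile std::atomic<int32_t>* ptr,
                              int32_t value) {
      ptr->store(value, std::memory_order_seq_cst);
    }

    // memory_order_release is likewise invalid for a load.
    inline int32_t Release_Load(const volatile std::atomic<int32_t>* ptr) {
      return ptr->load(std::memory_order_seq_cst);
    }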