Diffstat (limited to 'compiler-rt/lib/scudo/standalone/atomic_helpers.h')
-rw-r--r--  compiler-rt/lib/scudo/standalone/atomic_helpers.h  139
1 file changed, 139 insertions, 0 deletions
diff --git a/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
new file mode 100644
index 000000000000..47037d764e25
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -0,0 +1,139 @@
+//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ATOMIC_H_
+#define SCUDO_ATOMIC_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+enum memory_order {
+ memory_order_relaxed = 0,
+ memory_order_consume = 1,
+ memory_order_acquire = 2,
+ memory_order_release = 3,
+ memory_order_acq_rel = 4,
+ memory_order_seq_cst = 5
+};
+COMPILER_CHECK(memory_order_relaxed == __ATOMIC_RELAXED);
+COMPILER_CHECK(memory_order_consume == __ATOMIC_CONSUME);
+COMPILER_CHECK(memory_order_acquire == __ATOMIC_ACQUIRE);
+COMPILER_CHECK(memory_order_release == __ATOMIC_RELEASE);
+COMPILER_CHECK(memory_order_acq_rel == __ATOMIC_ACQ_REL);
+COMPILER_CHECK(memory_order_seq_cst == __ATOMIC_SEQ_CST);
+
+struct atomic_u8 {
+ typedef u8 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u16 {
+ typedef u16 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_s32 {
+ typedef s32 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u32 {
+ typedef u32 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u64 {
+ typedef u64 Type;
+ // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
+ ALIGNED(8) volatile Type ValDoNotUse;
+};
+
+struct atomic_uptr {
+ typedef uptr Type;
+ volatile Type ValDoNotUse;
+};
+
+template <typename T>
+INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ typename T::Type V;
+ __atomic_load(&A->ValDoNotUse, &V, MO);
+ return V;
+}
+
+template <typename T>
+INLINE void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ __atomic_store(&A->ValDoNotUse, &V, MO);
+}
+
+INLINE void atomic_thread_fence(memory_order) { __sync_synchronize(); }
+
+template <typename T>
+INLINE typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ typename T::Type R;
+ __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
+ return R;
+}
+
+template <typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
+ typename T::Type Xchg,
+ memory_order MO) {
+ return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
+ __ATOMIC_RELAXED);
+}
+
+template <typename T>
+INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
+ typename T::Type Xchg,
+ memory_order MO) {
+ return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
+ __ATOMIC_RELAXED);
+}
+
+// Clutter-reducing helpers.
+
+template <typename T>
+INLINE typename T::Type atomic_load_relaxed(const volatile T *A) {
+ return atomic_load(A, memory_order_relaxed);
+}
+
+template <typename T>
+INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
+ atomic_store(A, V, memory_order_relaxed);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+ typename T::Type Cmp,
+ typename T::Type Xchg) {
+ atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+ return Cmp;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_ATOMIC_H_
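
Usage note (editorial, not part of the commit above): the helpers wrap the GCC/Clang __atomic builtins behind a scudo::memory_order enum, so callers pick an explicit ordering per operation. Below is a minimal sketch of how they compose, assuming this header and its internal_defs.h dependency (which supplies INLINE, u32, and friends) are on the include path; RefCounter and updateMax are hypothetical names introduced here purely for illustration.

#include "atomic_helpers.h"

namespace scudo {

// Hypothetical reference counter built on the helpers above.
struct RefCounter {
  atomic_u32 Count;

  void init() { atomic_store_relaxed(&Count, 1); }
  // Increments can be relaxed: they never publish data on their own.
  void acquire() { atomic_fetch_add(&Count, 1u, memory_order_relaxed); }
  // Returns true when the last reference is dropped. acq_rel is the usual
  // refcount idiom: release orders prior writes before the decrement,
  // acquire makes them visible to whoever observes the count reach zero.
  bool release() {
    return atomic_fetch_sub(&Count, 1u, memory_order_acq_rel) == 1;
  }
};

// CAS-loop example: atomically raise *Max to at least V. On failure,
// atomic_compare_exchange_weak reloads the current value into Cur, so the
// loop re-checks the condition against fresh data each iteration.
INLINE void updateMax(atomic_u32 *Max, u32 V) {
  u32 Cur = atomic_load_relaxed(Max);
  while (Cur < V &&
         !atomic_compare_exchange_weak(Max, &Cur, V, memory_order_relaxed)) {
  }
}

} // namespace scudo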