diff --git a/contrib/llvm-project/libcxx/include/atomic b/contrib/llvm-project/libcxx/include/atomic
new file mode 100644
index 000000000000..07ea56f65064
--- /dev/null
+++ b/contrib/llvm-project/libcxx/include/atomic
@@ -0,0 +1,2835 @@
+// -*- C++ -*-
+//===--------------------------- atomic -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP_ATOMIC
+#define _LIBCPP_ATOMIC
+
+/*
+ atomic synopsis
+
+namespace std
+{
+
+// feature test macro [version.syn]
+
+#define __cpp_lib_atomic_is_always_lock_free
+#define __cpp_lib_atomic_flag_test
+#define __cpp_lib_atomic_lock_free_type_aliases
+#define __cpp_lib_atomic_wait
+
+ // order and consistency
+
+ enum memory_order: unspecified // enum class in C++20
+ {
+ relaxed,
+ consume, // load-consume
+ acquire, // load-acquire
+ release, // store-release
+ acq_rel, // store-release load-acquire
+ seq_cst // store-release load-acquire, plus a single total order
+ };
+
+ inline constexpr auto memory_order_relaxed = memory_order::relaxed;
+ inline constexpr auto memory_order_consume = memory_order::consume;
+ inline constexpr auto memory_order_acquire = memory_order::acquire;
+ inline constexpr auto memory_order_release = memory_order::release;
+ inline constexpr auto memory_order_acq_rel = memory_order::acq_rel;
+ inline constexpr auto memory_order_seq_cst = memory_order::seq_cst;
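+
+ // Illustrative example (not part of the synopsis): a release store that is
+ // observed by an acquire load makes the writer's earlier memory effects
+ // visible to the reader:
+ //
+ //   int data = 0;
+ //   std::atomic<bool> ready{false};
+ //   // writer thread:
+ //   data = 42;
+ //   ready.store(true, std::memory_order_release);
+ //   // reader thread:
+ //   while (!ready.load(std::memory_order_acquire)) { /* spin */ }
+ //   assert(data == 42);  // guaranteed by the release/acquire pairing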
+
+template <class T> T kill_dependency(T y) noexcept;
+
+// lock-free property
+
+#define ATOMIC_BOOL_LOCK_FREE unspecified
+#define ATOMIC_CHAR_LOCK_FREE unspecified
+#define ATOMIC_CHAR8_T_LOCK_FREE unspecified // C++20
+#define ATOMIC_CHAR16_T_LOCK_FREE unspecified
+#define ATOMIC_CHAR32_T_LOCK_FREE unspecified
+#define ATOMIC_WCHAR_T_LOCK_FREE unspecified
+#define ATOMIC_SHORT_LOCK_FREE unspecified
+#define ATOMIC_INT_LOCK_FREE unspecified
+#define ATOMIC_LONG_LOCK_FREE unspecified
+#define ATOMIC_LLONG_LOCK_FREE unspecified
+#define ATOMIC_POINTER_LOCK_FREE unspecified
+
+template <class T>
+struct atomic
+{
+ using value_type = T;
+
+ static constexpr bool is_always_lock_free;
+ bool is_lock_free() const volatile noexcept;
+ bool is_lock_free() const noexcept;
+
+ atomic() noexcept = default; // until C++20
+ constexpr atomic() noexcept(is_nothrow_default_constructible_v<T>); // since C++20
+ constexpr atomic(T desr) noexcept;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ T load(memory_order m = memory_order_seq_cst) const volatile noexcept;
+ T load(memory_order m = memory_order_seq_cst) const noexcept;
+ operator T() const volatile noexcept;
+ operator T() const noexcept;
+ void store(T desr, memory_order m = memory_order_seq_cst) volatile noexcept;
+ void store(T desr, memory_order m = memory_order_seq_cst) noexcept;
+ T operator=(T) volatile noexcept;
+ T operator=(T) noexcept;
+
+ T exchange(T desr, memory_order m = memory_order_seq_cst) volatile noexcept;
+ T exchange(T desr, memory_order m = memory_order_seq_cst) noexcept;
+ bool compare_exchange_weak(T& expc, T desr,
+ memory_order s, memory_order f) volatile noexcept;
+ bool compare_exchange_weak(T& expc, T desr, memory_order s, memory_order f) noexcept;
+ bool compare_exchange_strong(T& expc, T desr,
+ memory_order s, memory_order f) volatile noexcept;
+ bool compare_exchange_strong(T& expc, T desr,
+ memory_order s, memory_order f) noexcept;
+ bool compare_exchange_weak(T& expc, T desr,
+ memory_order m = memory_order_seq_cst) volatile noexcept;
+ bool compare_exchange_weak(T& expc, T desr,
+ memory_order m = memory_order_seq_cst) noexcept;
+ bool compare_exchange_strong(T& expc, T desr,
+ memory_order m = memory_order_seq_cst) volatile noexcept;
+ bool compare_exchange_strong(T& expc, T desr,
+ memory_order m = memory_order_seq_cst) noexcept;
+
+ void wait(T, memory_order = memory_order::seq_cst) const volatile noexcept;
+ void wait(T, memory_order = memory_order::seq_cst) const noexcept;
+ void notify_one() volatile noexcept;
+ void notify_one() noexcept;
+ void notify_all() volatile noexcept;
+ void notify_all() noexcept;
+};
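+
+// Illustrative sketch (not part of the synopsis): the canonical retry loop
+// for compare_exchange_weak; on failure `expected` is refreshed with the
+// currently stored value, so the loop makes progress:
+//
+//   std::atomic<int> v{1};
+//   int expected = v.load(std::memory_order_relaxed);
+//   while (!v.compare_exchange_weak(expected, expected * 2,
+//                                   std::memory_order_acq_rel,
+//                                   std::memory_order_relaxed))
+//       ; // `expected` already holds the freshly observed value; just retry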
+
+template <>
+struct atomic<integral>
+{
+ using value_type = integral;
+ using difference_type = value_type;
+
+ static constexpr bool is_always_lock_free;
+ bool is_lock_free() const volatile noexcept;
+ bool is_lock_free() const noexcept;
+
+ atomic() noexcept = default;
+ constexpr atomic(integral desr) noexcept;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ integral load(memory_order m = memory_order_seq_cst) const volatile noexcept;
+ integral load(memory_order m = memory_order_seq_cst) const noexcept;
+ operator integral() const volatile noexcept;
+ operator integral() const noexcept;
+ void store(integral desr, memory_order m = memory_order_seq_cst) volatile noexcept;
+ void store(integral desr, memory_order m = memory_order_seq_cst) noexcept;
+ integral operator=(integral desr) volatile noexcept;
+ integral operator=(integral desr) noexcept;
+
+ integral exchange(integral desr,
+ memory_order m = memory_order_seq_cst) volatile noexcept;
+ integral exchange(integral desr, memory_order m = memory_order_seq_cst) noexcept;
+ bool compare_exchange_weak(integral& expc, integral desr,
+ memory_order s, memory_order f) volatile noexcept;
+ bool compare_exchange_weak(integral& expc, integral desr,
+ memory_order s, memory_order f) noexcept;
+ bool compare_exchange_strong(integral& expc, integral desr,
+ memory_order s, memory_order f) volatile noexcept;
+ bool compare_exchange_strong(integral& expc, integral desr,
+ memory_order s, memory_order f) noexcept;
+ bool compare_exchange_weak(integral& expc, integral desr,
+ memory_order m = memory_order_seq_cst) volatile noexcept;
+ bool compare_exchange_weak(integral& expc, integral desr,
+ memory_order m = memory_order_seq_cst) noexcept;
+ bool compare_exchange_strong(integral& expc, integral desr,
+ memory_order m = memory_order_seq_cst) volatile noexcept;
+ bool compare_exchange_strong(integral& expc, integral desr,
+ memory_order m = memory_order_seq_cst) noexcept;
+
+ integral fetch_add(integral op, memory_order m = memory_order_seq_cst) volatile noexcept;
+ integral fetch_add(integral op, memory_order m = memory_order_seq_cst) noexcept;
+ integral fetch_sub(integral op, memory_order m = memory_order_seq_cst) volatile noexcept;
+ integral fetch_sub(integral op, memory_order m = memory_order_seq_cst) noexcept;
+ integral fetch_and(integral op, memory_order m = memory_order_seq_cst) volatile noexcept;
+ integral fetch_and(integral op, memory_order m = memory_order_seq_cst) noexcept;
+ integral fetch_or(integral op, memory_order m = memory_order_seq_cst) volatile noexcept;
+ integral fetch_or(integral op, memory_order m = memory_order_seq_cst) noexcept;
+ integral fetch_xor(integral op, memory_order m = memory_order_seq_cst) volatile noexcept;
+ integral fetch_xor(integral op, memory_order m = memory_order_seq_cst) noexcept;
+
+ integral operator++(int) volatile noexcept;
+ integral operator++(int) noexcept;
+ integral operator--(int) volatile noexcept;
+ integral operator--(int) noexcept;
+ integral operator++() volatile noexcept;
+ integral operator++() noexcept;
+ integral operator--() volatile noexcept;
+ integral operator--() noexcept;
+ integral operator+=(integral op) volatile noexcept;
+ integral operator+=(integral op) noexcept;
+ integral operator-=(integral op) volatile noexcept;
+ integral operator-=(integral op) noexcept;
+ integral operator&=(integral op) volatile noexcept;
+ integral operator&=(integral op) noexcept;
+ integral operator|=(integral op) volatile noexcept;
+ integral operator|=(integral op) noexcept;
+ integral operator^=(integral op) volatile noexcept;
+ integral operator^=(integral op) noexcept;
+
+ void wait(integral, memory_order = memory_order::seq_cst) const volatile noexcept;
+ void wait(integral, memory_order = memory_order::seq_cst) const noexcept;
+ void notify_one() volatile noexcept;
+ void notify_one() noexcept;
+ void notify_all() volatile noexcept;
+ void notify_all() noexcept;
+};
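+
+// Illustrative sketch: fetch_add returns the value held *before* the
+// addition, so a relaxed counter hands out unique ids with no further
+// synchronization:
+//
+//   std::atomic<unsigned> next_id{0};
+//   unsigned id = next_id.fetch_add(1, std::memory_order_relaxed);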
+
+template <class T>
+struct atomic<T*>
+{
+ using value_type = T*;
+ using difference_type = ptrdiff_t;
+
+ static constexpr bool is_always_lock_free;
+ bool is_lock_free() const volatile noexcept;
+ bool is_lock_free() const noexcept;
+
+ atomic() noexcept = default; // until C++20
+ constexpr atomic() noexcept; // since C++20
+ constexpr atomic(T* desr) noexcept;
+ atomic(const atomic&) = delete;
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+
+ T* load(memory_order m = memory_order_seq_cst) const volatile noexcept;
+ T* load(memory_order m = memory_order_seq_cst) const noexcept;
+ operator T*() const volatile noexcept;
+ operator T*() const noexcept;
+ void store(T* desr, memory_order m = memory_order_seq_cst) volatile noexcept;
+ void store(T* desr, memory_order m = memory_order_seq_cst) noexcept;
+ T* operator=(T*) volatile noexcept;
+ T* operator=(T*) noexcept;
+
+ T* exchange(T* desr, memory_order m = memory_order_seq_cst) volatile noexcept;
+ T* exchange(T* desr, memory_order m = memory_order_seq_cst) noexcept;
+ bool compare_exchange_weak(T*& expc, T* desr,
+ memory_order s, memory_order f) volatile noexcept;
+ bool compare_exchange_weak(T*& expc, T* desr,
+ memory_order s, memory_order f) noexcept;
+ bool compare_exchange_strong(T*& expc, T* desr,
+ memory_order s, memory_order f) volatile noexcept;
+ bool compare_exchange_strong(T*& expc, T* desr,
+ memory_order s, memory_order f) noexcept;
+ bool compare_exchange_weak(T*& expc, T* desr,
+ memory_order m = memory_order_seq_cst) volatile noexcept;
+ bool compare_exchange_weak(T*& expc, T* desr,
+ memory_order m = memory_order_seq_cst) noexcept;
+ bool compare_exchange_strong(T*& expc, T* desr,
+ memory_order m = memory_order_seq_cst) volatile noexcept;
+ bool compare_exchange_strong(T*& expc, T* desr,
+ memory_order m = memory_order_seq_cst) noexcept;
+ T* fetch_add(ptrdiff_t op, memory_order m = memory_order_seq_cst) volatile noexcept;
+ T* fetch_add(ptrdiff_t op, memory_order m = memory_order_seq_cst) noexcept;
+ T* fetch_sub(ptrdiff_t op, memory_order m = memory_order_seq_cst) volatile noexcept;
+ T* fetch_sub(ptrdiff_t op, memory_order m = memory_order_seq_cst) noexcept;
+
+ T* operator++(int) volatile noexcept;
+ T* operator++(int) noexcept;
+ T* operator--(int) volatile noexcept;
+ T* operator--(int) noexcept;
+ T* operator++() volatile noexcept;
+ T* operator++() noexcept;
+ T* operator--() volatile noexcept;
+ T* operator--() noexcept;
+ T* operator+=(ptrdiff_t op) volatile noexcept;
+ T* operator+=(ptrdiff_t op) noexcept;
+ T* operator-=(ptrdiff_t op) volatile noexcept;
+ T* operator-=(ptrdiff_t op) noexcept;
+
+ void wait(T*, memory_order = memory_order::seq_cst) const volatile noexcept;
+ void wait(T*, memory_order = memory_order::seq_cst) const noexcept;
+ void notify_one() volatile noexcept;
+ void notify_one() noexcept;
+ void notify_all() volatile noexcept;
+ void notify_all() noexcept;
+};
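+
+// Illustrative sketch: arithmetic on atomic<T*> is scaled by sizeof(T),
+// exactly like ordinary pointer arithmetic (difference_type is ptrdiff_t):
+//
+//   int buf[8] = {};
+//   std::atomic<int*> p{buf};
+//   int* prev = p.fetch_add(2);   // prev == buf, p now points at buf + 2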
+
+
+template <class T>
+ bool atomic_is_lock_free(const volatile atomic<T>* obj) noexcept;
+
+template <class T>
+ bool atomic_is_lock_free(const atomic<T>* obj) noexcept;
+
+template <class T>
+ void atomic_store(volatile atomic<T>* obj, T desr) noexcept;
+
+template <class T>
+ void atomic_store(atomic<T>* obj, T desr) noexcept;
+
+template <class T>
+ void atomic_store_explicit(volatile atomic<T>* obj, T desr, memory_order m) noexcept;
+
+template <class T>
+ void atomic_store_explicit(atomic<T>* obj, T desr, memory_order m) noexcept;
+
+template <class T>
+ T atomic_load(const volatile atomic<T>* obj) noexcept;
+
+template <class T>
+ T atomic_load(const atomic<T>* obj) noexcept;
+
+template <class T>
+ T atomic_load_explicit(const volatile atomic<T>* obj, memory_order m) noexcept;
+
+template <class T>
+ T atomic_load_explicit(const atomic<T>* obj, memory_order m) noexcept;
+
+template <class T>
+ T atomic_exchange(volatile atomic<T>* obj, T desr) noexcept;
+
+template <class T>
+ T atomic_exchange(atomic<T>* obj, T desr) noexcept;
+
+template <class T>
+ T atomic_exchange_explicit(volatile atomic<T>* obj, T desr, memory_order m) noexcept;
+
+template <class T>
+ T atomic_exchange_explicit(atomic<T>* obj, T desr, memory_order m) noexcept;
+
+template <class T>
+ bool atomic_compare_exchange_weak(volatile atomic<T>* obj, T* expc, T desr) noexcept;
+
+template <class T>
+ bool atomic_compare_exchange_weak(atomic<T>* obj, T* expc, T desr) noexcept;
+
+template <class T>
+ bool atomic_compare_exchange_strong(volatile atomic<T>* obj, T* expc, T desr) noexcept;
+
+template <class T>
+ bool atomic_compare_exchange_strong(atomic<T>* obj, T* expc, T desr) noexcept;
+
+template <class T>
+ bool atomic_compare_exchange_weak_explicit(volatile atomic<T>* obj, T* expc,
+ T desr,
+ memory_order s, memory_order f) noexcept;
+
+template <class T>
+ bool atomic_compare_exchange_weak_explicit(atomic<T>* obj, T* expc, T desr,
+ memory_order s, memory_order f) noexcept;
+
+template <class T>
+ bool atomic_compare_exchange_strong_explicit(volatile atomic<T>* obj,
+ T* expc, T desr,
+ memory_order s, memory_order f) noexcept;
+
+template <class T>
+ bool atomic_compare_exchange_strong_explicit(atomic<T>* obj, T* expc,
+ T desr,
+ memory_order s, memory_order f) noexcept;
+
+template <class T>
+ void atomic_wait(const volatile atomic<T>* obj, T old) noexcept;
+
+template <class T>
+ void atomic_wait(const atomic<T>* obj, T old) noexcept;
+
+template <class T>
+ void atomic_wait_explicit(const volatile atomic<T>* obj, T old, memory_order m) noexcept;
+
+template <class T>
+ void atomic_wait_explicit(const atomic<T>* obj, T old, memory_order m) noexcept;
+
+template <class T>
+  void atomic_notify_one(volatile atomic<T>* obj) noexcept;
+
+template <class T>
+  void atomic_notify_one(atomic<T>* obj) noexcept;
+
+template <class T>
+  void atomic_notify_all(volatile atomic<T>* obj) noexcept;
+
+template <class T>
+  void atomic_notify_all(atomic<T>* obj) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_add(volatile atomic<Integral>* obj, Integral op) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_add(atomic<Integral>* obj, Integral op) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_add_explicit(volatile atomic<Integral>* obj, Integral op,
+ memory_order m) noexcept;
+template <class Integral>
+ Integral atomic_fetch_add_explicit(atomic<Integral>* obj, Integral op,
+ memory_order m) noexcept;
+template <class Integral>
+ Integral atomic_fetch_sub(volatile atomic<Integral>* obj, Integral op) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_sub(atomic<Integral>* obj, Integral op) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_sub_explicit(volatile atomic<Integral>* obj, Integral op,
+ memory_order m) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_sub_explicit(atomic<Integral>* obj, Integral op,
+ memory_order m) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_and(volatile atomic<Integral>* obj, Integral op) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_and(atomic<Integral>* obj, Integral op) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_and_explicit(volatile atomic<Integral>* obj, Integral op,
+ memory_order m) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_and_explicit(atomic<Integral>* obj, Integral op,
+ memory_order m) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_or(volatile atomic<Integral>* obj, Integral op) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_or(atomic<Integral>* obj, Integral op) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_or_explicit(volatile atomic<Integral>* obj, Integral op,
+ memory_order m) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_or_explicit(atomic<Integral>* obj, Integral op,
+ memory_order m) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_xor(volatile atomic<Integral>* obj, Integral op) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_xor(atomic<Integral>* obj, Integral op) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_xor_explicit(volatile atomic<Integral>* obj, Integral op,
+ memory_order m) noexcept;
+
+template <class Integral>
+ Integral atomic_fetch_xor_explicit(atomic<Integral>* obj, Integral op,
+ memory_order m) noexcept;
+
+template <class T>
+ T* atomic_fetch_add(volatile atomic<T*>* obj, ptrdiff_t op) noexcept;
+
+template <class T>
+ T* atomic_fetch_add(atomic<T*>* obj, ptrdiff_t op) noexcept;
+
+template <class T>
+ T* atomic_fetch_add_explicit(volatile atomic<T*>* obj, ptrdiff_t op,
+ memory_order m) noexcept;
+
+template <class T>
+ T* atomic_fetch_add_explicit(atomic<T*>* obj, ptrdiff_t op, memory_order m) noexcept;
+
+template <class T>
+ T* atomic_fetch_sub(volatile atomic<T*>* obj, ptrdiff_t op) noexcept;
+
+template <class T>
+ T* atomic_fetch_sub(atomic<T*>* obj, ptrdiff_t op) noexcept;
+
+template <class T>
+ T* atomic_fetch_sub_explicit(volatile atomic<T*>* obj, ptrdiff_t op,
+ memory_order m) noexcept;
+
+template <class T>
+ T* atomic_fetch_sub_explicit(atomic<T*>* obj, ptrdiff_t op, memory_order m) noexcept;
+
+// Atomics for standard typedef types
+
+typedef atomic<bool> atomic_bool;
+typedef atomic<char> atomic_char;
+typedef atomic<signed char> atomic_schar;
+typedef atomic<unsigned char> atomic_uchar;
+typedef atomic<short> atomic_short;
+typedef atomic<unsigned short> atomic_ushort;
+typedef atomic<int> atomic_int;
+typedef atomic<unsigned int> atomic_uint;
+typedef atomic<long> atomic_long;
+typedef atomic<unsigned long> atomic_ulong;
+typedef atomic<long long> atomic_llong;
+typedef atomic<unsigned long long> atomic_ullong;
+typedef atomic<char8_t> atomic_char8_t; // C++20
+typedef atomic<char16_t> atomic_char16_t;
+typedef atomic<char32_t> atomic_char32_t;
+typedef atomic<wchar_t> atomic_wchar_t;
+
+typedef atomic<int_least8_t> atomic_int_least8_t;
+typedef atomic<uint_least8_t> atomic_uint_least8_t;
+typedef atomic<int_least16_t> atomic_int_least16_t;
+typedef atomic<uint_least16_t> atomic_uint_least16_t;
+typedef atomic<int_least32_t> atomic_int_least32_t;
+typedef atomic<uint_least32_t> atomic_uint_least32_t;
+typedef atomic<int_least64_t> atomic_int_least64_t;
+typedef atomic<uint_least64_t> atomic_uint_least64_t;
+
+typedef atomic<int_fast8_t> atomic_int_fast8_t;
+typedef atomic<uint_fast8_t> atomic_uint_fast8_t;
+typedef atomic<int_fast16_t> atomic_int_fast16_t;
+typedef atomic<uint_fast16_t> atomic_uint_fast16_t;
+typedef atomic<int_fast32_t> atomic_int_fast32_t;
+typedef atomic<uint_fast32_t> atomic_uint_fast32_t;
+typedef atomic<int_fast64_t> atomic_int_fast64_t;
+typedef atomic<uint_fast64_t> atomic_uint_fast64_t;
+
+typedef atomic<int8_t> atomic_int8_t;
+typedef atomic<uint8_t> atomic_uint8_t;
+typedef atomic<int16_t> atomic_int16_t;
+typedef atomic<uint16_t> atomic_uint16_t;
+typedef atomic<int32_t> atomic_int32_t;
+typedef atomic<uint32_t> atomic_uint32_t;
+typedef atomic<int64_t> atomic_int64_t;
+typedef atomic<uint64_t> atomic_uint64_t;
+
+typedef atomic<intptr_t> atomic_intptr_t;
+typedef atomic<uintptr_t> atomic_uintptr_t;
+typedef atomic<size_t> atomic_size_t;
+typedef atomic<ptrdiff_t> atomic_ptrdiff_t;
+typedef atomic<intmax_t> atomic_intmax_t;
+typedef atomic<uintmax_t> atomic_uintmax_t;
+
+// flag type and operations
+
+typedef struct atomic_flag
+{
+ atomic_flag() noexcept = default; // until C++20
+ constexpr atomic_flag() noexcept; // since C++20
+ atomic_flag(const atomic_flag&) = delete;
+ atomic_flag& operator=(const atomic_flag&) = delete;
+ atomic_flag& operator=(const atomic_flag&) volatile = delete;
+
+ bool test(memory_order m = memory_order_seq_cst) volatile noexcept;
+ bool test(memory_order m = memory_order_seq_cst) noexcept;
+ bool test_and_set(memory_order m = memory_order_seq_cst) volatile noexcept;
+ bool test_and_set(memory_order m = memory_order_seq_cst) noexcept;
+ void clear(memory_order m = memory_order_seq_cst) volatile noexcept;
+ void clear(memory_order m = memory_order_seq_cst) noexcept;
+
+ void wait(bool, memory_order = memory_order::seq_cst) const volatile noexcept;
+ void wait(bool, memory_order = memory_order::seq_cst) const noexcept;
+ void notify_one() volatile noexcept;
+ void notify_one() noexcept;
+ void notify_all() volatile noexcept;
+ void notify_all() noexcept;
+} atomic_flag;
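+
+// Illustrative sketch: atomic_flag is the minimal guaranteed-lock-free
+// primitive; the classic use is a spinlock:
+//
+//   struct spinlock {
+//     std::atomic_flag __f = ATOMIC_FLAG_INIT;
+//     void lock()   { while (__f.test_and_set(std::memory_order_acquire)) {} }
+//     void unlock() { __f.clear(std::memory_order_release); }
+//   };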
+
+bool atomic_flag_test(volatile atomic_flag* obj) noexcept;
+bool atomic_flag_test(atomic_flag* obj) noexcept;
+bool atomic_flag_test_explicit(volatile atomic_flag* obj,
+ memory_order m) noexcept;
+bool atomic_flag_test_explicit(atomic_flag* obj, memory_order m) noexcept;
+bool atomic_flag_test_and_set(volatile atomic_flag* obj) noexcept;
+bool atomic_flag_test_and_set(atomic_flag* obj) noexcept;
+bool atomic_flag_test_and_set_explicit(volatile atomic_flag* obj,
+ memory_order m) noexcept;
+bool atomic_flag_test_and_set_explicit(atomic_flag* obj, memory_order m) noexcept;
+void atomic_flag_clear(volatile atomic_flag* obj) noexcept;
+void atomic_flag_clear(atomic_flag* obj) noexcept;
+void atomic_flag_clear_explicit(volatile atomic_flag* obj, memory_order m) noexcept;
+void atomic_flag_clear_explicit(atomic_flag* obj, memory_order m) noexcept;
+
+void atomic_flag_wait(const volatile atomic_flag* obj, bool old) noexcept;
+void atomic_flag_wait(const atomic_flag* obj, bool old) noexcept;
+void atomic_flag_wait_explicit(const volatile atomic_flag* obj, bool old, memory_order m) noexcept;
+void atomic_flag_wait_explicit(const atomic_flag* obj, bool old, memory_order m) noexcept;
+void atomic_flag_notify_one(volatile atomic_flag* obj) noexcept;
+void atomic_flag_notify_one(atomic_flag* obj) noexcept;
+void atomic_flag_notify_all(volatile atomic_flag* obj) noexcept;
+void atomic_flag_notify_all(atomic_flag* obj) noexcept;
+
+// fences
+
+void atomic_thread_fence(memory_order m) noexcept;
+void atomic_signal_fence(memory_order m) noexcept;
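+
+// Illustrative sketch: fences let relaxed accesses synchronize. A release
+// fence sequenced before a relaxed store pairs with an acquire fence
+// sequenced after a relaxed load of the same object:
+//
+//   int data = 0; std::atomic<bool> flag{false};
+//   // writer:                                   // reader:
+//   data = 42;                                   while (!flag.load(memory_order_relaxed)) {}
+//   atomic_thread_fence(memory_order_release);   atomic_thread_fence(memory_order_acquire);
+//   flag.store(true, memory_order_relaxed);      assert(data == 42);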
+
+// deprecated
+
+template <class T>
+ void atomic_init(volatile atomic<T>* obj, typename atomic<T>::value_type desr) noexcept;
+
+template <class T>
+ void atomic_init(atomic<T>* obj, typename atomic<T>::value_type desr) noexcept;
+
+#define ATOMIC_VAR_INIT(value) see below
+
+#define ATOMIC_FLAG_INIT see below
+
+} // std
+
+*/
+
+#include <__availability>
+#include <__config>
+#include <__threading_support>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <type_traits>
+#include <version>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#pragma GCC system_header
+#endif
+
+#ifdef _LIBCPP_HAS_NO_THREADS
+# error <atomic> is not supported on this single-threaded system
+#endif
+#ifdef _LIBCPP_HAS_NO_ATOMIC_HEADER
+# error <atomic> is not implemented
+#endif
+#ifdef kill_dependency
+# error C++ standard library is incompatible with <stdatomic.h>
+#endif
+
+#define _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m) \
+ _LIBCPP_DIAGNOSE_WARNING(__m == memory_order_consume || \
+ __m == memory_order_acquire || \
+ __m == memory_order_acq_rel, \
+ "memory order argument to atomic operation is invalid")
+
+#define _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m) \
+ _LIBCPP_DIAGNOSE_WARNING(__m == memory_order_release || \
+ __m == memory_order_acq_rel, \
+ "memory order argument to atomic operation is invalid")
+
+#define _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__m, __f) \
+ _LIBCPP_DIAGNOSE_WARNING(__f == memory_order_release || \
+ __f == memory_order_acq_rel, \
+ "memory order argument to atomic operation is invalid")
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+// Figure out what the underlying type for `memory_order` would be if it were
+// declared as an unscoped enum (accounting for -fshort-enums). Use this result
+// to pin the underlying type in C++20.
+enum __legacy_memory_order {
+ __mo_relaxed,
+ __mo_consume,
+ __mo_acquire,
+ __mo_release,
+ __mo_acq_rel,
+ __mo_seq_cst
+};
+
+typedef underlying_type<__legacy_memory_order>::type __memory_order_underlying_t;
+
+#if _LIBCPP_STD_VER > 17
+
+enum class memory_order : __memory_order_underlying_t {
+ relaxed = __mo_relaxed,
+ consume = __mo_consume,
+ acquire = __mo_acquire,
+ release = __mo_release,
+ acq_rel = __mo_acq_rel,
+ seq_cst = __mo_seq_cst
+};
+
+inline constexpr auto memory_order_relaxed = memory_order::relaxed;
+inline constexpr auto memory_order_consume = memory_order::consume;
+inline constexpr auto memory_order_acquire = memory_order::acquire;
+inline constexpr auto memory_order_release = memory_order::release;
+inline constexpr auto memory_order_acq_rel = memory_order::acq_rel;
+inline constexpr auto memory_order_seq_cst = memory_order::seq_cst;
+
+#else
+
+typedef enum memory_order {
+ memory_order_relaxed = __mo_relaxed,
+ memory_order_consume = __mo_consume,
+ memory_order_acquire = __mo_acquire,
+ memory_order_release = __mo_release,
+ memory_order_acq_rel = __mo_acq_rel,
+ memory_order_seq_cst = __mo_seq_cst,
+} memory_order;
+
+#endif // _LIBCPP_STD_VER > 17
+
+template <typename _Tp> _LIBCPP_INLINE_VISIBILITY
+bool __cxx_nonatomic_compare_equal(_Tp const& __lhs, _Tp const& __rhs) {
+ return _VSTD::memcmp(&__lhs, &__rhs, sizeof(_Tp)) == 0;
+}
+
+static_assert((is_same<underlying_type<memory_order>::type, __memory_order_underlying_t>::value),
+ "unexpected underlying type for std::memory_order");
+
+#if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) || \
+ defined(_LIBCPP_ATOMIC_ONLY_USE_BUILTINS)
+
+// [atomics.types.generic]p1 guarantees _Tp is trivially copyable. Because
+// an object's implicitly-defined operator= is not volatile-qualified, the
+// volatile overload below performs a byte-by-byte copy instead.
+template <typename _Tp, typename _Tv> _LIBCPP_INLINE_VISIBILITY
+typename enable_if<is_assignable<_Tp&, _Tv>::value>::type
+__cxx_atomic_assign_volatile(_Tp& __a_value, _Tv const& __val) {
+ __a_value = __val;
+}
+template <typename _Tp, typename _Tv> _LIBCPP_INLINE_VISIBILITY
+typename enable_if<is_assignable<_Tp&, _Tv>::value>::type
+__cxx_atomic_assign_volatile(_Tp volatile& __a_value, _Tv volatile const& __val) {
+ volatile char* __to = reinterpret_cast<volatile char*>(&__a_value);
+ volatile char* __end = __to + sizeof(_Tp);
+ volatile const char* __from = reinterpret_cast<volatile const char*>(&__val);
+ while (__to != __end)
+ *__to++ = *__from++;
+}
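+
+// Sketch of why the byte-wise overload above exists (hypothetical type for
+// illustration): given `struct _Pair { int __x, __y; };`,
+//   volatile _Pair __p; __p = _Pair{1, 2};
+// is ill-formed because the implicit operator= takes `_Pair&`, which cannot
+// bind to a volatile object; copying the representation byte by byte works.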
+
+#endif
+
+#if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP)
+
+template <typename _Tp>
+struct __cxx_atomic_base_impl {
+
+ _LIBCPP_INLINE_VISIBILITY
+#ifndef _LIBCPP_CXX03_LANG
+ __cxx_atomic_base_impl() _NOEXCEPT = default;
+#else
+ __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {}
+#endif // _LIBCPP_CXX03_LANG
+ _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT
+ : __a_value(value) {}
+ _Tp __a_value;
+};
+
+_LIBCPP_INLINE_VISIBILITY inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
+ // Use conditional expressions rather than a switch so this remains a
+ // valid C++11 constexpr function.
+ return __order == memory_order_relaxed ? __ATOMIC_RELAXED:
+ (__order == memory_order_acquire ? __ATOMIC_ACQUIRE:
+ (__order == memory_order_release ? __ATOMIC_RELEASE:
+ (__order == memory_order_seq_cst ? __ATOMIC_SEQ_CST:
+ (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL:
+ __ATOMIC_CONSUME))));
+}
+
+_LIBCPP_INLINE_VISIBILITY inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) {
+ // Use conditional expressions rather than a switch so this remains a
+ // valid C++11 constexpr function.
+ return __order == memory_order_relaxed ? __ATOMIC_RELAXED:
+ (__order == memory_order_acquire ? __ATOMIC_ACQUIRE:
+ (__order == memory_order_release ? __ATOMIC_RELAXED:
+ (__order == memory_order_seq_cst ? __ATOMIC_SEQ_CST:
+ (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE:
+ __ATOMIC_CONSUME))));
+}
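+
+// Restating the mapping above: a failed compare-exchange performs only a
+// load, so the release component of the requested order is dropped:
+//   relaxed -> RELAXED, consume -> CONSUME, acquire -> ACQUIRE,
+//   release -> RELAXED, acq_rel -> ACQUIRE, seq_cst -> SEQ_CST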
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
+ __cxx_atomic_assign_volatile(__a->__a_value, __val);
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
+ __a->__a_value = __val;
+}
+
+_LIBCPP_INLINE_VISIBILITY inline
+void __cxx_atomic_thread_fence(memory_order __order) {
+ __atomic_thread_fence(__to_gcc_order(__order));
+}
+
+_LIBCPP_INLINE_VISIBILITY inline
+void __cxx_atomic_signal_fence(memory_order __order) {
+ __atomic_signal_fence(__to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_store(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val,
+ memory_order __order) {
+ __atomic_store(&__a->__a_value, &__val,
+ __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val,
+ memory_order __order) {
+ __atomic_store(&__a->__a_value, &__val,
+ __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a,
+ memory_order __order) {
+ _Tp __ret;
+ __atomic_load(&__a->__a_value, &__ret,
+ __to_gcc_order(__order));
+ return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
+ _Tp __ret;
+ __atomic_load(&__a->__a_value, &__ret,
+ __to_gcc_order(__order));
+ return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp>* __a,
+ _Tp __value, memory_order __order) {
+ _Tp __ret;
+ __atomic_exchange(&__a->__a_value, &__value, &__ret,
+ __to_gcc_order(__order));
+ return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value,
+ memory_order __order) {
+ _Tp __ret;
+ __atomic_exchange(&__a->__a_value, &__value, &__ret,
+ __to_gcc_order(__order));
+ return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool __cxx_atomic_compare_exchange_strong(
+ volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value,
+ memory_order __success, memory_order __failure) {
+ return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
+ false,
+ __to_gcc_order(__success),
+ __to_gcc_failure_order(__failure));
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool __cxx_atomic_compare_exchange_strong(
+ __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success,
+ memory_order __failure) {
+ return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
+ false,
+ __to_gcc_order(__success),
+ __to_gcc_failure_order(__failure));
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool __cxx_atomic_compare_exchange_weak(
+ volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value,
+ memory_order __success, memory_order __failure) {
+ return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
+ true,
+ __to_gcc_order(__success),
+ __to_gcc_failure_order(__failure));
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool __cxx_atomic_compare_exchange_weak(
+ __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success,
+ memory_order __failure) {
+ return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
+ true,
+ __to_gcc_order(__success),
+ __to_gcc_failure_order(__failure));
+}
+
+template <typename _Tp>
+struct __skip_amt { enum {value = 1}; };
+
+template <typename _Tp>
+struct __skip_amt<_Tp*> { enum {value = sizeof(_Tp)}; };
+
+// FIXME: Haven't figured out what the spec says about using arrays with
+// atomic_fetch_add. Force a failure rather than creating bad behavior.
+template <typename _Tp>
+struct __skip_amt<_Tp[]> { };
+template <typename _Tp, int n>
+struct __skip_amt<_Tp[n]> { };
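+
+// With __skip_amt in place, the fetch_add/fetch_sub helpers below scale the
+// delta for pointer types: on an atomic<int*>, a delta of 2 advances the
+// stored pointer by 2 * sizeof(int) bytes, matching built-in T* arithmetic.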
+
+template <typename _Tp, typename _Td>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_base_impl<_Tp>* __a,
+ _Td __delta, memory_order __order) {
+ return __atomic_fetch_add(&__a->__a_value, __delta * __skip_amt<_Tp>::value,
+ __to_gcc_order(__order));
+}
+
+template <typename _Tp, typename _Td>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta,
+ memory_order __order) {
+ return __atomic_fetch_add(&__a->__a_value, __delta * __skip_amt<_Tp>::value,
+ __to_gcc_order(__order));
+}
+
+template <typename _Tp, typename _Td>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_base_impl<_Tp>* __a,
+ _Td __delta, memory_order __order) {
+ return __atomic_fetch_sub(&__a->__a_value, __delta * __skip_amt<_Tp>::value,
+ __to_gcc_order(__order));
+}
+
+template <typename _Tp, typename _Td>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta,
+ memory_order __order) {
+ return __atomic_fetch_sub(&__a->__a_value, __delta * __skip_amt<_Tp>::value,
+ __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_base_impl<_Tp>* __a,
+ _Tp __pattern, memory_order __order) {
+ return __atomic_fetch_and(&__a->__a_value, __pattern,
+ __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a,
+ _Tp __pattern, memory_order __order) {
+ return __atomic_fetch_and(&__a->__a_value, __pattern,
+ __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_or(volatile __cxx_atomic_base_impl<_Tp>* __a,
+ _Tp __pattern, memory_order __order) {
+ return __atomic_fetch_or(&__a->__a_value, __pattern,
+ __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern,
+ memory_order __order) {
+ return __atomic_fetch_or(&__a->__a_value, __pattern,
+ __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_base_impl<_Tp>* __a,
+ _Tp __pattern, memory_order __order) {
+ return __atomic_fetch_xor(&__a->__a_value, __pattern,
+ __to_gcc_order(__order));
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern,
+ memory_order __order) {
+ return __atomic_fetch_xor(&__a->__a_value, __pattern,
+ __to_gcc_order(__order));
+}
+
+#define __cxx_atomic_is_lock_free(__s) __atomic_is_lock_free(__s, 0)
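+
+// Note: the null second argument asks __atomic_is_lock_free about an object
+// of that size with the typical (natural) alignment for its size.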
+
+#elif defined(_LIBCPP_HAS_C_ATOMIC_IMP)
+
+template <typename _Tp>
+struct __cxx_atomic_base_impl {
+
+ _LIBCPP_INLINE_VISIBILITY
+#ifndef _LIBCPP_CXX03_LANG
+ __cxx_atomic_base_impl() _NOEXCEPT = default;
+#else
+ __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {}
+#endif // _LIBCPP_CXX03_LANG
+ _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT
+ : __a_value(value) {}
+ _LIBCPP_DISABLE_EXTENSION_WARNING _Atomic(_Tp) __a_value;
+};
+
+#define __cxx_atomic_is_lock_free(__s) __c11_atomic_is_lock_free(__s)
+
+_LIBCPP_INLINE_VISIBILITY inline
+void __cxx_atomic_thread_fence(memory_order __order) _NOEXCEPT {
+ __c11_atomic_thread_fence(static_cast<__memory_order_underlying_t>(__order));
+}
+
+_LIBCPP_INLINE_VISIBILITY inline
+void __cxx_atomic_signal_fence(memory_order __order) _NOEXCEPT {
+ __c11_atomic_signal_fence(static_cast<__memory_order_underlying_t>(__order));
+}
+
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val) _NOEXCEPT {
+ __c11_atomic_init(&__a->__a_value, __val);
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val) _NOEXCEPT {
+ __c11_atomic_init(&__a->__a_value, __val);
+}
+
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val, memory_order __order) _NOEXCEPT {
+ __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order));
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val, memory_order __order) _NOEXCEPT {
+ __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order));
+}
+
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const volatile* __a, memory_order __order) _NOEXCEPT {
+ using __ptr_type = typename remove_const<decltype(__a->__a_value)>::type*;
+ return __c11_atomic_load(const_cast<__ptr_type>(&__a->__a_value), static_cast<__memory_order_underlying_t>(__order));
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __order) _NOEXCEPT {
+ using __ptr_type = typename remove_const<decltype(__a->__a_value)>::type*;
+ return __c11_atomic_load(const_cast<__ptr_type>(&__a->__a_value), static_cast<__memory_order_underlying_t>(__order));
+}
+
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order));
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> * __a, _Tp __value, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order));
+}
+
+_LIBCPP_INLINE_VISIBILITY inline _LIBCPP_CONSTEXPR memory_order __to_failure_order(memory_order __order) {
+ // Use conditional expressions rather than a switch so this remains a
+ // valid C++11 constexpr function.
+ return __order == memory_order_release ? memory_order_relaxed:
+ (__order == memory_order_acq_rel ? memory_order_acquire:
+ __order);
+}
+
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
+ return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
+ return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
+}
+
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
+ return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
+ return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
+}
+
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
+}
+
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_and(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_and(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
+}
+
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_or(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_or(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
+}
+
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_xor(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
+}
+template<class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
+ return __c11_atomic_fetch_xor(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
+}
+
+#endif // _LIBCPP_HAS_GCC_ATOMIC_IMP, _LIBCPP_HAS_C_ATOMIC_IMP
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp kill_dependency(_Tp __y) _NOEXCEPT
+{
+ return __y;
+}
+
+#if defined(__CLANG_ATOMIC_BOOL_LOCK_FREE)
+# define ATOMIC_BOOL_LOCK_FREE __CLANG_ATOMIC_BOOL_LOCK_FREE
+# define ATOMIC_CHAR_LOCK_FREE __CLANG_ATOMIC_CHAR_LOCK_FREE
+#ifndef _LIBCPP_HAS_NO_CHAR8_T
+# define ATOMIC_CHAR8_T_LOCK_FREE __CLANG_ATOMIC_CHAR8_T_LOCK_FREE
+#endif
+# define ATOMIC_CHAR16_T_LOCK_FREE __CLANG_ATOMIC_CHAR16_T_LOCK_FREE
+# define ATOMIC_CHAR32_T_LOCK_FREE __CLANG_ATOMIC_CHAR32_T_LOCK_FREE
+# define ATOMIC_WCHAR_T_LOCK_FREE __CLANG_ATOMIC_WCHAR_T_LOCK_FREE
+# define ATOMIC_SHORT_LOCK_FREE __CLANG_ATOMIC_SHORT_LOCK_FREE
+# define ATOMIC_INT_LOCK_FREE __CLANG_ATOMIC_INT_LOCK_FREE
+# define ATOMIC_LONG_LOCK_FREE __CLANG_ATOMIC_LONG_LOCK_FREE
+# define ATOMIC_LLONG_LOCK_FREE __CLANG_ATOMIC_LLONG_LOCK_FREE
+# define ATOMIC_POINTER_LOCK_FREE __CLANG_ATOMIC_POINTER_LOCK_FREE
+#elif defined(__GCC_ATOMIC_BOOL_LOCK_FREE)
+# define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
+# define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
+#ifndef _LIBCPP_HAS_NO_CHAR8_T
+# define ATOMIC_CHAR8_T_LOCK_FREE __GCC_ATOMIC_CHAR8_T_LOCK_FREE
+#endif
+# define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+# define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+# define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+# define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+# define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+# define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+# define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
+# define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
+#endif
+
+#ifdef _LIBCPP_ATOMIC_ONLY_USE_BUILTINS
+
+template<typename _Tp>
+struct __cxx_atomic_lock_impl {
+
+ _LIBCPP_INLINE_VISIBILITY
+ __cxx_atomic_lock_impl() _NOEXCEPT
+ : __a_value(), __a_lock(0) {}
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR explicit
+ __cxx_atomic_lock_impl(_Tp value) _NOEXCEPT
+ : __a_value(value), __a_lock(0) {}
+
+ _Tp __a_value;
+ mutable __cxx_atomic_base_impl<_LIBCPP_ATOMIC_FLAG_TYPE> __a_lock;
+
+ _LIBCPP_INLINE_VISIBILITY void __lock() const volatile {
+ while(1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire))
+ /*spin*/;
+ }
+ _LIBCPP_INLINE_VISIBILITY void __lock() const {
+ while(1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire))
+ /*spin*/;
+ }
+ _LIBCPP_INLINE_VISIBILITY void __unlock() const volatile {
+ __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release);
+ }
+ _LIBCPP_INLINE_VISIBILITY void __unlock() const {
+ __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release);
+ }
+ _LIBCPP_INLINE_VISIBILITY _Tp __read() const volatile {
+ __lock();
+ _Tp __old;
+ __cxx_atomic_assign_volatile(__old, __a_value);
+ __unlock();
+ return __old;
+ }
+ _LIBCPP_INLINE_VISIBILITY _Tp __read() const {
+ __lock();
+ _Tp __old = __a_value;
+ __unlock();
+ return __old;
+ }
+};
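+
+// Sketch of the fallback above: every operation on a non-lock-free object
+// is bracketed by the per-object spinlock; a load, for instance, expands to
+//   __a->__lock(); _Tp __v = __a->__a_value; __a->__unlock(); return __v;
+// which is why such objects report is_lock_free() == false.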
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_init(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __val) {
+ __cxx_atomic_assign_volatile(__a->__a_value, __val);
+}
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_init(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __val) {
+ __a->__a_value = __val;
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_store(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __val, memory_order) {
+ __a->__lock();
+ __cxx_atomic_assign_volatile(__a->__a_value, __val);
+ __a->__unlock();
+}
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void __cxx_atomic_store(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __val, memory_order) {
+ __a->__lock();
+ __a->__a_value = __val;
+ __a->__unlock();
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp>* __a, memory_order) {
+ return __a->__read();
+}
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, memory_order) {
+ return __a->__read();
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) {
+ __a->__lock();
+ _Tp __old;
+ __cxx_atomic_assign_volatile(__old, __a->__a_value);
+ __cxx_atomic_assign_volatile(__a->__a_value, __value);
+ __a->__unlock();
+ return __old;
+}
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_exchange(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) {
+ __a->__lock();
+ _Tp __old = __a->__a_value;
+ __a->__a_value = __value;
+ __a->__unlock();
+ return __old;
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool __cxx_atomic_compare_exchange_strong(volatile __cxx_atomic_lock_impl<_Tp>* __a,
+ _Tp* __expected, _Tp __value, memory_order, memory_order) {
+ _Tp __temp;
+ __a->__lock();
+ __cxx_atomic_assign_volatile(__temp, __a->__a_value);
+ bool __ret = (_VSTD::memcmp(&__temp, __expected, sizeof(_Tp)) == 0);
+ if(__ret)
+ __cxx_atomic_assign_volatile(__a->__a_value, __value);
+ else
+ __cxx_atomic_assign_volatile(*__expected, __a->__a_value);
+ __a->__unlock();
+ return __ret;
+}
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_lock_impl<_Tp>* __a,
+ _Tp* __expected, _Tp __value, memory_order, memory_order) {
+ __a->__lock();
+ bool __ret = (_VSTD::memcmp(&__a->__a_value, __expected, sizeof(_Tp)) == 0);
+ if(__ret)
+ _VSTD::memcpy(&__a->__a_value, &__value, sizeof(_Tp));
+ else
+ _VSTD::memcpy(__expected, &__a->__a_value, sizeof(_Tp));
+ __a->__unlock();
+ return __ret;
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool __cxx_atomic_compare_exchange_weak(volatile __cxx_atomic_lock_impl<_Tp>* __a,
+ _Tp* __expected, _Tp __value, memory_order, memory_order) {
+ _Tp __temp;
+ __a->__lock();
+ __cxx_atomic_assign_volatile(__temp, __a->__a_value);
+ bool __ret = (_VSTD::memcmp(&__temp, __expected, sizeof(_Tp)) == 0);
+ if(__ret)
+ __cxx_atomic_assign_volatile(__a->__a_value, __value);
+ else
+ __cxx_atomic_assign_volatile(*__expected, __a->__a_value);
+ __a->__unlock();
+ return __ret;
+}
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_lock_impl<_Tp>* __a,
+ _Tp* __expected, _Tp __value, memory_order, memory_order) {
+ __a->__lock();
+ bool __ret = (_VSTD::memcmp(&__a->__a_value, __expected, sizeof(_Tp)) == 0);
+ if(__ret)
+ _VSTD::memcpy(&__a->__a_value, &__value, sizeof(_Tp));
+ else
+ _VSTD::memcpy(__expected, &__a->__a_value, sizeof(_Tp));
+ __a->__unlock();
+ return __ret;
+}
+
+template <typename _Tp, typename _Td>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp>* __a,
+ _Td __delta, memory_order) {
+ __a->__lock();
+ _Tp __old;
+ __cxx_atomic_assign_volatile(__old, __a->__a_value);
+ __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old + __delta));
+ __a->__unlock();
+ return __old;
+}
+template <typename _Tp, typename _Td>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp>* __a,
+ _Td __delta, memory_order) {
+ __a->__lock();
+ _Tp __old = __a->__a_value;
+ __a->__a_value += __delta;
+ __a->__unlock();
+ return __old;
+}
+
+template <typename _Tp, typename _Td>
+_LIBCPP_INLINE_VISIBILITY
+_Tp* __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp*>* __a,
+ ptrdiff_t __delta, memory_order) {
+ __a->__lock();
+ _Tp* __old;
+ __cxx_atomic_assign_volatile(__old, __a->__a_value);
+ __cxx_atomic_assign_volatile(__a->__a_value, __old + __delta);
+ __a->__unlock();
+ return __old;
+}
+template <typename _Tp, typename _Td>
+_LIBCPP_INLINE_VISIBILITY
+_Tp* __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp*>* __a,
+ ptrdiff_t __delta, memory_order) {
+ __a->__lock();
+ _Tp* __old = __a->__a_value;
+ __a->__a_value += __delta;
+ __a->__unlock();
+ return __old;
+}
+
+template <typename _Tp, typename _Td>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_lock_impl<_Tp>* __a,
+ _Td __delta, memory_order) {
+ __a->__lock();
+ _Tp __old;
+ __cxx_atomic_assign_volatile(__old, __a->__a_value);
+ __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old - __delta));
+ __a->__unlock();
+ return __old;
+}
+template <typename _Tp, typename _Td>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_sub(__cxx_atomic_lock_impl<_Tp>* __a,
+ _Td __delta, memory_order) {
+ __a->__lock();
+ _Tp __old = __a->__a_value;
+ __a->__a_value -= __delta;
+ __a->__unlock();
+ return __old;
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_lock_impl<_Tp>* __a,
+ _Tp __pattern, memory_order) {
+ __a->__lock();
+ _Tp __old;
+ __cxx_atomic_assign_volatile(__old, __a->__a_value);
+ __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old & __pattern));
+ __a->__unlock();
+ return __old;
+}
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_and(__cxx_atomic_lock_impl<_Tp>* __a,
+ _Tp __pattern, memory_order) {
+ __a->__lock();
+ _Tp __old = __a->__a_value;
+ __a->__a_value &= __pattern;
+ __a->__unlock();
+ return __old;
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_or(volatile __cxx_atomic_lock_impl<_Tp>* __a,
+ _Tp __pattern, memory_order) {
+ __a->__lock();
+ _Tp __old;
+ __cxx_atomic_assign_volatile(__old, __a->__a_value);
+ __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old | __pattern));
+ __a->__unlock();
+ return __old;
+}
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_or(__cxx_atomic_lock_impl<_Tp>* __a,
+ _Tp __pattern, memory_order) {
+ __a->__lock();
+ _Tp __old = __a->__a_value;
+ __a->__a_value |= __pattern;
+ __a->__unlock();
+ return __old;
+}
+
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_lock_impl<_Tp>* __a,
+ _Tp __pattern, memory_order) {
+ __a->__lock();
+ _Tp __old;
+ __cxx_atomic_assign_volatile(__old, __a->__a_value);
+ __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old ^ __pattern));
+ __a->__unlock();
+ return __old;
+}
+template <typename _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp __cxx_atomic_fetch_xor(__cxx_atomic_lock_impl<_Tp>* __a,
+ _Tp __pattern, memory_order) {
+ __a->__lock();
+ _Tp __old = __a->__a_value;
+ __a->__a_value ^= __pattern;
+ __a->__unlock();
+ return __old;
+}
+
+#ifdef __cpp_lib_atomic_is_always_lock_free
+
+template<typename _Tp> struct __cxx_is_always_lock_free {
+ enum { __value = __atomic_always_lock_free(sizeof(_Tp), 0) }; };
+
+#else
+
+template<typename _Tp> struct __cxx_is_always_lock_free { enum { __value = false }; };
+// Implementations must match the C ATOMIC_*_LOCK_FREE macro values.
+template<> struct __cxx_is_always_lock_free<bool> { enum { __value = 2 == ATOMIC_BOOL_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<char> { enum { __value = 2 == ATOMIC_CHAR_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<signed char> { enum { __value = 2 == ATOMIC_CHAR_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<unsigned char> { enum { __value = 2 == ATOMIC_CHAR_LOCK_FREE }; };
+#ifndef _LIBCPP_HAS_NO_CHAR8_T
+template<> struct __cxx_is_always_lock_free<char8_t> { enum { __value = 2 == ATOMIC_CHAR8_T_LOCK_FREE }; };
+#endif
+template<> struct __cxx_is_always_lock_free<char16_t> { enum { __value = 2 == ATOMIC_CHAR16_T_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<char32_t> { enum { __value = 2 == ATOMIC_CHAR32_T_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<wchar_t> { enum { __value = 2 == ATOMIC_WCHAR_T_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<short> { enum { __value = 2 == ATOMIC_SHORT_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<unsigned short> { enum { __value = 2 == ATOMIC_SHORT_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<int> { enum { __value = 2 == ATOMIC_INT_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<unsigned int> { enum { __value = 2 == ATOMIC_INT_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<long> { enum { __value = 2 == ATOMIC_LONG_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<unsigned long> { enum { __value = 2 == ATOMIC_LONG_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<long long> { enum { __value = 2 == ATOMIC_LLONG_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<unsigned long long> { enum { __value = 2 == ATOMIC_LLONG_LOCK_FREE }; };
+template<typename _Tp> struct __cxx_is_always_lock_free<_Tp*> { enum { __value = 2 == ATOMIC_POINTER_LOCK_FREE }; };
+template<> struct __cxx_is_always_lock_free<std::nullptr_t> { enum { __value = 2 == ATOMIC_POINTER_LOCK_FREE }; };
+
+#endif //__cpp_lib_atomic_is_always_lock_free
+
+template <typename _Tp,
+ typename _Base = typename conditional<__cxx_is_always_lock_free<_Tp>::__value,
+ __cxx_atomic_base_impl<_Tp>,
+ __cxx_atomic_lock_impl<_Tp> >::type>
+#else
+template <typename _Tp,
+ typename _Base = __cxx_atomic_base_impl<_Tp> >
+#endif //_LIBCPP_ATOMIC_ONLY_USE_BUILTINS
+struct __cxx_atomic_impl : public _Base {
+
+#if _GNUC_VER >= 501
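+  // std::is_trivially_copyable is unavailable before GCC 5.1.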
+ static_assert(is_trivially_copyable<_Tp>::value,
+ "std::atomic<Tp> requires that 'Tp' be a trivially copyable type");
+#endif
+
+ _LIBCPP_INLINE_VISIBILITY __cxx_atomic_impl() _NOEXCEPT _LIBCPP_DEFAULT
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR explicit __cxx_atomic_impl(_Tp value) _NOEXCEPT
+ : _Base(value) {}
+};
+
+#if defined(__linux__) || (defined(__FreeBSD__) && defined(__mips__))
+ using __cxx_contention_t = int32_t;
+#else
+ using __cxx_contention_t = int64_t;
+#endif
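+
+// The contention counter is 32 bits wide on platforms whose native wait
+// primitive operates on 32-bit words (Linux futexes, for example) and 64
+// bits wide elsewhere.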
+
+using __cxx_atomic_contention_t = __cxx_atomic_impl<__cxx_contention_t>;
+
+#ifndef _LIBCPP_HAS_NO_PLATFORM_WAIT
+
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one(void const volatile*);
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all(void const volatile*);
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t __libcpp_atomic_monitor(void const volatile*);
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __libcpp_atomic_wait(void const volatile*, __cxx_contention_t);
+
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one(__cxx_atomic_contention_t const volatile*);
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all(__cxx_atomic_contention_t const volatile*);
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t __libcpp_atomic_monitor(__cxx_atomic_contention_t const volatile*);
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __libcpp_atomic_wait(__cxx_atomic_contention_t const volatile*, __cxx_contention_t);
+
+template <class _Atp, class _Fn>
+struct __libcpp_atomic_wait_backoff_impl {
+ _Atp* __a;
+ _Fn __test_fn;
+ _LIBCPP_AVAILABILITY_SYNC
+ _LIBCPP_INLINE_VISIBILITY bool operator()(chrono::nanoseconds __elapsed) const
+ {
+ if(__elapsed > chrono::microseconds(64))
+ {
+ auto const __monitor = __libcpp_atomic_monitor(__a);
+ if(__test_fn())
+ return true;
+ __libcpp_atomic_wait(__a, __monitor);
+ }
+ else if(__elapsed > chrono::microseconds(4))
+ __libcpp_thread_yield();
+ else
+ {} // poll
+ return false;
+ }
+};
+
+template <class _Atp, class _Fn>
+_LIBCPP_AVAILABILITY_SYNC
+_LIBCPP_INLINE_VISIBILITY bool __cxx_atomic_wait(_Atp* __a, _Fn && __test_fn)
+{
+ __libcpp_atomic_wait_backoff_impl<_Atp, typename decay<_Fn>::type> __backoff_fn = {__a, __test_fn};
+ return __libcpp_thread_poll_with_backoff(__test_fn, __backoff_fn);
+}
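+
+// The backoff policy above escalates in three phases by elapsed time: for
+// roughly the first 4us the predicate is polled in a tight loop, up to 64us
+// the thread yields between polls, and beyond that it blocks in the
+// platform wait primitive. The monitor value is sampled before the final
+// re-check of the predicate, so a change (and its notification) that races
+// with that check makes __libcpp_atomic_wait return immediately rather than
+// being missed.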
+
+#else // _LIBCPP_HAS_NO_PLATFORM_WAIT
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY void __cxx_atomic_notify_all(__cxx_atomic_impl<_Tp> const volatile*) { }
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY void __cxx_atomic_notify_one(__cxx_atomic_impl<_Tp> const volatile*) { }
+template <class _Atp, class _Fn>
+_LIBCPP_INLINE_VISIBILITY bool __cxx_atomic_wait(_Atp*, _Fn && __test_fn)
+{
+ return __libcpp_thread_poll_with_backoff(__test_fn, __libcpp_timed_backoff_policy());
+}
+
+#endif // _LIBCPP_HAS_NO_PLATFORM_WAIT
+
+template <class _Atp, class _Tp>
+struct __cxx_atomic_wait_test_fn_impl {
+ _Atp* __a;
+ _Tp __val;
+ memory_order __order;
+ _LIBCPP_INLINE_VISIBILITY bool operator()() const
+ {
+ return !__cxx_nonatomic_compare_equal(__cxx_atomic_load(__a, __order), __val);
+ }
+};
+
+template <class _Atp, class _Tp>
+_LIBCPP_AVAILABILITY_SYNC
+_LIBCPP_INLINE_VISIBILITY bool __cxx_atomic_wait(_Atp* __a, _Tp const __val, memory_order __order)
+{
+ __cxx_atomic_wait_test_fn_impl<_Atp, _Tp> __test_fn = {__a, __val, __order};
+ return __cxx_atomic_wait(__a, __test_fn);
+}
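+
+// __cxx_atomic_wait(__a, __val, __order) therefore blocks until a load with
+// the given memory order observes a value that compares unequal to __val,
+// which is exactly the semantics of atomic<T>::wait. Illustrative caller
+// (not part of this header):
+//
+//   std::atomic<int> ready{0};
+//   // thread 1:  ready.wait(0);      // returns once ready != 0
+//   // thread 2:  ready.store(1);
+//   //            ready.notify_one();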
+
+// general atomic<T>
+
+template <class _Tp, bool = is_integral<_Tp>::value && !is_same<_Tp, bool>::value>
+struct __atomic_base // false
+{
+ mutable __cxx_atomic_impl<_Tp> __a_;
+
+#if defined(__cpp_lib_atomic_is_always_lock_free)
+ static _LIBCPP_CONSTEXPR bool is_always_lock_free = __atomic_always_lock_free(sizeof(__a_), 0);
+#endif
+
+ _LIBCPP_INLINE_VISIBILITY
+ bool is_lock_free() const volatile _NOEXCEPT
+ {return __cxx_atomic_is_lock_free(sizeof(_Tp));}
+ _LIBCPP_INLINE_VISIBILITY
+ bool is_lock_free() const _NOEXCEPT
+ {return static_cast<__atomic_base const volatile*>(this)->is_lock_free();}
+ _LIBCPP_INLINE_VISIBILITY
+ void store(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
+ _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m)
+ {__cxx_atomic_store(&__a_, __d, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ void store(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m)
+ {__cxx_atomic_store(&__a_, __d, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp load(memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT
+ _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m)
+ {return __cxx_atomic_load(&__a_, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp load(memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+ _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m)
+ {return __cxx_atomic_load(&__a_, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ operator _Tp() const volatile _NOEXCEPT {return load();}
+ _LIBCPP_INLINE_VISIBILITY
+ operator _Tp() const _NOEXCEPT {return load();}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
+ {return __cxx_atomic_exchange(&__a_, __d, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ {return __cxx_atomic_exchange(&__a_, __d, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ bool compare_exchange_weak(_Tp& __e, _Tp __d,
+ memory_order __s, memory_order __f) volatile _NOEXCEPT
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
+ {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __s, __f);}
+ _LIBCPP_INLINE_VISIBILITY
+ bool compare_exchange_weak(_Tp& __e, _Tp __d,
+ memory_order __s, memory_order __f) _NOEXCEPT
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
+ {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __s, __f);}
+ _LIBCPP_INLINE_VISIBILITY
+ bool compare_exchange_strong(_Tp& __e, _Tp __d,
+ memory_order __s, memory_order __f) volatile _NOEXCEPT
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
+ {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __s, __f);}
+ _LIBCPP_INLINE_VISIBILITY
+ bool compare_exchange_strong(_Tp& __e, _Tp __d,
+ memory_order __s, memory_order __f) _NOEXCEPT
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
+ {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __s, __f);}
+ _LIBCPP_INLINE_VISIBILITY
+ bool compare_exchange_weak(_Tp& __e, _Tp __d,
+ memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
+ {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __m, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ bool compare_exchange_weak(_Tp& __e, _Tp __d,
+ memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __m, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ bool compare_exchange_strong(_Tp& __e, _Tp __d,
+ memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
+ {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __m, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ bool compare_exchange_strong(_Tp& __e, _Tp __d,
+ memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __m, __m);}
+
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT
+ {__cxx_atomic_wait(&__a_, __v, __m);}
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+ {__cxx_atomic_wait(&__a_, __v, __m);}
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY void notify_one() volatile _NOEXCEPT
+ {__cxx_atomic_notify_one(&__a_);}
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY void notify_one() _NOEXCEPT
+ {__cxx_atomic_notify_one(&__a_);}
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY void notify_all() volatile _NOEXCEPT
+ {__cxx_atomic_notify_all(&__a_);}
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY void notify_all() _NOEXCEPT
+ {__cxx_atomic_notify_all(&__a_);}
+
+#if _LIBCPP_STD_VER > 17
+ _LIBCPP_INLINE_VISIBILITY constexpr
+ __atomic_base() noexcept(is_nothrow_default_constructible_v<_Tp>) : __a_(_Tp()) {}
+#else
+ _LIBCPP_INLINE_VISIBILITY
+ __atomic_base() _NOEXCEPT _LIBCPP_DEFAULT
+#endif
+
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+ __atomic_base(_Tp __d) _NOEXCEPT : __a_(__d) {}
+
+#ifndef _LIBCPP_CXX03_LANG
+ __atomic_base(const __atomic_base&) = delete;
+#else
+private:
+ _LIBCPP_INLINE_VISIBILITY
+ __atomic_base(const __atomic_base&);
+#endif
+};
+
+#if defined(__cpp_lib_atomic_is_always_lock_free)
+template <class _Tp, bool __b>
+_LIBCPP_CONSTEXPR bool __atomic_base<_Tp, __b>::is_always_lock_free;
+#endif
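+
+// Pre-C++17 an ODR-used constexpr static data member still requires an
+// out-of-line definition, which the definition above supplies; since C++17
+// such members are implicitly inline and the definition is redundant but
+// harmless.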
+
+// atomic<Integral>
+
+template <class _Tp>
+struct __atomic_base<_Tp, true>
+ : public __atomic_base<_Tp, false>
+{
+ typedef __atomic_base<_Tp, false> __base;
+
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX17
+ __atomic_base() _NOEXCEPT _LIBCPP_DEFAULT
+
+ _LIBCPP_INLINE_VISIBILITY
+ _LIBCPP_CONSTEXPR __atomic_base(_Tp __d) _NOEXCEPT : __base(__d) {}
+
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
+ {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
+ {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
+ {return __cxx_atomic_fetch_and(&this->__a_, __op, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ {return __cxx_atomic_fetch_and(&this->__a_, __op, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
+ {return __cxx_atomic_fetch_or(&this->__a_, __op, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ {return __cxx_atomic_fetch_or(&this->__a_, __op, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
+ {return __cxx_atomic_fetch_xor(&this->__a_, __op, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ {return __cxx_atomic_fetch_xor(&this->__a_, __op, __m);}
+
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator++(int) volatile _NOEXCEPT {return fetch_add(_Tp(1));}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator++(int) _NOEXCEPT {return fetch_add(_Tp(1));}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator--(int) volatile _NOEXCEPT {return fetch_sub(_Tp(1));}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator--(int) _NOEXCEPT {return fetch_sub(_Tp(1));}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator++() volatile _NOEXCEPT {return fetch_add(_Tp(1)) + _Tp(1);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator++() _NOEXCEPT {return fetch_add(_Tp(1)) + _Tp(1);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator--() volatile _NOEXCEPT {return fetch_sub(_Tp(1)) - _Tp(1);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator--() _NOEXCEPT {return fetch_sub(_Tp(1)) - _Tp(1);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator+=(_Tp __op) volatile _NOEXCEPT {return fetch_add(__op) + __op;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator+=(_Tp __op) _NOEXCEPT {return fetch_add(__op) + __op;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator-=(_Tp __op) volatile _NOEXCEPT {return fetch_sub(__op) - __op;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator-=(_Tp __op) _NOEXCEPT {return fetch_sub(__op) - __op;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator&=(_Tp __op) volatile _NOEXCEPT {return fetch_and(__op) & __op;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator&=(_Tp __op) _NOEXCEPT {return fetch_and(__op) & __op;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator|=(_Tp __op) volatile _NOEXCEPT {return fetch_or(__op) | __op;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator|=(_Tp __op) _NOEXCEPT {return fetch_or(__op) | __op;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator^=(_Tp __op) volatile _NOEXCEPT {return fetch_xor(__op) ^ __op;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator^=(_Tp __op) _NOEXCEPT {return fetch_xor(__op) ^ __op;}
+};
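+
+// The arithmetic operators above are defined in terms of the fetch_*
+// members, so each is a single atomic read-modify-write. For an atomic<int>
+// x:
+//
+//   x += 5;   // equivalent to x.fetch_add(5) + 5
+//   ++x;      // equivalent to x.fetch_add(1) + 1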
+
+// atomic<T>
+
+template <class _Tp>
+struct atomic
+ : public __atomic_base<_Tp>
+{
+ typedef __atomic_base<_Tp> __base;
+ typedef _Tp value_type;
+ typedef value_type difference_type;
+
+#if _LIBCPP_STD_VER > 17
+ _LIBCPP_INLINE_VISIBILITY
+ atomic() = default;
+#else
+ _LIBCPP_INLINE_VISIBILITY
+ atomic() _NOEXCEPT _LIBCPP_DEFAULT
+#endif
+
+ _LIBCPP_INLINE_VISIBILITY
+ _LIBCPP_CONSTEXPR atomic(_Tp __d) _NOEXCEPT : __base(__d) {}
+
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator=(_Tp __d) volatile _NOEXCEPT
+ {__base::store(__d); return __d;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp operator=(_Tp __d) _NOEXCEPT
+ {__base::store(__d); return __d;}
+
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+};
+
+// atomic<T*>
+
+template <class _Tp>
+struct atomic<_Tp*>
+ : public __atomic_base<_Tp*>
+{
+ typedef __atomic_base<_Tp*> __base;
+ typedef _Tp* value_type;
+ typedef ptrdiff_t difference_type;
+
+ _LIBCPP_INLINE_VISIBILITY
+ atomic() _NOEXCEPT _LIBCPP_DEFAULT
+
+ _LIBCPP_INLINE_VISIBILITY
+ _LIBCPP_CONSTEXPR atomic(_Tp* __d) _NOEXCEPT : __base(__d) {}
+
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator=(_Tp* __d) volatile _NOEXCEPT
+ {__base::store(__d); return __d;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator=(_Tp* __d) _NOEXCEPT
+ {__base::store(__d); return __d;}
+
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst)
+ volatile _NOEXCEPT
+ {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst)
+ volatile _NOEXCEPT
+ {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
+
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator++(int) volatile _NOEXCEPT {return fetch_add(1);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator++(int) _NOEXCEPT {return fetch_add(1);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator--(int) volatile _NOEXCEPT {return fetch_sub(1);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator--(int) _NOEXCEPT {return fetch_sub(1);}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator++() volatile _NOEXCEPT {return fetch_add(1) + 1;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator++() _NOEXCEPT {return fetch_add(1) + 1;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator--() volatile _NOEXCEPT {return fetch_sub(1) - 1;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator--() _NOEXCEPT {return fetch_sub(1) - 1;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator+=(ptrdiff_t __op) volatile _NOEXCEPT {return fetch_add(__op) + __op;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator+=(ptrdiff_t __op) _NOEXCEPT {return fetch_add(__op) + __op;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator-=(ptrdiff_t __op) volatile _NOEXCEPT {return fetch_sub(__op) - __op;}
+ _LIBCPP_INLINE_VISIBILITY
+ _Tp* operator-=(ptrdiff_t __op) _NOEXCEPT {return fetch_sub(__op) - __op;}
+
+ atomic& operator=(const atomic&) = delete;
+ atomic& operator=(const atomic&) volatile = delete;
+};
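+
+// Pointer arithmetic on atomic<T*> is scaled by sizeof(T), exactly as for
+// raw pointers. Illustrative use (not part of this header):
+//
+//   int buf[4];
+//   std::atomic<int*> p{buf};
+//   p.fetch_add(2);              // p now points at buf + 2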
+
+// atomic_is_lock_free
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool
+atomic_is_lock_free(const volatile atomic<_Tp>* __o) _NOEXCEPT
+{
+ return __o->is_lock_free();
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool
+atomic_is_lock_free(const atomic<_Tp>* __o) _NOEXCEPT
+{
+ return __o->is_lock_free();
+}
+
+// atomic_init
+
+template <class _Tp>
+_LIBCPP_DEPRECATED_IN_CXX20 _LIBCPP_INLINE_VISIBILITY
+void
+atomic_init(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d) _NOEXCEPT
+{
+ __cxx_atomic_init(&__o->__a_, __d);
+}
+
+template <class _Tp>
+_LIBCPP_DEPRECATED_IN_CXX20 _LIBCPP_INLINE_VISIBILITY
+void
+atomic_init(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d) _NOEXCEPT
+{
+ __cxx_atomic_init(&__o->__a_, __d);
+}
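+
+// atomic_init is deprecated in C++20 (P0883): the default constructor now
+// value-initializes, so separate initialization is no longer needed.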
+
+// atomic_store
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void
+atomic_store(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d) _NOEXCEPT
+{
+ __o->store(__d);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void
+atomic_store(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d) _NOEXCEPT
+{
+ __o->store(__d);
+}
+
+// atomic_store_explicit
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void
+atomic_store_explicit(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d, memory_order __m) _NOEXCEPT
+ _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m)
+{
+ __o->store(__d, __m);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+void
+atomic_store_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d, memory_order __m) _NOEXCEPT
+ _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m)
+{
+ __o->store(__d, __m);
+}
+
+// atomic_load
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp
+atomic_load(const volatile atomic<_Tp>* __o) _NOEXCEPT
+{
+ return __o->load();
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp
+atomic_load(const atomic<_Tp>* __o) _NOEXCEPT
+{
+ return __o->load();
+}
+
+// atomic_load_explicit
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp
+atomic_load_explicit(const volatile atomic<_Tp>* __o, memory_order __m) _NOEXCEPT
+ _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m)
+{
+ return __o->load(__m);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp
+atomic_load_explicit(const atomic<_Tp>* __o, memory_order __m) _NOEXCEPT
+ _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m)
+{
+ return __o->load(__m);
+}
+
+// atomic_exchange
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp
+atomic_exchange(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d) _NOEXCEPT
+{
+ return __o->exchange(__d);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp
+atomic_exchange(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d) _NOEXCEPT
+{
+ return __o->exchange(__d);
+}
+
+// atomic_exchange_explicit
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp
+atomic_exchange_explicit(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d, memory_order __m) _NOEXCEPT
+{
+ return __o->exchange(__d, __m);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp
+atomic_exchange_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __d, memory_order __m) _NOEXCEPT
+{
+ return __o->exchange(__d, __m);
+}
+
+// atomic_compare_exchange_weak
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool
+atomic_compare_exchange_weak(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type* __e, typename atomic<_Tp>::value_type __d) _NOEXCEPT
+{
+ return __o->compare_exchange_weak(*__e, __d);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool
+atomic_compare_exchange_weak(atomic<_Tp>* __o, typename atomic<_Tp>::value_type* __e, typename atomic<_Tp>::value_type __d) _NOEXCEPT
+{
+ return __o->compare_exchange_weak(*__e, __d);
+}
+
+// atomic_compare_exchange_strong
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool
+atomic_compare_exchange_strong(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type* __e, typename atomic<_Tp>::value_type __d) _NOEXCEPT
+{
+ return __o->compare_exchange_strong(*__e, __d);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool
+atomic_compare_exchange_strong(atomic<_Tp>* __o, typename atomic<_Tp>::value_type* __e, typename atomic<_Tp>::value_type __d) _NOEXCEPT
+{
+ return __o->compare_exchange_strong(*__e, __d);
+}
+
+// atomic_compare_exchange_weak_explicit
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool
+atomic_compare_exchange_weak_explicit(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type* __e,
+ typename atomic<_Tp>::value_type __d,
+ memory_order __s, memory_order __f) _NOEXCEPT
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
+{
+ return __o->compare_exchange_weak(*__e, __d, __s, __f);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool
+atomic_compare_exchange_weak_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::value_type* __e, typename atomic<_Tp>::value_type __d,
+ memory_order __s, memory_order __f) _NOEXCEPT
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
+{
+ return __o->compare_exchange_weak(*__e, __d, __s, __f);
+}
+
+// atomic_compare_exchange_strong_explicit
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool
+atomic_compare_exchange_strong_explicit(volatile atomic<_Tp>* __o,
+ typename atomic<_Tp>::value_type* __e, typename atomic<_Tp>::value_type __d,
+ memory_order __s, memory_order __f) _NOEXCEPT
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
+{
+ return __o->compare_exchange_strong(*__e, __d, __s, __f);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+bool
+atomic_compare_exchange_strong_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::value_type* __e,
+ typename atomic<_Tp>::value_type __d,
+ memory_order __s, memory_order __f) _NOEXCEPT
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
+{
+ return __o->compare_exchange_strong(*__e, __d, __s, __f);
+}
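+
+// The weak form may fail spuriously, so it is meant to be used in a retry
+// loop; on failure the expected value is updated with the value actually
+// observed. Illustrative caller (not part of this header):
+//
+//   std::atomic<int> v{1};
+//   int expected = v.load(std::memory_order_relaxed);
+//   while (!v.compare_exchange_weak(expected, expected * 2))
+//       ;   // expected was refreshed with the current value; retry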
+
+// atomic_wait
+
+template <class _Tp>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+void atomic_wait(const volatile atomic<_Tp>* __o,
+ typename atomic<_Tp>::value_type __v) _NOEXCEPT
+{
+ return __o->wait(__v);
+}
+
+template <class _Tp>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+void atomic_wait(const atomic<_Tp>* __o,
+ typename atomic<_Tp>::value_type __v) _NOEXCEPT
+{
+ return __o->wait(__v);
+}
+
+// atomic_wait_explicit
+
+template <class _Tp>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+void atomic_wait_explicit(const volatile atomic<_Tp>* __o,
+ typename atomic<_Tp>::value_type __v,
+ memory_order __m) _NOEXCEPT
+ _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m)
+{
+ return __o->wait(__v, __m);
+}
+
+template <class _Tp>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+void atomic_wait_explicit(const atomic<_Tp>* __o,
+ typename atomic<_Tp>::value_type __v,
+ memory_order __m) _NOEXCEPT
+ _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m)
+{
+ return __o->wait(__v, __m);
+}
+
+// atomic_notify_one
+
+template <class _Tp>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+void atomic_notify_one(volatile atomic<_Tp>* __o) _NOEXCEPT
+{
+ __o->notify_one();
+}
+template <class _Tp>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+void atomic_notify_one(atomic<_Tp>* __o) _NOEXCEPT
+{
+ __o->notify_one();
+}
+
+// atomic_notify_all
+
+template <class _Tp>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+void atomic_notify_all(volatile atomic<_Tp>* __o) _NOEXCEPT
+{
+ __o->notify_all();
+}
+template <class _Tp>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+void atomic_notify_all(atomic<_Tp>* __o) _NOEXCEPT
+{
+ __o->notify_all();
+}
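+
+// Together with atomic_wait these form a simple blocking handshake.
+// Illustrative use (not part of this header):
+//
+//   std::atomic<bool> done{false};
+//   // waiter:    std::atomic_wait(&done, false);   // blocks while done == false
+//   // signaler:  done.store(true);
+//   //            std::atomic_notify_one(&done);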
+
+// atomic_fetch_add
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value && !is_const<_Tp>::value,
+ _Tp
+>::type
+atomic_fetch_add(volatile atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op) _NOEXCEPT
+{
+ return __o->fetch_add(__op);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value && !is_const<_Tp>::value,
+ _Tp
+>::type
+atomic_fetch_add(atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op) _NOEXCEPT
+{
+ return __o->fetch_add(__op);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp*
+atomic_fetch_add(volatile atomic<_Tp*>* __o, typename atomic<_Tp*>::difference_type __op) _NOEXCEPT
+{
+ return __o->fetch_add(__op);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp*
+atomic_fetch_add(atomic<_Tp*>* __o, typename atomic<_Tp*>::difference_type __op) _NOEXCEPT
+{
+ return __o->fetch_add(__op);
+}
+
+// atomic_fetch_add_explicit
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value && !is_const<_Tp>::value,
+ _Tp
+>::type
+atomic_fetch_add_explicit(volatile atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_add(__op, __m);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value && !is_const<_Tp>::value,
+ _Tp
+>::type
+atomic_fetch_add_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_add(__op, __m);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp*
+atomic_fetch_add_explicit(volatile atomic<_Tp*>* __o, typename atomic<_Tp*>::difference_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_add(__op, __m);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp*
+atomic_fetch_add_explicit(atomic<_Tp*>* __o, typename atomic<_Tp*>::difference_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_add(__op, __m);
+}
+
+// atomic_fetch_sub
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value && !is_const<_Tp>::value,
+ _Tp
+>::type
+atomic_fetch_sub(volatile atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op) _NOEXCEPT
+{
+ return __o->fetch_sub(__op);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value && !is_const<_Tp>::value,
+ _Tp
+>::type
+atomic_fetch_sub(atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op) _NOEXCEPT
+{
+ return __o->fetch_sub(__op);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp*
+atomic_fetch_sub(volatile atomic<_Tp*>* __o, typename atomic<_Tp*>::difference_type __op) _NOEXCEPT
+{
+ return __o->fetch_sub(__op);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp*
+atomic_fetch_sub(atomic<_Tp*>* __o, typename atomic<_Tp*>::difference_type __op) _NOEXCEPT
+{
+ return __o->fetch_sub(__op);
+}
+
+// atomic_fetch_sub_explicit
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value && !is_const<_Tp>::value,
+ _Tp
+>::type
+atomic_fetch_sub_explicit(volatile atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_sub(__op, __m);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value && !is_const<_Tp>::value,
+ _Tp
+>::type
+atomic_fetch_sub_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::difference_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_sub(__op, __m);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp*
+atomic_fetch_sub_explicit(volatile atomic<_Tp*>* __o, typename atomic<_Tp*>::difference_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_sub(__op, __m);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+_Tp*
+atomic_fetch_sub_explicit(atomic<_Tp*>* __o, typename atomic<_Tp*>::difference_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_sub(__op, __m);
+}
+
+// atomic_fetch_and
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
+ _Tp
+>::type
+atomic_fetch_and(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op) _NOEXCEPT
+{
+ return __o->fetch_and(__op);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
+ _Tp
+>::type
+atomic_fetch_and(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op) _NOEXCEPT
+{
+ return __o->fetch_and(__op);
+}
+
+// atomic_fetch_and_explicit
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
+ _Tp
+>::type
+atomic_fetch_and_explicit(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_and(__op, __m);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
+ _Tp
+>::type
+atomic_fetch_and_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_and(__op, __m);
+}
+
+// atomic_fetch_or
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
+ _Tp
+>::type
+atomic_fetch_or(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op) _NOEXCEPT
+{
+ return __o->fetch_or(__op);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
+ _Tp
+>::type
+atomic_fetch_or(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op) _NOEXCEPT
+{
+ return __o->fetch_or(__op);
+}
+
+// atomic_fetch_or_explicit
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
+ _Tp
+>::type
+atomic_fetch_or_explicit(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_or(__op, __m);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
+ _Tp
+>::type
+atomic_fetch_or_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_or(__op, __m);
+}
+
+// atomic_fetch_xor
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
+ _Tp
+>::type
+atomic_fetch_xor(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op) _NOEXCEPT
+{
+ return __o->fetch_xor(__op);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
+ _Tp
+>::type
+atomic_fetch_xor(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op) _NOEXCEPT
+{
+ return __o->fetch_xor(__op);
+}
+
+// atomic_fetch_xor_explicit
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
+ _Tp
+>::type
+atomic_fetch_xor_explicit(volatile atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_xor(__op, __m);
+}
+
+template <class _Tp>
+_LIBCPP_INLINE_VISIBILITY
+typename enable_if
+<
+ is_integral<_Tp>::value && !is_same<_Tp, bool>::value,
+ _Tp
+>::type
+atomic_fetch_xor_explicit(atomic<_Tp>* __o, typename atomic<_Tp>::value_type __op, memory_order __m) _NOEXCEPT
+{
+ return __o->fetch_xor(__op, __m);
+}
+
+// flag type and operations
+
+typedef struct atomic_flag
+{
+ __cxx_atomic_impl<_LIBCPP_ATOMIC_FLAG_TYPE> __a_;
+
+ _LIBCPP_INLINE_VISIBILITY
+ bool test(memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT
+ {return _LIBCPP_ATOMIC_FLAG_TYPE(true) == __cxx_atomic_load(&__a_, __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ bool test(memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+ {return _LIBCPP_ATOMIC_FLAG_TYPE(true) == __cxx_atomic_load(&__a_, __m);}
+
+ _LIBCPP_INLINE_VISIBILITY
+ bool test_and_set(memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
+ {return __cxx_atomic_exchange(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(true), __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ bool test_and_set(memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ {return __cxx_atomic_exchange(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(true), __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ void clear(memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
+ {__cxx_atomic_store(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(false), __m);}
+ _LIBCPP_INLINE_VISIBILITY
+ void clear(memory_order __m = memory_order_seq_cst) _NOEXCEPT
+ {__cxx_atomic_store(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(false), __m);}
+
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+ void wait(bool __v, memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT
+ {__cxx_atomic_wait(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m);}
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+ void wait(bool __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+ {__cxx_atomic_wait(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m);}
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+ void notify_one() volatile _NOEXCEPT
+ {__cxx_atomic_notify_one(&__a_);}
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+ void notify_one() _NOEXCEPT
+ {__cxx_atomic_notify_one(&__a_);}
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+ void notify_all() volatile _NOEXCEPT
+ {__cxx_atomic_notify_all(&__a_);}
+ _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INLINE_VISIBILITY
+ void notify_all() _NOEXCEPT
+ {__cxx_atomic_notify_all(&__a_);}
+
+#if _LIBCPP_STD_VER > 17
+ _LIBCPP_INLINE_VISIBILITY constexpr
+ atomic_flag() _NOEXCEPT : __a_(false) {}
+#else
+ _LIBCPP_INLINE_VISIBILITY
+ atomic_flag() _NOEXCEPT _LIBCPP_DEFAULT
+#endif
+
+ _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR
+ atomic_flag(bool __b) _NOEXCEPT : __a_(__b) {} // EXTENSION
+
+#ifndef _LIBCPP_CXX03_LANG
+ atomic_flag(const atomic_flag&) = delete;
+ atomic_flag& operator=(const atomic_flag&) = delete;
+ atomic_flag& operator=(const atomic_flag&) volatile = delete;
+#else
+private:
+ _LIBCPP_INLINE_VISIBILITY
+ atomic_flag(const atomic_flag&);
+ _LIBCPP_INLINE_VISIBILITY
+ atomic_flag& operator=(const atomic_flag&);
+ _LIBCPP_INLINE_VISIBILITY
+ atomic_flag& operator=(const atomic_flag&) volatile;
+#endif
+} atomic_flag;
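+
+// atomic_flag is the one atomic type the standard guarantees to be
+// lock-free; the classic use is a spinlock. Illustrative sketch (not part
+// of this header):
+//
+//   std::atomic_flag f = ATOMIC_FLAG_INIT;
+//   void lock()   { while (f.test_and_set(std::memory_order_acquire)) {} }
+//   void unlock() { f.clear(std::memory_order_release); }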
+
+
+inline _LIBCPP_INLINE_VISIBILITY
+bool
+atomic_flag_test(const volatile atomic_flag* __o) _NOEXCEPT
+{
+ return __o->test();
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+bool
+atomic_flag_test(const atomic_flag* __o) _NOEXCEPT
+{
+ return __o->test();
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+bool
+atomic_flag_test_explicit(const volatile atomic_flag* __o, memory_order __m) _NOEXCEPT
+{
+ return __o->test(__m);
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+bool
+atomic_flag_test_explicit(const atomic_flag* __o, memory_order __m) _NOEXCEPT
+{
+ return __o->test(__m);
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+bool
+atomic_flag_test_and_set(volatile atomic_flag* __o) _NOEXCEPT
+{
+ return __o->test_and_set();
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+bool
+atomic_flag_test_and_set(atomic_flag* __o) _NOEXCEPT
+{
+ return __o->test_and_set();
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+bool
+atomic_flag_test_and_set_explicit(volatile atomic_flag* __o, memory_order __m) _NOEXCEPT
+{
+ return __o->test_and_set(__m);
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+bool
+atomic_flag_test_and_set_explicit(atomic_flag* __o, memory_order __m) _NOEXCEPT
+{
+ return __o->test_and_set(__m);
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+void
+atomic_flag_clear(volatile atomic_flag* __o) _NOEXCEPT
+{
+ __o->clear();
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+void
+atomic_flag_clear(atomic_flag* __o) _NOEXCEPT
+{
+ __o->clear();
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+void
+atomic_flag_clear_explicit(volatile atomic_flag* __o, memory_order __m) _NOEXCEPT
+{
+ __o->clear(__m);
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+void
+atomic_flag_clear_explicit(atomic_flag* __o, memory_order __m) _NOEXCEPT
+{
+ __o->clear(__m);
+}
+
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_AVAILABILITY_SYNC
+void
+atomic_flag_wait(const volatile atomic_flag* __o, bool __v) _NOEXCEPT
+{
+ __o->wait(__v);
+}
+
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_AVAILABILITY_SYNC
+void
+atomic_flag_wait(const atomic_flag* __o, bool __v) _NOEXCEPT
+{
+ __o->wait(__v);
+}
+
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_AVAILABILITY_SYNC
+void
+atomic_flag_wait_explicit(const volatile atomic_flag* __o,
+ bool __v, memory_order __m) _NOEXCEPT
+{
+ __o->wait(__v, __m);
+}
+
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_AVAILABILITY_SYNC
+void
+atomic_flag_wait_explicit(const atomic_flag* __o,
+ bool __v, memory_order __m) _NOEXCEPT
+{
+ __o->wait(__v, __m);
+}
+
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_AVAILABILITY_SYNC
+void
+atomic_flag_notify_one(volatile atomic_flag* __o) _NOEXCEPT
+{
+ __o->notify_one();
+}
+
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_AVAILABILITY_SYNC
+void
+atomic_flag_notify_one(atomic_flag* __o) _NOEXCEPT
+{
+ __o->notify_one();
+}
+
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_AVAILABILITY_SYNC
+void
+atomic_flag_notify_all(volatile atomic_flag* __o) _NOEXCEPT
+{
+ __o->notify_all();
+}
+
+inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_AVAILABILITY_SYNC
+void
+atomic_flag_notify_all(atomic_flag* __o) _NOEXCEPT
+{
+ __o->notify_all();
+}
+
+// fences
+
+inline _LIBCPP_INLINE_VISIBILITY
+void
+atomic_thread_fence(memory_order __m) _NOEXCEPT
+{
+ __cxx_atomic_thread_fence(__m);
+}
+
+inline _LIBCPP_INLINE_VISIBILITY
+void
+atomic_signal_fence(memory_order __m) _NOEXCEPT
+{
+ __cxx_atomic_signal_fence(__m);
+}
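+
+// atomic_signal_fence only constrains compiler reordering (enough to
+// synchronize with a signal handler on the same thread), while
+// atomic_thread_fence additionally emits whatever hardware fence the target
+// requires. Illustrative release publication via a fence (not part of this
+// header):
+//
+//   int data = 0;
+//   std::atomic<int> flag{0};
+//   ...
+//   data = 42;                                            // plain store
+//   std::atomic_thread_fence(std::memory_order_release);
+//   flag.store(1, std::memory_order_relaxed);             // publish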
+
+// Atomics for standard typedef types
+
+typedef atomic<bool> atomic_bool;
+typedef atomic<char> atomic_char;
+typedef atomic<signed char> atomic_schar;
+typedef atomic<unsigned char> atomic_uchar;
+typedef atomic<short> atomic_short;
+typedef atomic<unsigned short> atomic_ushort;
+typedef atomic<int> atomic_int;
+typedef atomic<unsigned int> atomic_uint;
+typedef atomic<long> atomic_long;
+typedef atomic<unsigned long> atomic_ulong;
+typedef atomic<long long> atomic_llong;
+typedef atomic<unsigned long long> atomic_ullong;
+#ifndef _LIBCPP_HAS_NO_CHAR8_T
+typedef atomic<char8_t> atomic_char8_t;
+#endif
+typedef atomic<char16_t> atomic_char16_t;
+typedef atomic<char32_t> atomic_char32_t;
+typedef atomic<wchar_t> atomic_wchar_t;
+
+typedef atomic<int_least8_t> atomic_int_least8_t;
+typedef atomic<uint_least8_t> atomic_uint_least8_t;
+typedef atomic<int_least16_t> atomic_int_least16_t;
+typedef atomic<uint_least16_t> atomic_uint_least16_t;
+typedef atomic<int_least32_t> atomic_int_least32_t;
+typedef atomic<uint_least32_t> atomic_uint_least32_t;
+typedef atomic<int_least64_t> atomic_int_least64_t;
+typedef atomic<uint_least64_t> atomic_uint_least64_t;
+
+typedef atomic<int_fast8_t> atomic_int_fast8_t;
+typedef atomic<uint_fast8_t> atomic_uint_fast8_t;
+typedef atomic<int_fast16_t> atomic_int_fast16_t;
+typedef atomic<uint_fast16_t> atomic_uint_fast16_t;
+typedef atomic<int_fast32_t> atomic_int_fast32_t;
+typedef atomic<uint_fast32_t> atomic_uint_fast32_t;
+typedef atomic<int_fast64_t> atomic_int_fast64_t;
+typedef atomic<uint_fast64_t> atomic_uint_fast64_t;
+
+typedef atomic< int8_t> atomic_int8_t;
+typedef atomic<uint8_t> atomic_uint8_t;
+typedef atomic< int16_t> atomic_int16_t;
+typedef atomic<uint16_t> atomic_uint16_t;
+typedef atomic< int32_t> atomic_int32_t;
+typedef atomic<uint32_t> atomic_uint32_t;
+typedef atomic< int64_t> atomic_int64_t;
+typedef atomic<uint64_t> atomic_uint64_t;
+
+typedef atomic<intptr_t> atomic_intptr_t;
+typedef atomic<uintptr_t> atomic_uintptr_t;
+typedef atomic<size_t> atomic_size_t;
+typedef atomic<ptrdiff_t> atomic_ptrdiff_t;
+typedef atomic<intmax_t> atomic_intmax_t;
+typedef atomic<uintmax_t> atomic_uintmax_t;
+
+// atomic_*_lock_free : prefer the contention type first, then the largest lock-free type
+
+#ifdef __cpp_lib_atomic_is_always_lock_free
+# define _LIBCPP_CONTENTION_LOCK_FREE __atomic_always_lock_free(sizeof(__cxx_contention_t), 0)
+#else
+# define _LIBCPP_CONTENTION_LOCK_FREE false
+#endif
+
+#if ATOMIC_LLONG_LOCK_FREE == 2
+typedef conditional<_LIBCPP_CONTENTION_LOCK_FREE, __cxx_contention_t, long long>::type __libcpp_signed_lock_free;
+typedef conditional<_LIBCPP_CONTENTION_LOCK_FREE, __cxx_contention_t, unsigned long long>::type __libcpp_unsigned_lock_free;
+#elif ATOMIC_INT_LOCK_FREE == 2
+typedef conditional<_LIBCPP_CONTENTION_LOCK_FREE, __cxx_contention_t, int>::type __libcpp_signed_lock_free;
+typedef conditional<_LIBCPP_CONTENTION_LOCK_FREE, __cxx_contention_t, unsigned int>::type __libcpp_unsigned_lock_free;
+#elif ATOMIC_SHORT_LOCK_FREE == 2
+typedef conditional<_LIBCPP_CONTENTION_LOCK_FREE, __cxx_contention_t, short>::type __libcpp_signed_lock_free;
+typedef conditional<_LIBCPP_CONTENTION_LOCK_FREE, __cxx_contention_t, unsigned short>::type __libcpp_unsigned_lock_free;
+#elif ATOMIC_CHAR_LOCK_FREE == 2
+typedef conditional<_LIBCPP_CONTENTION_LOCK_FREE, __cxx_contention_t, char>::type __libcpp_signed_lock_free;
+typedef conditional<_LIBCPP_CONTENTION_LOCK_FREE, __cxx_contention_t, unsigned char>::type __libcpp_unsigned_lock_free;
+#else
+    // No signed/unsigned lock-free types; atomic_signed_lock_free and
+    // atomic_unsigned_lock_free are not defined.
+#endif
+
+#if ATOMIC_LLONG_LOCK_FREE == 2 || ATOMIC_INT_LOCK_FREE == 2 || \
+    ATOMIC_SHORT_LOCK_FREE == 2 || ATOMIC_CHAR_LOCK_FREE == 2
+typedef atomic<__libcpp_signed_lock_free> atomic_signed_lock_free;
+typedef atomic<__libcpp_unsigned_lock_free> atomic_unsigned_lock_free;
+#endif
+
+#define ATOMIC_FLAG_INIT {false}
+#define ATOMIC_VAR_INIT(__v) {__v}
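+
+// Both macros exist for C compatibility; in C++ direct initialization works
+// just as well (and ATOMIC_VAR_INIT is deprecated in C++20):
+//
+//   std::atomic_flag f = ATOMIC_FLAG_INIT;
+//   std::atomic<int> i = ATOMIC_VAR_INIT(5);   // same as std::atomic<int> i{5};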
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP_ATOMIC