// libcxx/include/__atomic/cxx_atomic_impl.h
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
9 #ifndef _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H
10 #define _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H
12 #include <__atomic/is_always_lock_free.h>
13 #include <__atomic/memory_order.h>
14 #include <__config>
15 #include <__memory/addressof.h>
16 #include <__type_traits/conditional.h>
17 #include <__type_traits/is_assignable.h>
18 #include <__type_traits/is_trivially_copyable.h>
19 #include <__type_traits/remove_const.h>
20 #include <cstddef>
21 #include <cstring>
23 #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
24 # pragma GCC system_header
25 #endif
27 _LIBCPP_BEGIN_NAMESPACE_STD
29 #if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) || \
30 defined(_LIBCPP_ATOMIC_ONLY_USE_BUILTINS)
32 // [atomics.types.generic]p1 guarantees _Tp is trivially copyable. Because
33 // the default operator= in an object is not volatile, a byte-by-byte copy
34 // is required.
35 template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI
36 void __cxx_atomic_assign_volatile(_Tp& __a_value, _Tv const& __val) {
37 __a_value = __val;
39 template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI
40 void __cxx_atomic_assign_volatile(_Tp volatile& __a_value, _Tv volatile const& __val) {
41 volatile char* __to = reinterpret_cast<volatile char*>(std::addressof(__a_value));
42 volatile char* __end = __to + sizeof(_Tp);
43 volatile const char* __from = reinterpret_cast<volatile const char*>(std::addressof(__val));
44 while (__to != __end)
45 *__to++ = *__from++;
48 #endif
50 #if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP)
52 template <typename _Tp>
53 struct __cxx_atomic_base_impl {
55 _LIBCPP_HIDE_FROM_ABI
56 #ifndef _LIBCPP_CXX03_LANG
57 __cxx_atomic_base_impl() _NOEXCEPT = default;
58 #else
59 __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {}
60 #endif // _LIBCPP_CXX03_LANG
61 _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT
62 : __a_value(value) {}
63 _Tp __a_value;
66 _LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
67 // Avoid switch statement to make this a constexpr.
68 return __order == memory_order_relaxed ? __ATOMIC_RELAXED:
69 (__order == memory_order_acquire ? __ATOMIC_ACQUIRE:
70 (__order == memory_order_release ? __ATOMIC_RELEASE:
71 (__order == memory_order_seq_cst ? __ATOMIC_SEQ_CST:
72 (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL:
73 __ATOMIC_CONSUME))));
76 _LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) {
77 // Avoid switch statement to make this a constexpr.
78 return __order == memory_order_relaxed ? __ATOMIC_RELAXED:
79 (__order == memory_order_acquire ? __ATOMIC_ACQUIRE:
80 (__order == memory_order_release ? __ATOMIC_RELAXED:
81 (__order == memory_order_seq_cst ? __ATOMIC_SEQ_CST:
82 (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE:
83 __ATOMIC_CONSUME))));
86 template <typename _Tp>
87 _LIBCPP_HIDE_FROM_ABI
88 void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
89 __cxx_atomic_assign_volatile(__a->__a_value, __val);
92 template <typename _Tp>
93 _LIBCPP_HIDE_FROM_ABI
94 void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
95 __a->__a_value = __val;
98 _LIBCPP_HIDE_FROM_ABI inline
99 void __cxx_atomic_thread_fence(memory_order __order) {
100 __atomic_thread_fence(__to_gcc_order(__order));
103 _LIBCPP_HIDE_FROM_ABI inline
104 void __cxx_atomic_signal_fence(memory_order __order) {
105 __atomic_signal_fence(__to_gcc_order(__order));
108 template <typename _Tp>
109 _LIBCPP_HIDE_FROM_ABI
110 void __cxx_atomic_store(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val,
111 memory_order __order) {
112 __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
115 template <typename _Tp>
116 _LIBCPP_HIDE_FROM_ABI
117 void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val,
118 memory_order __order) {
119 __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
122 template <typename _Tp>
123 _LIBCPP_HIDE_FROM_ABI
124 _Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a,
125 memory_order __order) {
126 _Tp __ret;
127 __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
128 return __ret;
131 template <typename _Tp>
132 _LIBCPP_HIDE_FROM_ABI
133 _Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
134 _Tp __ret;
135 __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
136 return __ret;
139 template <typename _Tp>
140 _LIBCPP_HIDE_FROM_ABI
141 _Tp __cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp>* __a,
142 _Tp __value, memory_order __order) {
143 _Tp __ret;
144 __atomic_exchange(
145 std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
146 return __ret;
149 template <typename _Tp>
150 _LIBCPP_HIDE_FROM_ABI
151 _Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value,
152 memory_order __order) {
153 _Tp __ret;
154 __atomic_exchange(
155 std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
156 return __ret;
159 template <typename _Tp>
160 _LIBCPP_HIDE_FROM_ABI
161 bool __cxx_atomic_compare_exchange_strong(
162 volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value,
163 memory_order __success, memory_order __failure) {
164 return __atomic_compare_exchange(
165 std::addressof(__a->__a_value),
166 __expected,
167 std::addressof(__value),
168 false,
169 __to_gcc_order(__success),
170 __to_gcc_failure_order(__failure));
173 template <typename _Tp>
174 _LIBCPP_HIDE_FROM_ABI
175 bool __cxx_atomic_compare_exchange_strong(
176 __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success,
177 memory_order __failure) {
178 return __atomic_compare_exchange(
179 std::addressof(__a->__a_value),
180 __expected,
181 std::addressof(__value),
182 false,
183 __to_gcc_order(__success),
184 __to_gcc_failure_order(__failure));
187 template <typename _Tp>
188 _LIBCPP_HIDE_FROM_ABI
189 bool __cxx_atomic_compare_exchange_weak(
190 volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value,
191 memory_order __success, memory_order __failure) {
192 return __atomic_compare_exchange(
193 std::addressof(__a->__a_value),
194 __expected,
195 std::addressof(__value),
196 true,
197 __to_gcc_order(__success),
198 __to_gcc_failure_order(__failure));
201 template <typename _Tp>
202 _LIBCPP_HIDE_FROM_ABI
203 bool __cxx_atomic_compare_exchange_weak(
204 __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success,
205 memory_order __failure) {
206 return __atomic_compare_exchange(
207 std::addressof(__a->__a_value),
208 __expected,
209 std::addressof(__value),
210 true,
211 __to_gcc_order(__success),
212 __to_gcc_failure_order(__failure));
// Scale factor applied to fetch_add/fetch_sub deltas: 1 for arithmetic types,
// sizeof(_Tp) for pointers so pointer arithmetic steps by whole objects.
template <typename _Tp>
struct __skip_amt { enum {value = 1}; };

template <typename _Tp>
struct __skip_amt<_Tp*> { enum {value = sizeof(_Tp)}; };

// FIXME: Haven't figured out what the spec says about using arrays with
// atomic_fetch_add. Force a failure rather than creating bad behavior.
template <typename _Tp>
struct __skip_amt<_Tp[]> { };
template <typename _Tp, int n>
struct __skip_amt<_Tp[n]> { };
228 template <typename _Tp, typename _Td>
229 _LIBCPP_HIDE_FROM_ABI
230 _Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_base_impl<_Tp>* __a,
231 _Td __delta, memory_order __order) {
232 return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
235 template <typename _Tp, typename _Td>
236 _LIBCPP_HIDE_FROM_ABI
237 _Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta,
238 memory_order __order) {
239 return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
242 template <typename _Tp, typename _Td>
243 _LIBCPP_HIDE_FROM_ABI
244 _Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_base_impl<_Tp>* __a,
245 _Td __delta, memory_order __order) {
246 return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
249 template <typename _Tp, typename _Td>
250 _LIBCPP_HIDE_FROM_ABI
251 _Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta,
252 memory_order __order) {
253 return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
256 template <typename _Tp>
257 _LIBCPP_HIDE_FROM_ABI
258 _Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_base_impl<_Tp>* __a,
259 _Tp __pattern, memory_order __order) {
260 return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
263 template <typename _Tp>
264 _LIBCPP_HIDE_FROM_ABI
265 _Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a,
266 _Tp __pattern, memory_order __order) {
267 return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
270 template <typename _Tp>
271 _LIBCPP_HIDE_FROM_ABI
272 _Tp __cxx_atomic_fetch_or(volatile __cxx_atomic_base_impl<_Tp>* __a,
273 _Tp __pattern, memory_order __order) {
274 return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
277 template <typename _Tp>
278 _LIBCPP_HIDE_FROM_ABI
279 _Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern,
280 memory_order __order) {
281 return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
284 template <typename _Tp>
285 _LIBCPP_HIDE_FROM_ABI
286 _Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_base_impl<_Tp>* __a,
287 _Tp __pattern, memory_order __order) {
288 return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
291 template <typename _Tp>
292 _LIBCPP_HIDE_FROM_ABI
293 _Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern,
294 memory_order __order) {
295 return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
298 #define __cxx_atomic_is_lock_free(__s) __atomic_is_lock_free(__s, 0)
300 #elif defined(_LIBCPP_HAS_C_ATOMIC_IMP)
302 template <typename _Tp>
303 struct __cxx_atomic_base_impl {
305 _LIBCPP_HIDE_FROM_ABI
306 #ifndef _LIBCPP_CXX03_LANG
307 __cxx_atomic_base_impl() _NOEXCEPT = default;
308 #else
309 __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {}
310 #endif // _LIBCPP_CXX03_LANG
311 _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp __value) _NOEXCEPT
312 : __a_value(__value) {}
313 _LIBCPP_DISABLE_EXTENSION_WARNING _Atomic(_Tp) __a_value;
316 #define __cxx_atomic_is_lock_free(__s) __c11_atomic_is_lock_free(__s)
318 _LIBCPP_HIDE_FROM_ABI inline
319 void __cxx_atomic_thread_fence(memory_order __order) _NOEXCEPT {
320 __c11_atomic_thread_fence(static_cast<__memory_order_underlying_t>(__order));
323 _LIBCPP_HIDE_FROM_ABI inline
324 void __cxx_atomic_signal_fence(memory_order __order) _NOEXCEPT {
325 __c11_atomic_signal_fence(static_cast<__memory_order_underlying_t>(__order));
328 template<class _Tp>
329 _LIBCPP_HIDE_FROM_ABI
330 void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val) _NOEXCEPT {
331 __c11_atomic_init(std::addressof(__a->__a_value), __val);
333 template<class _Tp>
334 _LIBCPP_HIDE_FROM_ABI
335 void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val) _NOEXCEPT {
336 __c11_atomic_init(std::addressof(__a->__a_value), __val);
339 template<class _Tp>
340 _LIBCPP_HIDE_FROM_ABI
341 void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val, memory_order __order) _NOEXCEPT {
342 __c11_atomic_store(std::addressof(__a->__a_value), __val, static_cast<__memory_order_underlying_t>(__order));
344 template<class _Tp>
345 _LIBCPP_HIDE_FROM_ABI
346 void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val, memory_order __order) _NOEXCEPT {
347 __c11_atomic_store(std::addressof(__a->__a_value), __val, static_cast<__memory_order_underlying_t>(__order));
350 template<class _Tp>
351 _LIBCPP_HIDE_FROM_ABI
352 _Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const volatile* __a, memory_order __order) _NOEXCEPT {
353 using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
354 return __c11_atomic_load(
355 const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
357 template<class _Tp>
358 _LIBCPP_HIDE_FROM_ABI
359 _Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __order) _NOEXCEPT {
360 using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
361 return __c11_atomic_load(
362 const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
365 template<class _Tp>
366 _LIBCPP_HIDE_FROM_ABI
367 _Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT {
368 return __c11_atomic_exchange(
369 std::addressof(__a->__a_value), __value, static_cast<__memory_order_underlying_t>(__order));
371 template<class _Tp>
372 _LIBCPP_HIDE_FROM_ABI
373 _Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> * __a, _Tp __value, memory_order __order) _NOEXCEPT {
374 return __c11_atomic_exchange(
375 std::addressof(__a->__a_value), __value, static_cast<__memory_order_underlying_t>(__order));
378 _LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR memory_order __to_failure_order(memory_order __order) {
379 // Avoid switch statement to make this a constexpr.
380 return __order == memory_order_release ? memory_order_relaxed:
381 (__order == memory_order_acq_rel ? memory_order_acquire:
382 __order);
385 template<class _Tp>
386 _LIBCPP_HIDE_FROM_ABI
387 bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
388 return __c11_atomic_compare_exchange_strong(
389 std::addressof(__a->__a_value),
390 __expected,
391 __value,
392 static_cast<__memory_order_underlying_t>(__success),
393 static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
395 template<class _Tp>
396 _LIBCPP_HIDE_FROM_ABI
397 bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
398 return __c11_atomic_compare_exchange_strong(
399 std::addressof(__a->__a_value),
400 __expected,
401 __value,
402 static_cast<__memory_order_underlying_t>(__success),
403 static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
406 template<class _Tp>
407 _LIBCPP_HIDE_FROM_ABI
408 bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
409 return __c11_atomic_compare_exchange_weak(
410 std::addressof(__a->__a_value),
411 __expected,
412 __value,
413 static_cast<__memory_order_underlying_t>(__success),
414 static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
416 template<class _Tp>
417 _LIBCPP_HIDE_FROM_ABI
418 bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
419 return __c11_atomic_compare_exchange_weak(
420 std::addressof(__a->__a_value),
421 __expected,
422 __value,
423 static_cast<__memory_order_underlying_t>(__success),
424 static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
427 template<class _Tp>
428 _LIBCPP_HIDE_FROM_ABI
429 _Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
430 return __c11_atomic_fetch_add(
431 std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
433 template<class _Tp>
434 _LIBCPP_HIDE_FROM_ABI
435 _Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT {
436 return __c11_atomic_fetch_add(
437 std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
440 template<class _Tp>
441 _LIBCPP_HIDE_FROM_ABI
442 _Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
443 return __c11_atomic_fetch_add(
444 std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
446 template<class _Tp>
447 _LIBCPP_HIDE_FROM_ABI
448 _Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
449 return __c11_atomic_fetch_add(
450 std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
453 template<class _Tp>
454 _LIBCPP_HIDE_FROM_ABI
455 _Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
456 return __c11_atomic_fetch_sub(
457 std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
459 template<class _Tp>
460 _LIBCPP_HIDE_FROM_ABI
461 _Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT {
462 return __c11_atomic_fetch_sub(
463 std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
465 template<class _Tp>
466 _LIBCPP_HIDE_FROM_ABI
467 _Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
468 return __c11_atomic_fetch_sub(
469 std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
471 template<class _Tp>
472 _LIBCPP_HIDE_FROM_ABI
473 _Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
474 return __c11_atomic_fetch_sub(
475 std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
478 template<class _Tp>
479 _LIBCPP_HIDE_FROM_ABI
480 _Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
481 return __c11_atomic_fetch_and(
482 std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
484 template<class _Tp>
485 _LIBCPP_HIDE_FROM_ABI
486 _Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
487 return __c11_atomic_fetch_and(
488 std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
491 template<class _Tp>
492 _LIBCPP_HIDE_FROM_ABI
493 _Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
494 return __c11_atomic_fetch_or(
495 std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
497 template<class _Tp>
498 _LIBCPP_HIDE_FROM_ABI
499 _Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
500 return __c11_atomic_fetch_or(
501 std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
504 template<class _Tp>
505 _LIBCPP_HIDE_FROM_ABI
506 _Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
507 return __c11_atomic_fetch_xor(
508 std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
510 template<class _Tp>
511 _LIBCPP_HIDE_FROM_ABI
512 _Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
513 return __c11_atomic_fetch_xor(
514 std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
517 #endif // _LIBCPP_HAS_GCC_ATOMIC_IMP, _LIBCPP_HAS_C_ATOMIC_IMP
519 #ifdef _LIBCPP_ATOMIC_ONLY_USE_BUILTINS
521 template<typename _Tp>
522 struct __cxx_atomic_lock_impl {
524 _LIBCPP_HIDE_FROM_ABI
525 __cxx_atomic_lock_impl() _NOEXCEPT
526 : __a_value(), __a_lock(0) {}
527 _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit
528 __cxx_atomic_lock_impl(_Tp value) _NOEXCEPT
529 : __a_value(value), __a_lock(0) {}
531 _Tp __a_value;
532 mutable __cxx_atomic_base_impl<_LIBCPP_ATOMIC_FLAG_TYPE> __a_lock;
534 _LIBCPP_HIDE_FROM_ABI void __lock() const volatile {
535 while(1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire))
536 /*spin*/;
538 _LIBCPP_HIDE_FROM_ABI void __lock() const {
539 while(1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire))
540 /*spin*/;
542 _LIBCPP_HIDE_FROM_ABI void __unlock() const volatile {
543 __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release);
545 _LIBCPP_HIDE_FROM_ABI void __unlock() const {
546 __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release);
548 _LIBCPP_HIDE_FROM_ABI _Tp __read() const volatile {
549 __lock();
550 _Tp __old;
551 __cxx_atomic_assign_volatile(__old, __a_value);
552 __unlock();
553 return __old;
555 _LIBCPP_HIDE_FROM_ABI _Tp __read() const {
556 __lock();
557 _Tp __old = __a_value;
558 __unlock();
559 return __old;
563 template <typename _Tp>
564 _LIBCPP_HIDE_FROM_ABI
565 void __cxx_atomic_init(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __val) {
566 __cxx_atomic_assign_volatile(__a->__a_value, __val);
568 template <typename _Tp>
569 _LIBCPP_HIDE_FROM_ABI
570 void __cxx_atomic_init(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __val) {
571 __a->__a_value = __val;
574 template <typename _Tp>
575 _LIBCPP_HIDE_FROM_ABI
576 void __cxx_atomic_store(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __val, memory_order) {
577 __a->__lock();
578 __cxx_atomic_assign_volatile(__a->__a_value, __val);
579 __a->__unlock();
581 template <typename _Tp>
582 _LIBCPP_HIDE_FROM_ABI
583 void __cxx_atomic_store(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __val, memory_order) {
584 __a->__lock();
585 __a->__a_value = __val;
586 __a->__unlock();
589 template <typename _Tp>
590 _LIBCPP_HIDE_FROM_ABI
591 _Tp __cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp>* __a, memory_order) {
592 return __a->__read();
594 template <typename _Tp>
595 _LIBCPP_HIDE_FROM_ABI
596 _Tp __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, memory_order) {
597 return __a->__read();
600 template <typename _Tp>
601 _LIBCPP_HIDE_FROM_ABI
602 _Tp __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) {
603 __a->__lock();
604 _Tp __old;
605 __cxx_atomic_assign_volatile(__old, __a->__a_value);
606 __cxx_atomic_assign_volatile(__a->__a_value, __value);
607 __a->__unlock();
608 return __old;
610 template <typename _Tp>
611 _LIBCPP_HIDE_FROM_ABI
612 _Tp __cxx_atomic_exchange(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) {
613 __a->__lock();
614 _Tp __old = __a->__a_value;
615 __a->__a_value = __value;
616 __a->__unlock();
617 return __old;
620 template <typename _Tp>
621 _LIBCPP_HIDE_FROM_ABI
622 bool __cxx_atomic_compare_exchange_strong(volatile __cxx_atomic_lock_impl<_Tp>* __a,
623 _Tp* __expected, _Tp __value, memory_order, memory_order) {
624 _Tp __temp;
625 __a->__lock();
626 __cxx_atomic_assign_volatile(__temp, __a->__a_value);
627 bool __ret = (std::memcmp(&__temp, __expected, sizeof(_Tp)) == 0);
628 if(__ret)
629 __cxx_atomic_assign_volatile(__a->__a_value, __value);
630 else
631 __cxx_atomic_assign_volatile(*__expected, __a->__a_value);
632 __a->__unlock();
633 return __ret;
635 template <typename _Tp>
636 _LIBCPP_HIDE_FROM_ABI
637 bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_lock_impl<_Tp>* __a,
638 _Tp* __expected, _Tp __value, memory_order, memory_order) {
639 __a->__lock();
640 bool __ret = (std::memcmp(&__a->__a_value, __expected, sizeof(_Tp)) == 0);
641 if(__ret)
642 std::memcpy(&__a->__a_value, &__value, sizeof(_Tp));
643 else
644 std::memcpy(__expected, &__a->__a_value, sizeof(_Tp));
645 __a->__unlock();
646 return __ret;
649 template <typename _Tp>
650 _LIBCPP_HIDE_FROM_ABI
651 bool __cxx_atomic_compare_exchange_weak(volatile __cxx_atomic_lock_impl<_Tp>* __a,
652 _Tp* __expected, _Tp __value, memory_order, memory_order) {
653 _Tp __temp;
654 __a->__lock();
655 __cxx_atomic_assign_volatile(__temp, __a->__a_value);
656 bool __ret = (std::memcmp(&__temp, __expected, sizeof(_Tp)) == 0);
657 if(__ret)
658 __cxx_atomic_assign_volatile(__a->__a_value, __value);
659 else
660 __cxx_atomic_assign_volatile(*__expected, __a->__a_value);
661 __a->__unlock();
662 return __ret;
664 template <typename _Tp>
665 _LIBCPP_HIDE_FROM_ABI
666 bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_lock_impl<_Tp>* __a,
667 _Tp* __expected, _Tp __value, memory_order, memory_order) {
668 __a->__lock();
669 bool __ret = (std::memcmp(&__a->__a_value, __expected, sizeof(_Tp)) == 0);
670 if(__ret)
671 std::memcpy(&__a->__a_value, &__value, sizeof(_Tp));
672 else
673 std::memcpy(__expected, &__a->__a_value, sizeof(_Tp));
674 __a->__unlock();
675 return __ret;
678 template <typename _Tp, typename _Td>
679 _LIBCPP_HIDE_FROM_ABI
680 _Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp>* __a,
681 _Td __delta, memory_order) {
682 __a->__lock();
683 _Tp __old;
684 __cxx_atomic_assign_volatile(__old, __a->__a_value);
685 __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old + __delta));
686 __a->__unlock();
687 return __old;
689 template <typename _Tp, typename _Td>
690 _LIBCPP_HIDE_FROM_ABI
691 _Tp __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp>* __a,
692 _Td __delta, memory_order) {
693 __a->__lock();
694 _Tp __old = __a->__a_value;
695 __a->__a_value += __delta;
696 __a->__unlock();
697 return __old;
700 template <typename _Tp, typename _Td>
701 _LIBCPP_HIDE_FROM_ABI
702 _Tp* __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp*>* __a,
703 ptrdiff_t __delta, memory_order) {
704 __a->__lock();
705 _Tp* __old;
706 __cxx_atomic_assign_volatile(__old, __a->__a_value);
707 __cxx_atomic_assign_volatile(__a->__a_value, __old + __delta);
708 __a->__unlock();
709 return __old;
711 template <typename _Tp, typename _Td>
712 _LIBCPP_HIDE_FROM_ABI
713 _Tp* __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp*>* __a,
714 ptrdiff_t __delta, memory_order) {
715 __a->__lock();
716 _Tp* __old = __a->__a_value;
717 __a->__a_value += __delta;
718 __a->__unlock();
719 return __old;
722 template <typename _Tp, typename _Td>
723 _LIBCPP_HIDE_FROM_ABI
724 _Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_lock_impl<_Tp>* __a,
725 _Td __delta, memory_order) {
726 __a->__lock();
727 _Tp __old;
728 __cxx_atomic_assign_volatile(__old, __a->__a_value);
729 __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old - __delta));
730 __a->__unlock();
731 return __old;
733 template <typename _Tp, typename _Td>
734 _LIBCPP_HIDE_FROM_ABI
735 _Tp __cxx_atomic_fetch_sub(__cxx_atomic_lock_impl<_Tp>* __a,
736 _Td __delta, memory_order) {
737 __a->__lock();
738 _Tp __old = __a->__a_value;
739 __a->__a_value -= __delta;
740 __a->__unlock();
741 return __old;
744 template <typename _Tp>
745 _LIBCPP_HIDE_FROM_ABI
746 _Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_lock_impl<_Tp>* __a,
747 _Tp __pattern, memory_order) {
748 __a->__lock();
749 _Tp __old;
750 __cxx_atomic_assign_volatile(__old, __a->__a_value);
751 __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old & __pattern));
752 __a->__unlock();
753 return __old;
755 template <typename _Tp>
756 _LIBCPP_HIDE_FROM_ABI
757 _Tp __cxx_atomic_fetch_and(__cxx_atomic_lock_impl<_Tp>* __a,
758 _Tp __pattern, memory_order) {
759 __a->__lock();
760 _Tp __old = __a->__a_value;
761 __a->__a_value &= __pattern;
762 __a->__unlock();
763 return __old;
766 template <typename _Tp>
767 _LIBCPP_HIDE_FROM_ABI
768 _Tp __cxx_atomic_fetch_or(volatile __cxx_atomic_lock_impl<_Tp>* __a,
769 _Tp __pattern, memory_order) {
770 __a->__lock();
771 _Tp __old;
772 __cxx_atomic_assign_volatile(__old, __a->__a_value);
773 __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old | __pattern));
774 __a->__unlock();
775 return __old;
777 template <typename _Tp>
778 _LIBCPP_HIDE_FROM_ABI
779 _Tp __cxx_atomic_fetch_or(__cxx_atomic_lock_impl<_Tp>* __a,
780 _Tp __pattern, memory_order) {
781 __a->__lock();
782 _Tp __old = __a->__a_value;
783 __a->__a_value |= __pattern;
784 __a->__unlock();
785 return __old;
788 template <typename _Tp>
789 _LIBCPP_HIDE_FROM_ABI
790 _Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_lock_impl<_Tp>* __a,
791 _Tp __pattern, memory_order) {
792 __a->__lock();
793 _Tp __old;
794 __cxx_atomic_assign_volatile(__old, __a->__a_value);
795 __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old ^ __pattern));
796 __a->__unlock();
797 return __old;
799 template <typename _Tp>
800 _LIBCPP_HIDE_FROM_ABI
801 _Tp __cxx_atomic_fetch_xor(__cxx_atomic_lock_impl<_Tp>* __a,
802 _Tp __pattern, memory_order) {
803 __a->__lock();
804 _Tp __old = __a->__a_value;
805 __a->__a_value ^= __pattern;
806 __a->__unlock();
807 return __old;
810 template <typename _Tp,
811 typename _Base = typename conditional<__libcpp_is_always_lock_free<_Tp>::__value,
812 __cxx_atomic_base_impl<_Tp>,
813 __cxx_atomic_lock_impl<_Tp> >::type>
814 #else
815 template <typename _Tp,
816 typename _Base = __cxx_atomic_base_impl<_Tp> >
817 #endif //_LIBCPP_ATOMIC_ONLY_USE_BUILTINS
818 struct __cxx_atomic_impl : public _Base {
819 static_assert(is_trivially_copyable<_Tp>::value,
820 "std::atomic<T> requires that 'T' be a trivially copyable type");
822 _LIBCPP_HIDE_FROM_ABI __cxx_atomic_impl() _NOEXCEPT = default;
823 _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __cxx_atomic_impl(_Tp __value) _NOEXCEPT
824 : _Base(__value) {}
827 _LIBCPP_END_NAMESPACE_STD
829 #endif // _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H