//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H
#define _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H
#include <__atomic/is_always_lock_free.h>
#include <__atomic/memory_order.h>
#include <__config>
#include <__memory/addressof.h>
#include <__type_traits/conditional.h>
#include <__type_traits/is_assignable.h>
#include <__type_traits/is_trivially_copyable.h>
#include <__type_traits/remove_const.h>
#include <cstddef>
#include <cstring>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#  pragma GCC system_header
#endif

_LIBCPP_BEGIN_NAMESPACE_STD

#if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) || defined(_LIBCPP_ATOMIC_ONLY_USE_BUILTINS)

// [atomics.types.generic]p1 guarantees _Tp is trivially copyable. Because
// the default operator= in an object is not volatile, a byte-by-byte copy
// is required.
template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_assign_volatile(_Tp& __a_value, _Tv const& __val) {
  __a_value = __val;
}
template <typename _Tp, typename _Tv, __enable_if_t<is_assignable<_Tp&, _Tv>::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_assign_volatile(_Tp volatile& __a_value, _Tv volatile const& __val) {
  volatile char* __to         = reinterpret_cast<volatile char*>(std::addressof(__a_value));
  volatile char* __end        = __to + sizeof(_Tp);
  volatile const char* __from = reinterpret_cast<volatile const char*>(std::addressof(__val));
  while (__to != __end)
    *__to++ = *__from++;
}
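
// For example, assigning through the volatile overload above copies
// sizeof(_Tp) bytes one volatile char at a time, because _Tp's
// implicitly-defined operator= cannot be invoked on a volatile-qualified
// object.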

#endif

#if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP)

template <typename _Tp>
struct __cxx_atomic_base_impl {
  _LIBCPP_HIDE_FROM_ABI
#ifndef _LIBCPP_CXX03_LANG
  __cxx_atomic_base_impl() _NOEXCEPT = default;
#else
  __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {}
#endif // _LIBCPP_CXX03_LANG
  _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT : __a_value(value) {}
  _Tp __a_value;
};

_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
  // Avoid switch statement to make this a constexpr.
  return __order == memory_order_relaxed
             ? __ATOMIC_RELAXED
             : (__order == memory_order_acquire
                    ? __ATOMIC_ACQUIRE
                    : (__order == memory_order_release
                           ? __ATOMIC_RELEASE
                           : (__order == memory_order_seq_cst
                                  ? __ATOMIC_SEQ_CST
                                  : (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_CONSUME))));
}
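
// The conditional chain keeps this a single return statement, which C++11
// constexpr functions require; memory_order_consume is the only enumerator
// left unchecked, so it supplies the final __ATOMIC_CONSUME alternative.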

_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) {
  // Avoid switch statement to make this a constexpr.
  return __order == memory_order_relaxed
             ? __ATOMIC_RELAXED
             : (__order == memory_order_acquire
                    ? __ATOMIC_ACQUIRE
                    : (__order == memory_order_release
                           ? __ATOMIC_RELAXED
                           : (__order == memory_order_seq_cst
                                  ? __ATOMIC_SEQ_CST
                                  : (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE : __ATOMIC_CONSUME))));
}
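
// Note the demotions above: the failure ordering of a compare-exchange may
// not carry release semantics, so memory_order_release maps to
// __ATOMIC_RELAXED and memory_order_acq_rel maps to __ATOMIC_ACQUIRE.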

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
  __cxx_atomic_assign_volatile(__a->__a_value, __val);
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
  __a->__a_value = __val;
}

_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_thread_fence(memory_order __order) {
  __atomic_thread_fence(__to_gcc_order(__order));
}

_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_signal_fence(memory_order __order) {
  __atomic_signal_fence(__to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void
__cxx_atomic_store(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
  __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void
__cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) {
  __atomic_store(std::addressof(__a->__a_value), std::addressof(__val), __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
  _Tp __ret;
  __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
  return __ret;
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
  _Tp __ret;
  __atomic_load(std::addressof(__a->__a_value), std::addressof(__ret), __to_gcc_order(__order));
  return __ret;
}
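
// A minimal usage sketch of the helpers above (illustrative only; the real
// callers are the std::atomic<T> members in <atomic>):
//   __cxx_atomic_base_impl<int> __a(0);
//   __cxx_atomic_store(&__a, 42, memory_order_release);
//   int __v = __cxx_atomic_load(&__a, memory_order_acquire); // __v == 42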

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
  _Tp __ret;
  __atomic_exchange(
      std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
  return __ret;
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) {
  _Tp __ret;
  __atomic_exchange(
      std::addressof(__a->__a_value), std::addressof(__value), std::addressof(__ret), __to_gcc_order(__order));
  return __ret;
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
    volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value,
    memory_order __success, memory_order __failure) {
  return __atomic_compare_exchange(
      std::addressof(__a->__a_value),
      __expected,
      std::addressof(__value),
      false,
      __to_gcc_order(__success),
      __to_gcc_failure_order(__failure));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success,
    memory_order __failure) {
  return __atomic_compare_exchange(
      std::addressof(__a->__a_value),
      __expected,
      std::addressof(__value),
      false,
      __to_gcc_order(__success),
      __to_gcc_failure_order(__failure));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
    volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value,
    memory_order __success, memory_order __failure) {
  return __atomic_compare_exchange(
      std::addressof(__a->__a_value),
      __expected,
      std::addressof(__value),
      true,
      __to_gcc_order(__success),
      __to_gcc_failure_order(__failure));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success,
    memory_order __failure) {
  return __atomic_compare_exchange(
      std::addressof(__a->__a_value),
      __expected,
      std::addressof(__value),
      true,
      __to_gcc_order(__success),
      __to_gcc_failure_order(__failure));
}
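
// The fourth argument to __atomic_compare_exchange selects the flavor:
// false requests the strong form, true the weak form, which is permitted to
// fail spuriously.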

template <typename _Tp>
struct __skip_amt { enum {value = 1}; };

template <typename _Tp>
struct __skip_amt<_Tp*> { enum {value = sizeof(_Tp)}; };

// FIXME: Haven't figured out what the spec says about using arrays with
// atomic_fetch_add. Force a failure rather than creating bad behavior.
template <typename _Tp>
struct __skip_amt<_Tp[]> { };
template <typename _Tp, int n>
struct __skip_amt<_Tp[n]> { };
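
// __skip_amt pre-scales the delta for the fetch-add/fetch-sub helpers below:
// the GCC builtin adds to the raw address value, so for an atomic _Tp* a
// delta of 1 must become 1 * sizeof(_Tp) bytes. The empty array
// specializations make any use with T[] a compile-time error.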

template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_add(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
  return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
}

template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
  return __atomic_fetch_add(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
}

template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_sub(volatile __cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
  return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
}

template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta, memory_order __order) {
  return __atomic_fetch_sub(std::addressof(__a->__a_value), __delta * __skip_amt<_Tp>::value, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_and(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
  return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
  return __atomic_fetch_and(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_or(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
  return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
  return __atomic_fetch_or(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_xor(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
  return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) {
  return __atomic_fetch_xor(std::addressof(__a->__a_value), __pattern, __to_gcc_order(__order));
}

#define __cxx_atomic_is_lock_free(__s) __atomic_is_lock_free(__s, 0)

#elif defined(_LIBCPP_HAS_C_ATOMIC_IMP)

template <typename _Tp>
struct __cxx_atomic_base_impl {
  _LIBCPP_HIDE_FROM_ABI
#ifndef _LIBCPP_CXX03_LANG
  __cxx_atomic_base_impl() _NOEXCEPT = default;
#else
  __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {}
#endif // _LIBCPP_CXX03_LANG
  _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp __value) _NOEXCEPT : __a_value(__value) {}
  _LIBCPP_DISABLE_EXTENSION_WARNING _Atomic(_Tp) __a_value;
};

#define __cxx_atomic_is_lock_free(__s) __c11_atomic_is_lock_free(__s)

_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_thread_fence(memory_order __order) _NOEXCEPT {
  __c11_atomic_thread_fence(static_cast<__memory_order_underlying_t>(__order));
}

_LIBCPP_HIDE_FROM_ABI inline void __cxx_atomic_signal_fence(memory_order __order) _NOEXCEPT {
  __c11_atomic_signal_fence(static_cast<__memory_order_underlying_t>(__order));
}

template <class _Tp>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val) _NOEXCEPT {
  __c11_atomic_init(std::addressof(__a->__a_value), __val);
}
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) _NOEXCEPT {
  __c11_atomic_init(std::addressof(__a->__a_value), __val);
}

template <class _Tp>
_LIBCPP_HIDE_FROM_ABI void
__cxx_atomic_store(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val, memory_order __order) _NOEXCEPT {
  __c11_atomic_store(std::addressof(__a->__a_value), __val, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI void
__cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val, memory_order __order) _NOEXCEPT {
  __c11_atomic_store(std::addressof(__a->__a_value), __val, static_cast<__memory_order_underlying_t>(__order));
}

template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const volatile* __a, memory_order __order) _NOEXCEPT {
  using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
  return __c11_atomic_load(
      const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __order) _NOEXCEPT {
  using __ptr_type = __remove_const_t<decltype(__a->__a_value)>*;
  return __c11_atomic_load(
      const_cast<__ptr_type>(std::addressof(__a->__a_value)), static_cast<__memory_order_underlying_t>(__order));
}

template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT {
  return __c11_atomic_exchange(
      std::addressof(__a->__a_value), __value, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value, memory_order __order) _NOEXCEPT {
  return __c11_atomic_exchange(
      std::addressof(__a->__a_value), __value, static_cast<__memory_order_underlying_t>(__order));
}

_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR memory_order __to_failure_order(memory_order __order) {
  // Avoid switch statement to make this a constexpr.
  return __order == memory_order_release
             ? memory_order_relaxed
             : (__order == memory_order_acq_rel ? memory_order_acquire : __order);
}

template <class _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
    __cxx_atomic_base_impl<_Tp> volatile* __a,
    _Tp* __expected,
    _Tp __value,
    memory_order __success,
    memory_order __failure) _NOEXCEPT {
  return __c11_atomic_compare_exchange_strong(
      std::addressof(__a->__a_value),
      __expected,
      __value,
      static_cast<__memory_order_underlying_t>(__success),
      static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
}
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
    __cxx_atomic_base_impl<_Tp>* __a,
    _Tp* __expected,
    _Tp __value,
    memory_order __success,
    memory_order __failure) _NOEXCEPT {
  return __c11_atomic_compare_exchange_strong(
      std::addressof(__a->__a_value),
      __expected,
      __value,
      static_cast<__memory_order_underlying_t>(__success),
      static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
}

template <class _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
    __cxx_atomic_base_impl<_Tp> volatile* __a,
    _Tp* __expected,
    _Tp __value,
    memory_order __success,
    memory_order __failure) _NOEXCEPT {
  return __c11_atomic_compare_exchange_weak(
      std::addressof(__a->__a_value),
      __expected,
      __value,
      static_cast<__memory_order_underlying_t>(__success),
      static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
}
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
    __cxx_atomic_base_impl<_Tp>* __a,
    _Tp* __expected,
    _Tp __value,
    memory_order __success,
    memory_order __failure) _NOEXCEPT {
  return __c11_atomic_compare_exchange_weak(
      std::addressof(__a->__a_value),
      __expected,
      __value,
      static_cast<__memory_order_underlying_t>(__success),
      static_cast<__memory_order_underlying_t>(__to_failure_order(__failure)));
}

template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_add(
      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_add(
      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
}

template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp*
__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_add(
      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp*
__cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_add(
      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
}

template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_sub(
      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_sub(
      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
}

template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp*
__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_sub(
      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp*
__cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_sub(
      std::addressof(__a->__a_value), __delta, static_cast<__memory_order_underlying_t>(__order));
}

template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_and(
      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_and(
      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
}

template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_or(
      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_or(
      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
}

template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_xor(
      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
}
template <class _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp
__cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
  return __c11_atomic_fetch_xor(
      std::addressof(__a->__a_value), __pattern, static_cast<__memory_order_underlying_t>(__order));
}

#endif // _LIBCPP_HAS_GCC_ATOMIC_IMP, _LIBCPP_HAS_C_ATOMIC_IMP

#ifdef _LIBCPP_ATOMIC_ONLY_USE_BUILTINS

template <typename _Tp>
struct __cxx_atomic_lock_impl {
  _LIBCPP_HIDE_FROM_ABI
  __cxx_atomic_lock_impl() _NOEXCEPT
      : __a_value(), __a_lock(0) {}
  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit
  __cxx_atomic_lock_impl(_Tp value) _NOEXCEPT
      : __a_value(value), __a_lock(0) {}

  _Tp __a_value;
  mutable __cxx_atomic_base_impl<_LIBCPP_ATOMIC_FLAG_TYPE> __a_lock;

  _LIBCPP_HIDE_FROM_ABI void __lock() const volatile {
    while (1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire))
      /*spin*/;
  }
  _LIBCPP_HIDE_FROM_ABI void __lock() const {
    while (1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire))
      /*spin*/;
  }
  _LIBCPP_HIDE_FROM_ABI void __unlock() const volatile {
    __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release);
  }
  _LIBCPP_HIDE_FROM_ABI void __unlock() const {
    __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release);
  }
  _LIBCPP_HIDE_FROM_ABI _Tp __read() const volatile {
    __lock();
    _Tp __old;
    __cxx_atomic_assign_volatile(__old, __a_value);
    __unlock();
    return __old;
  }
  _LIBCPP_HIDE_FROM_ABI _Tp __read() const {
    __lock();
    _Tp __old = __a_value;
    __unlock();
    return __old;
  }
};
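
// __a_lock is a simple spinlock: __lock() repeats an acquire exchange until
// the previous value it observes is 0 (this thread took the lock), and
// __unlock() clears the flag with a release store so the guarded __a_value
// is published to the next acquirer.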

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __val) {
  __cxx_atomic_assign_volatile(__a->__a_value, __val);
}
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __val) {
  __a->__a_value = __val;
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __val, memory_order) {
  __a->__lock();
  __cxx_atomic_assign_volatile(__a->__a_value, __val);
  __a->__unlock();
}
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __val, memory_order) {
  __a->__lock();
  __a->__a_value = __val;
  __a->__unlock();
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp>* __a, memory_order) {
  return __a->__read();
}
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, memory_order) {
  return __a->__read();
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) {
  __a->__lock();
  _Tp __old;
  __cxx_atomic_assign_volatile(__old, __a->__a_value);
  __cxx_atomic_assign_volatile(__a->__a_value, __value);
  __a->__unlock();
  return __old;
}
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) {
  __a->__lock();
  _Tp __old      = __a->__a_value;
  __a->__a_value = __value;
  __a->__unlock();
  return __old;
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
    volatile __cxx_atomic_lock_impl<_Tp>* __a,
    _Tp* __expected, _Tp __value, memory_order, memory_order) {
  _Tp __temp;
  __a->__lock();
  __cxx_atomic_assign_volatile(__temp, __a->__a_value);
  bool __ret = (std::memcmp(&__temp, __expected, sizeof(_Tp)) == 0);
  if (__ret)
    __cxx_atomic_assign_volatile(__a->__a_value, __value);
  else
    __cxx_atomic_assign_volatile(*__expected, __a->__a_value);
  __a->__unlock();
  return __ret;
}
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong(
    __cxx_atomic_lock_impl<_Tp>* __a,
    _Tp* __expected, _Tp __value, memory_order, memory_order) {
  __a->__lock();
  bool __ret = (std::memcmp(&__a->__a_value, __expected, sizeof(_Tp)) == 0);
  if (__ret)
    std::memcpy(&__a->__a_value, &__value, sizeof(_Tp));
  else
    std::memcpy(__expected, &__a->__a_value, sizeof(_Tp));
  __a->__unlock();
  return __ret;
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
    volatile __cxx_atomic_lock_impl<_Tp>* __a,
    _Tp* __expected, _Tp __value, memory_order, memory_order) {
  _Tp __temp;
  __a->__lock();
  __cxx_atomic_assign_volatile(__temp, __a->__a_value);
  bool __ret = (std::memcmp(&__temp, __expected, sizeof(_Tp)) == 0);
  if (__ret)
    __cxx_atomic_assign_volatile(__a->__a_value, __value);
  else
    __cxx_atomic_assign_volatile(*__expected, __a->__a_value);
  __a->__unlock();
  return __ret;
}
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak(
    __cxx_atomic_lock_impl<_Tp>* __a,
    _Tp* __expected, _Tp __value, memory_order, memory_order) {
  __a->__lock();
  bool __ret = (std::memcmp(&__a->__a_value, __expected, sizeof(_Tp)) == 0);
  if (__ret)
    std::memcpy(&__a->__a_value, &__value, sizeof(_Tp));
  else
    std::memcpy(__expected, &__a->__a_value, sizeof(_Tp));
  __a->__unlock();
  return __ret;
}
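
// Under the lock the comparison cannot be raced, so the "weak" forms above
// are identical to the strong ones and never fail spuriously.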

template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) {
  __a->__lock();
  _Tp __old;
  __cxx_atomic_assign_volatile(__old, __a->__a_value);
  __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old + __delta));
  __a->__unlock();
  return __old;
}
template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) {
  __a->__lock();
  _Tp __old = __a->__a_value;
  __a->__a_value += __delta;
  __a->__unlock();
  return __old;
}

template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp*
__cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order) {
  __a->__lock();
  _Tp* __old;
  __cxx_atomic_assign_volatile(__old, __a->__a_value);
  __cxx_atomic_assign_volatile(__a->__a_value, __old + __delta);
  __a->__unlock();
  return __old;
}
template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp*
__cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order) {
  __a->__lock();
  _Tp* __old = __a->__a_value;
  __a->__a_value += __delta;
  __a->__unlock();
  return __old;
}

template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) {
  __a->__lock();
  _Tp __old;
  __cxx_atomic_assign_volatile(__old, __a->__a_value);
  __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old - __delta));
  __a->__unlock();
  return __old;
}
template <typename _Tp, typename _Td>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(__cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) {
  __a->__lock();
  _Tp __old = __a->__a_value;
  __a->__a_value -= __delta;
  __a->__unlock();
  return __old;
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) {
  __a->__lock();
  _Tp __old;
  __cxx_atomic_assign_volatile(__old, __a->__a_value);
  __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old & __pattern));
  __a->__unlock();
  return __old;
}
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_and(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) {
  __a->__lock();
  _Tp __old = __a->__a_value;
  __a->__a_value &= __pattern;
  __a->__unlock();
  return __old;
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_or(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) {
  __a->__lock();
  _Tp __old;
  __cxx_atomic_assign_volatile(__old, __a->__a_value);
  __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old | __pattern));
  __a->__unlock();
  return __old;
}
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_or(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) {
  __a->__lock();
  _Tp __old = __a->__a_value;
  __a->__a_value |= __pattern;
  __a->__unlock();
  return __old;
}

template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) {
  __a->__lock();
  _Tp __old;
  __cxx_atomic_assign_volatile(__old, __a->__a_value);
  __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old ^ __pattern));
  __a->__unlock();
  return __old;
}
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_xor(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) {
  __a->__lock();
  _Tp __old = __a->__a_value;
  __a->__a_value ^= __pattern;
  __a->__unlock();
  return __old;
}

template <typename _Tp,
          typename _Base = typename conditional<__libcpp_is_always_lock_free<_Tp>::__value,
                                                __cxx_atomic_base_impl<_Tp>,
                                                __cxx_atomic_lock_impl<_Tp> >::type>
#else
template <typename _Tp,
          typename _Base = __cxx_atomic_base_impl<_Tp> >
#endif // _LIBCPP_ATOMIC_ONLY_USE_BUILTINS
struct __cxx_atomic_impl : public _Base {
  static_assert(is_trivially_copyable<_Tp>::value,
                "std::atomic<T> requires that 'T' be a trivially copyable type");

  _LIBCPP_HIDE_FROM_ABI __cxx_atomic_impl() _NOEXCEPT = default;
  _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __cxx_atomic_impl(_Tp __value) _NOEXCEPT : _Base(__value) {}
};
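
// With _LIBCPP_ATOMIC_ONLY_USE_BUILTINS defined, __cxx_atomic_impl<_Tp>
// derives from the builtin-backed __cxx_atomic_base_impl<_Tp> when
// __libcpp_is_always_lock_free<_Tp>::__value holds, and falls back to the
// spinlock-guarded __cxx_atomic_lock_impl<_Tp> otherwise; without it, the
// builtin-backed base is used unconditionally.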

_LIBCPP_END_NAMESPACE_STD

#endif // _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H