/*
 * kmp_atomic.h - ATOMIC header file
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ompt-specific.h"
// Intel compiler does not support _Complex datatype on win.
// Intel compiler supports _Complex datatype on lin and mac.
// On the other hand, there is a problem of stack alignment on lin_32 and
// mac_32 if the rhs is a cmplx80 or cmplx128 typedef'ed datatype.
// The decision is: to use the compiler-supported _Complex type on lin and mac,
//                  and to use typedef'ed types on win.
// Condition for WIN64 was modified in anticipation of the 10.1 build compiler.
#if defined(__cplusplus) && (KMP_OS_WINDOWS)
// create shortcuts for c99 complex types

// Visual Studio cannot have function parameters that have the
// align __declspec attribute, so we must remove it. (Compiler Error C2719)
#define KMP_DO_ALIGN(alignment) /* Nothing */

#if defined(_MSC_VER) && (_MSC_VER < 1600) && defined(_DEBUG)
// Workaround for the problem of _DebugHeapTag unresolved external.
// This problem prevented using our static debug library for C tests
// compiled with the /MDd option (the library itself is built with /MTd).
#undef _DEBUG
#define _DEBUG_TEMPORARILY_UNSET_
#endif
#include <complex>

template <typename type_lhs, typename type_rhs>
std::complex<type_lhs> __kmp_lhs_div_rhs(const std::complex<type_lhs> &lhs,
                                         const std::complex<type_rhs> &rhs) {
  type_lhs a = lhs.real();
  type_lhs b = lhs.imag();
  type_rhs c = rhs.real();
  type_rhs d = rhs.imag();
  type_rhs den = c * c + d * d;
  type_rhs r = (a * c + b * d);
  type_rhs i = (b * c - a * d);
  std::complex<type_lhs> ret(r / den, i / den);
  return ret;
}
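// Illustrative note (added, not in the original header): __kmp_lhs_div_rhs
// implements complex division by the textbook formula
//   (a + bi) / (c + di) = ((a*c + b*d) + (b*c - a*d)i) / (c*c + d*d).
// For example, (1 + 2i) / (3 + 4i) gives den = 25, r = 11, i = 2,
// i.e. the result 0.44 + 0.08i.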
struct __kmp_cmplx64_t : std::complex<double> {

  __kmp_cmplx64_t() : std::complex<double>() {}

  __kmp_cmplx64_t(const std::complex<double> &cd) : std::complex<double>(cd) {}

  void operator/=(const __kmp_cmplx64_t &rhs) {
    std::complex<double> lhs = *this;
    *this = __kmp_lhs_div_rhs(lhs, rhs);
  }

  __kmp_cmplx64_t operator/(const __kmp_cmplx64_t &rhs) {
    std::complex<double> lhs = *this;
    return __kmp_lhs_div_rhs(lhs, rhs);
  }
};
typedef struct __kmp_cmplx64_t kmp_cmplx64;

struct __kmp_cmplx32_t : std::complex<float> {

  __kmp_cmplx32_t() : std::complex<float>() {}

  __kmp_cmplx32_t(const std::complex<float> &cf) : std::complex<float>(cf) {}

  __kmp_cmplx32_t operator+(const __kmp_cmplx32_t &b) {
    std::complex<float> lhs = *this;
    std::complex<float> rhs = b;
    return (lhs + rhs);
  }
  __kmp_cmplx32_t operator-(const __kmp_cmplx32_t &b) {
    std::complex<float> lhs = *this;
    std::complex<float> rhs = b;
    return (lhs - rhs);
  }
  __kmp_cmplx32_t operator*(const __kmp_cmplx32_t &b) {
    std::complex<float> lhs = *this;
    std::complex<float> rhs = b;
    return (lhs * rhs);
  }

  __kmp_cmplx32_t operator+(const kmp_cmplx64 &b) {
    kmp_cmplx64 t = kmp_cmplx64(*this) + b;
    std::complex<double> d(t);
    std::complex<float> f(d);
    __kmp_cmplx32_t r(f);
    return r;
  }
  __kmp_cmplx32_t operator-(const kmp_cmplx64 &b) {
    kmp_cmplx64 t = kmp_cmplx64(*this) - b;
    std::complex<double> d(t);
    std::complex<float> f(d);
    __kmp_cmplx32_t r(f);
    return r;
  }
  __kmp_cmplx32_t operator*(const kmp_cmplx64 &b) {
    kmp_cmplx64 t = kmp_cmplx64(*this) * b;
    std::complex<double> d(t);
    std::complex<float> f(d);
    __kmp_cmplx32_t r(f);
    return r;
  }

  void operator/=(const __kmp_cmplx32_t &rhs) {
    std::complex<float> lhs = *this;
    *this = __kmp_lhs_div_rhs(lhs, rhs);
  }

  __kmp_cmplx32_t operator/(const __kmp_cmplx32_t &rhs) {
    std::complex<float> lhs = *this;
    return __kmp_lhs_div_rhs(lhs, rhs);
  }

  void operator/=(const kmp_cmplx64 &rhs) {
    std::complex<float> lhs = *this;
    *this = __kmp_lhs_div_rhs(lhs, rhs);
  }

  __kmp_cmplx32_t operator/(const kmp_cmplx64 &rhs) {
    std::complex<float> lhs = *this;
    return __kmp_lhs_div_rhs(lhs, rhs);
  }
};
typedef struct __kmp_cmplx32_t kmp_cmplx32;

struct KMP_DO_ALIGN(16) __kmp_cmplx80_t : std::complex<long double> {

  __kmp_cmplx80_t() : std::complex<long double>() {}

  __kmp_cmplx80_t(const std::complex<long double> &cld)
      : std::complex<long double>(cld) {}

  void operator/=(const __kmp_cmplx80_t &rhs) {
    std::complex<long double> lhs = *this;
    *this = __kmp_lhs_div_rhs(lhs, rhs);
  }

  __kmp_cmplx80_t operator/(const __kmp_cmplx80_t &rhs) {
    std::complex<long double> lhs = *this;
    return __kmp_lhs_div_rhs(lhs, rhs);
  }
};
typedef KMP_DO_ALIGN(16) struct __kmp_cmplx80_t kmp_cmplx80;
#if KMP_HAVE_QUAD
struct __kmp_cmplx128_t : std::complex<_Quad> {

  __kmp_cmplx128_t() : std::complex<_Quad>() {}

  __kmp_cmplx128_t(const std::complex<_Quad> &cq) : std::complex<_Quad>(cq) {}

  void operator/=(const __kmp_cmplx128_t &rhs) {
    std::complex<_Quad> lhs = *this;
    *this = __kmp_lhs_div_rhs(lhs, rhs);
  }

  __kmp_cmplx128_t operator/(const __kmp_cmplx128_t &rhs) {
    std::complex<_Quad> lhs = *this;
    return __kmp_lhs_div_rhs(lhs, rhs);
  }
};
typedef struct __kmp_cmplx128_t kmp_cmplx128;
#endif /* KMP_HAVE_QUAD */

#ifdef _DEBUG_TEMPORARILY_UNSET_
#undef _DEBUG_TEMPORARILY_UNSET_
// restore the _DEBUG definition that was temporarily unset above
#define _DEBUG
#endif

#else
// create shortcuts for c99 complex types
typedef float _Complex kmp_cmplx32;
typedef double _Complex kmp_cmplx64;
typedef long double _Complex kmp_cmplx80;
#if KMP_HAVE_QUAD
typedef _Quad _Complex kmp_cmplx128;
#endif
#endif
// Compiler 12.0 changed alignment of 16 and 32-byte arguments (like _Quad
// and kmp_cmplx128) on IA-32 architecture. The following aligned structures
// are implemented to support the old alignment in 10.1, 11.0, 11.1 and
// introduce the new alignment in 12.0. See CQ88405.
#if KMP_ARCH_X86 && KMP_HAVE_QUAD

// 4-byte aligned structures for backward compatibility.

#pragma pack(push, 4)
struct KMP_DO_ALIGN(4) Quad_a4_t {
  _Quad q;

  Quad_a4_t() : q() {}
  Quad_a4_t(const _Quad &cq) : q(cq) {}

  Quad_a4_t operator+(const Quad_a4_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a4_t)(lhs + rhs);
  }

  Quad_a4_t operator-(const Quad_a4_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a4_t)(lhs - rhs);
  }
  Quad_a4_t operator*(const Quad_a4_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a4_t)(lhs * rhs);
  }

  Quad_a4_t operator/(const Quad_a4_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a4_t)(lhs / rhs);
  }
};
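// Illustrative note (added, not in the original header): Quad_a4_t simply
// wraps a _Quad so that, under "#pragma pack(push, 4)", a 16-byte quad
// argument keeps the legacy 4-byte alignment on IA-32 while still supporting
// the usual arithmetic, e.g. (hypothetical values):
//
//   Quad_a4_t x((_Quad)1.5), y((_Quad)2.0);
//   Quad_a4_t z = x * y; // z.q == 3.0, stored with 4-byte alignment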
struct KMP_DO_ALIGN(4) kmp_cmplx128_a4_t {
  kmp_cmplx128 q;

  kmp_cmplx128_a4_t() : q() {}

  kmp_cmplx128_a4_t(const kmp_cmplx128 &c128) : q(c128) {}

  kmp_cmplx128_a4_t operator+(const kmp_cmplx128_a4_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a4_t)(lhs + rhs);
  }
  kmp_cmplx128_a4_t operator-(const kmp_cmplx128_a4_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a4_t)(lhs - rhs);
  }
  kmp_cmplx128_a4_t operator*(const kmp_cmplx128_a4_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a4_t)(lhs * rhs);
  }

  kmp_cmplx128_a4_t operator/(const kmp_cmplx128_a4_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a4_t)(lhs / rhs);
  }
};

#pragma pack(pop)
// New 16-byte aligned structures for 12.0 compiler.
struct KMP_DO_ALIGN(16) Quad_a16_t {
  _Quad q;

  Quad_a16_t() : q() {}
  Quad_a16_t(const _Quad &cq) : q(cq) {}

  Quad_a16_t operator+(const Quad_a16_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a16_t)(lhs + rhs);
  }

  Quad_a16_t operator-(const Quad_a16_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a16_t)(lhs - rhs);
  }
  Quad_a16_t operator*(const Quad_a16_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a16_t)(lhs * rhs);
  }

  Quad_a16_t operator/(const Quad_a16_t &b) {
    _Quad lhs = (*this).q;
    _Quad rhs = b.q;
    return (Quad_a16_t)(lhs / rhs);
  }
};

struct KMP_DO_ALIGN(16) kmp_cmplx128_a16_t {
  kmp_cmplx128 q;

  kmp_cmplx128_a16_t() : q() {}

  kmp_cmplx128_a16_t(const kmp_cmplx128 &c128) : q(c128) {}

  kmp_cmplx128_a16_t operator+(const kmp_cmplx128_a16_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a16_t)(lhs + rhs);
  }
  kmp_cmplx128_a16_t operator-(const kmp_cmplx128_a16_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a16_t)(lhs - rhs);
  }
  kmp_cmplx128_a16_t operator*(const kmp_cmplx128_a16_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a16_t)(lhs * rhs);
  }

  kmp_cmplx128_a16_t operator/(const kmp_cmplx128_a16_t &b) {
    kmp_cmplx128 lhs = (*this).q;
    kmp_cmplx128 rhs = b.q;
    return (kmp_cmplx128_a16_t)(lhs / rhs);
  }
};

#endif
#if (KMP_ARCH_X86)
#define QUAD_LEGACY Quad_a4_t
#define CPLX128_LEG kmp_cmplx128_a4_t
#else
#define QUAD_LEGACY _Quad
#define CPLX128_LEG kmp_cmplx128
#endif

extern int __kmp_atomic_mode;
// Atomic locks can easily become contended, so we use queuing locks for them.
typedef kmp_queuing_lock_t kmp_atomic_lock_t;

static inline void __kmp_acquire_atomic_lock(kmp_atomic_lock_t *lck,
                                             kmp_int32 gtid) {
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_mutex_acquire) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
        ompt_mutex_atomic, 0, kmp_mutex_impl_queuing,
        (ompt_wait_id_t)(uintptr_t)lck, OMPT_GET_RETURN_ADDRESS(0));
  }
#endif

  __kmp_acquire_queuing_lock(lck, gtid);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_mutex_acquired) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
        ompt_mutex_atomic, (ompt_wait_id_t)(uintptr_t)lck,
        OMPT_GET_RETURN_ADDRESS(0));
  }
#endif
}

static inline int __kmp_test_atomic_lock(kmp_atomic_lock_t *lck,
                                         kmp_int32 gtid) {
  return __kmp_test_queuing_lock(lck, gtid);
}

static inline void __kmp_release_atomic_lock(kmp_atomic_lock_t *lck,
                                             kmp_int32 gtid) {
  __kmp_release_queuing_lock(lck, gtid);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_mutex_released) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
        ompt_mutex_atomic, (ompt_wait_id_t)(uintptr_t)lck,
        OMPT_GET_RETURN_ADDRESS(0));
  }
#endif
}

static inline void __kmp_init_atomic_lock(kmp_atomic_lock_t *lck) {
  __kmp_init_queuing_lock(lck);
}

static inline void __kmp_destroy_atomic_lock(kmp_atomic_lock_t *lck) {
  __kmp_destroy_queuing_lock(lck);
}
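// Illustrative sketch (added, not in the original header): how an
// implementation file might use the helpers above to guard a non-atomic
// read-modify-write. The lock object and gtid are hypothetical placeholders.
//
//   static kmp_atomic_lock_t my_lock;          // initialized once via
//   __kmp_init_atomic_lock(&my_lock);          // __kmp_init_atomic_lock()
//   ...
//   __kmp_acquire_atomic_lock(&my_lock, gtid);
//   /* perform the update under the lock */
//   __kmp_release_atomic_lock(&my_lock, gtid);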
// Global locks: control access to all user-coded atomics of each data type.
extern kmp_atomic_lock_t __kmp_atomic_lock; // Gnu compat mode
extern kmp_atomic_lock_t __kmp_atomic_lock_1i; // 1-byte fixed data types
extern kmp_atomic_lock_t __kmp_atomic_lock_2i; // 2-byte fixed data types
extern kmp_atomic_lock_t __kmp_atomic_lock_4i; // 4-byte fixed data types
extern kmp_atomic_lock_t __kmp_atomic_lock_4r; // kmp_real32 data type
extern kmp_atomic_lock_t __kmp_atomic_lock_8i; // 8-byte fixed data types
extern kmp_atomic_lock_t __kmp_atomic_lock_8r; // kmp_real64 data type
extern kmp_atomic_lock_t __kmp_atomic_lock_8c; // complex byte data type
extern kmp_atomic_lock_t __kmp_atomic_lock_10r; // long double data type
extern kmp_atomic_lock_t __kmp_atomic_lock_16r; // _Quad data type
extern kmp_atomic_lock_t __kmp_atomic_lock_16c; // double complex data type
extern kmp_atomic_lock_t __kmp_atomic_lock_20c; // long double complex type
extern kmp_atomic_lock_t __kmp_atomic_lock_32c; // _Quad complex data type
// Below routines for atomic UPDATE are listed
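// Illustrative sketch (added, not in the original header): an atomic UPDATE
// entry point applies "*lhs = *lhs <op> rhs" atomically. For example, a
// compiler would typically lower
//
//   char counter;
//   #pragma omp atomic
//   counter += 5;
//
// into a call of the form below, where loc and gtid stand for whatever
// ident_t and thread id the compiler has at hand (hypothetical placeholders):
//
//   __kmpc_atomic_fixed1_add(&loc, gtid, &counter, 5);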
void __kmpc_atomic_fixed1_add(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_andb(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_div(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1u_div(ident_t *id_ref, int gtid, unsigned char *lhs, unsigned char rhs);
void __kmpc_atomic_fixed1_mul(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_orb(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_shl(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_shr(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1u_shr(ident_t *id_ref, int gtid, unsigned char *lhs, unsigned char rhs);
void __kmpc_atomic_fixed1_sub(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_xor(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed2_add(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_andb(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_div(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2u_div(ident_t *id_ref, int gtid, unsigned short *lhs, unsigned short rhs);
void __kmpc_atomic_fixed2_mul(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_orb(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_shl(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_shr(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2u_shr(ident_t *id_ref, int gtid, unsigned short *lhs, unsigned short rhs);
void __kmpc_atomic_fixed2_sub(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_xor(ident_t *id_ref, int gtid, short *lhs, short rhs);
// 4-byte add / sub fixed
void __kmpc_atomic_fixed4_add(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed4_sub(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
// 4-byte add / sub float
void __kmpc_atomic_float4_add(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real32 rhs);
void __kmpc_atomic_float4_sub(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real32 rhs);
// 8-byte add / sub fixed
void __kmpc_atomic_fixed8_add(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed8_sub(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
// 8-byte add / sub float
void __kmpc_atomic_float8_add(ident_t *id_ref, int gtid, kmp_real64 *lhs, kmp_real64 rhs);
void __kmpc_atomic_float8_sub(ident_t *id_ref, int gtid, kmp_real64 *lhs, kmp_real64 rhs);
void __kmpc_atomic_fixed4_andb(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed4_div(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed4u_div(ident_t *id_ref, int gtid, kmp_uint32 *lhs, kmp_uint32 rhs);
void __kmpc_atomic_fixed4_mul(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed4_orb(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed4_shl(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed4_shr(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed4u_shr(ident_t *id_ref, int gtid, kmp_uint32 *lhs, kmp_uint32 rhs);
void __kmpc_atomic_fixed4_xor(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed8_andb(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed8_div(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed8u_div(ident_t *id_ref, int gtid, kmp_uint64 *lhs, kmp_uint64 rhs);
void __kmpc_atomic_fixed8_mul(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed8_orb(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed8_shl(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed8_shr(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed8u_shr(ident_t *id_ref, int gtid, kmp_uint64 *lhs, kmp_uint64 rhs);
void __kmpc_atomic_fixed8_xor(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_float4_div(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real32 rhs);
void __kmpc_atomic_float4_mul(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real32 rhs);
void __kmpc_atomic_float8_div(ident_t *id_ref, int gtid, kmp_real64 *lhs, kmp_real64 rhs);
void __kmpc_atomic_float8_mul(ident_t *id_ref, int gtid, kmp_real64 *lhs, kmp_real64 rhs);
// 1-, 2-, 4-, 8-byte logical (&&, ||)
void __kmpc_atomic_fixed1_andl(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_orl(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed2_andl(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_orl(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed4_andl(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed4_orl(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed8_andl(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed8_orl(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed1_max(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_min(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed2_max(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_min(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed4_max(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed4_min(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed8_max(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed8_min(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_float4_max(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real32 rhs);
void __kmpc_atomic_float4_min(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real32 rhs);
void __kmpc_atomic_float8_max(ident_t *id_ref, int gtid, kmp_real64 *lhs, kmp_real64 rhs);
void __kmpc_atomic_float8_min(ident_t *id_ref, int gtid, kmp_real64 *lhs, kmp_real64 rhs);
void __kmpc_atomic_float16_max(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs);
void __kmpc_atomic_float16_min(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs);
// Routines with 16-byte arguments aligned to 16-byte boundary; IA-32
// architecture only
void __kmpc_atomic_float16_max_a16(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs);
void __kmpc_atomic_float16_min_a16(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs);
// .NEQV. (same as xor)
void __kmpc_atomic_fixed1_neqv(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed2_neqv(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed4_neqv(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed8_neqv(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
// .EQV. (same as ~xor)
void __kmpc_atomic_fixed1_eqv(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed2_eqv(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed4_eqv(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed8_eqv(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_float10_add(ident_t *id_ref, int gtid, long double *lhs, long double rhs);
void __kmpc_atomic_float10_sub(ident_t *id_ref, int gtid, long double *lhs, long double rhs);
void __kmpc_atomic_float10_mul(ident_t *id_ref, int gtid, long double *lhs, long double rhs);
void __kmpc_atomic_float10_div(ident_t *id_ref, int gtid, long double *lhs, long double rhs);
void __kmpc_atomic_float16_add(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs);
void __kmpc_atomic_float16_sub(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs);
void __kmpc_atomic_float16_mul(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs);
void __kmpc_atomic_float16_div(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs);
// Routines with 16-byte arguments aligned to 16-byte boundary
void __kmpc_atomic_float16_add_a16(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs);
void __kmpc_atomic_float16_sub_a16(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs);
void __kmpc_atomic_float16_mul_a16(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs);
void __kmpc_atomic_float16_div_a16(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs);
// routines for complex types
void __kmpc_atomic_cmplx4_add(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx4_sub(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx4_mul(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx4_div(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx8_add(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs, kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx8_sub(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs, kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx8_mul(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs, kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx8_div(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs, kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx10_add(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs, kmp_cmplx80 rhs);
void __kmpc_atomic_cmplx10_sub(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs, kmp_cmplx80 rhs);
void __kmpc_atomic_cmplx10_mul(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs, kmp_cmplx80 rhs);
void __kmpc_atomic_cmplx10_div(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs, kmp_cmplx80 rhs);
void __kmpc_atomic_cmplx16_add(ident_t *id_ref, int gtid, CPLX128_LEG *lhs, CPLX128_LEG rhs);
void __kmpc_atomic_cmplx16_sub(ident_t *id_ref, int gtid, CPLX128_LEG *lhs, CPLX128_LEG rhs);
void __kmpc_atomic_cmplx16_mul(ident_t *id_ref, int gtid, CPLX128_LEG *lhs, CPLX128_LEG rhs);
void __kmpc_atomic_cmplx16_div(ident_t *id_ref, int gtid, CPLX128_LEG *lhs, CPLX128_LEG rhs);
// Routines with 16-byte arguments aligned to 16-byte boundary
void __kmpc_atomic_cmplx16_add_a16(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *lhs, kmp_cmplx128_a16_t rhs);
void __kmpc_atomic_cmplx16_sub_a16(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *lhs, kmp_cmplx128_a16_t rhs);
void __kmpc_atomic_cmplx16_mul_a16(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *lhs, kmp_cmplx128_a16_t rhs);
void __kmpc_atomic_cmplx16_div_a16(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *lhs, kmp_cmplx128_a16_t rhs);
// OpenMP 4.0: x = expr binop x for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
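// Illustrative sketch (added, not in the original header): the "_rev" entry
// points handle the non-commutative form "x = expr <op> x", i.e. with the
// operands reversed relative to the plain update. For example,
//
//   #pragma omp atomic
//   x = 10 - x;   // roughly: __kmpc_atomic_fixed4_sub_rev(&loc, gtid, &x, 10);
//
// whereas "x = x - 10" would use __kmpc_atomic_fixed4_sub instead
// (loc and gtid are hypothetical placeholders).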
void __kmpc_atomic_fixed1_sub_rev(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_div_rev(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1u_div_rev(ident_t *id_ref, int gtid, unsigned char *lhs, unsigned char rhs);
void __kmpc_atomic_fixed1_shl_rev(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1_shr_rev(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed1u_shr_rev(ident_t *id_ref, int gtid, unsigned char *lhs, unsigned char rhs);
void __kmpc_atomic_fixed2_sub_rev(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_div_rev(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2u_div_rev(ident_t *id_ref, int gtid, unsigned short *lhs, unsigned short rhs);
void __kmpc_atomic_fixed2_shl_rev(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2_shr_rev(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed2u_shr_rev(ident_t *id_ref, int gtid, unsigned short *lhs, unsigned short rhs);
void __kmpc_atomic_fixed4_sub_rev(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed4_div_rev(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed4u_div_rev(ident_t *id_ref, int gtid, kmp_uint32 *lhs, kmp_uint32 rhs);
void __kmpc_atomic_fixed4_shl_rev(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed4_shr_rev(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed4u_shr_rev(ident_t *id_ref, int gtid, kmp_uint32 *lhs, kmp_uint32 rhs);
void __kmpc_atomic_fixed8_sub_rev(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed8_div_rev(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed8u_div_rev(ident_t *id_ref, int gtid, kmp_uint64 *lhs, kmp_uint64 rhs);
void __kmpc_atomic_fixed8_shl_rev(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed8_shr_rev(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_fixed8u_shr_rev(ident_t *id_ref, int gtid, kmp_uint64 *lhs, kmp_uint64 rhs);
void __kmpc_atomic_float4_sub_rev(ident_t *id_ref, int gtid, float *lhs, float rhs);
void __kmpc_atomic_float4_div_rev(ident_t *id_ref, int gtid, float *lhs, float rhs);
void __kmpc_atomic_float8_sub_rev(ident_t *id_ref, int gtid, double *lhs, double rhs);
void __kmpc_atomic_float8_div_rev(ident_t *id_ref, int gtid, double *lhs, double rhs);
void __kmpc_atomic_float10_sub_rev(ident_t *id_ref, int gtid, long double *lhs, long double rhs);
void __kmpc_atomic_float10_div_rev(ident_t *id_ref, int gtid, long double *lhs, long double rhs);
void __kmpc_atomic_float16_sub_rev(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs);
void __kmpc_atomic_float16_div_rev(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs);
void __kmpc_atomic_cmplx4_sub_rev(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx4_div_rev(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx8_sub_rev(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs, kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx8_div_rev(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs, kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx10_sub_rev(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs, kmp_cmplx80 rhs);
void __kmpc_atomic_cmplx10_div_rev(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs, kmp_cmplx80 rhs);
#if KMP_HAVE_QUAD
void __kmpc_atomic_cmplx16_sub_rev(ident_t *id_ref, int gtid, CPLX128_LEG *lhs, CPLX128_LEG rhs);
void __kmpc_atomic_cmplx16_div_rev(ident_t *id_ref, int gtid, CPLX128_LEG *lhs, CPLX128_LEG rhs);
// Routines with 16-byte arguments aligned to 16-byte boundary
void __kmpc_atomic_float16_sub_a16_rev(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs);
void __kmpc_atomic_float16_div_a16_rev(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs);
void __kmpc_atomic_cmplx16_sub_a16_rev(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *lhs, kmp_cmplx128_a16_t rhs);
void __kmpc_atomic_cmplx16_div_a16_rev(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *lhs, kmp_cmplx128_a16_t rhs);
#endif // KMP_HAVE_QUAD

#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
// routines for mixed types
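// Illustrative sketch (added, not in the original header): the mixed-type
// entry points cover an atomic update whose right-hand side has a wider type
// than the updated location, e.g. (loc and gtid are hypothetical placeholders)
//
//   float f;  double d;
//   #pragma omp atomic
//   f += d;   // roughly: __kmpc_atomic_float4_add_float8(&loc, gtid, &f, d);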
void __kmpc_atomic_fixed1_mul_float8(ident_t *id_ref, int gtid, char *lhs, kmp_real64 rhs);
void __kmpc_atomic_fixed1_div_float8(ident_t *id_ref, int gtid, char *lhs, kmp_real64 rhs);
void __kmpc_atomic_fixed2_mul_float8(ident_t *id_ref, int gtid, short *lhs, kmp_real64 rhs);
void __kmpc_atomic_fixed2_div_float8(ident_t *id_ref, int gtid, short *lhs, kmp_real64 rhs);
void __kmpc_atomic_fixed4_mul_float8(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_real64 rhs);
void __kmpc_atomic_fixed4_div_float8(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_real64 rhs);
void __kmpc_atomic_fixed8_mul_float8(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_real64 rhs);
void __kmpc_atomic_fixed8_div_float8(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_real64 rhs);
void __kmpc_atomic_float4_add_float8(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real64 rhs);
void __kmpc_atomic_float4_sub_float8(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real64 rhs);
void __kmpc_atomic_float4_mul_float8(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real64 rhs);
void __kmpc_atomic_float4_div_float8(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real64 rhs);
// RHS=float16 (deprecated, to be removed when we are sure the compiler does
// not use them)
#if KMP_HAVE_QUAD
void __kmpc_atomic_fixed1_add_fp(ident_t *id_ref, int gtid, char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1u_add_fp(ident_t *id_ref, int gtid, unsigned char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1_sub_fp(ident_t *id_ref, int gtid, char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1u_sub_fp(ident_t *id_ref, int gtid, unsigned char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1_mul_fp(ident_t *id_ref, int gtid, char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1u_mul_fp(ident_t *id_ref, int gtid, unsigned char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1_div_fp(ident_t *id_ref, int gtid, char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1u_div_fp(ident_t *id_ref, int gtid, unsigned char *lhs, _Quad rhs);
void __kmpc_atomic_fixed2_add_fp(ident_t *id_ref, int gtid, short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2u_add_fp(ident_t *id_ref, int gtid, unsigned short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2_sub_fp(ident_t *id_ref, int gtid, short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2u_sub_fp(ident_t *id_ref, int gtid, unsigned short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2_mul_fp(ident_t *id_ref, int gtid, short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2u_mul_fp(ident_t *id_ref, int gtid, unsigned short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2_div_fp(ident_t *id_ref, int gtid, short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2u_div_fp(ident_t *id_ref, int gtid, unsigned short *lhs, _Quad rhs);
void __kmpc_atomic_fixed4_add_fp(ident_t *id_ref, int gtid, kmp_int32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed4u_add_fp(ident_t *id_ref, int gtid, kmp_uint32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed4_sub_fp(ident_t *id_ref, int gtid, kmp_int32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed4u_sub_fp(ident_t *id_ref, int gtid, kmp_uint32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed4_mul_fp(ident_t *id_ref, int gtid, kmp_int32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed4u_mul_fp(ident_t *id_ref, int gtid, kmp_uint32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed4_div_fp(ident_t *id_ref, int gtid, kmp_int32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed4u_div_fp(ident_t *id_ref, int gtid, kmp_uint32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8_add_fp(ident_t *id_ref, int gtid, kmp_int64 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8u_add_fp(ident_t *id_ref, int gtid, kmp_uint64 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8_sub_fp(ident_t *id_ref, int gtid, kmp_int64 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8u_sub_fp(ident_t *id_ref, int gtid, kmp_uint64 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8_mul_fp(ident_t *id_ref, int gtid, kmp_int64 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8u_mul_fp(ident_t *id_ref, int gtid, kmp_uint64 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8_div_fp(ident_t *id_ref, int gtid, kmp_int64 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8u_div_fp(ident_t *id_ref, int gtid, kmp_uint64 *lhs, _Quad rhs);
void __kmpc_atomic_float4_add_fp(ident_t *id_ref, int gtid, kmp_real32 *lhs, _Quad rhs);
void __kmpc_atomic_float4_sub_fp(ident_t *id_ref, int gtid, kmp_real32 *lhs, _Quad rhs);
void __kmpc_atomic_float4_mul_fp(ident_t *id_ref, int gtid, kmp_real32 *lhs, _Quad rhs);
void __kmpc_atomic_float4_div_fp(ident_t *id_ref, int gtid, kmp_real32 *lhs, _Quad rhs);
void __kmpc_atomic_float8_add_fp(ident_t *id_ref, int gtid, kmp_real64 *lhs, _Quad rhs);
void __kmpc_atomic_float8_sub_fp(ident_t *id_ref, int gtid, kmp_real64 *lhs, _Quad rhs);
void __kmpc_atomic_float8_mul_fp(ident_t *id_ref, int gtid, kmp_real64 *lhs, _Quad rhs);
void __kmpc_atomic_float8_div_fp(ident_t *id_ref, int gtid, kmp_real64 *lhs, _Quad rhs);
void __kmpc_atomic_float10_add_fp(ident_t *id_ref, int gtid, long double *lhs, _Quad rhs);
void __kmpc_atomic_float10_sub_fp(ident_t *id_ref, int gtid, long double *lhs, _Quad rhs);
void __kmpc_atomic_float10_mul_fp(ident_t *id_ref, int gtid, long double *lhs, _Quad rhs);
void __kmpc_atomic_float10_div_fp(ident_t *id_ref, int gtid, long double *lhs, _Quad rhs);

// Reverse operations
void __kmpc_atomic_fixed1_sub_rev_fp(ident_t *id_ref, int gtid, char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1u_sub_rev_fp(ident_t *id_ref, int gtid, unsigned char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1_div_rev_fp(ident_t *id_ref, int gtid, char *lhs, _Quad rhs);
void __kmpc_atomic_fixed1u_div_rev_fp(ident_t *id_ref, int gtid, unsigned char *lhs, _Quad rhs);
void __kmpc_atomic_fixed2_sub_rev_fp(ident_t *id_ref, int gtid, short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2u_sub_rev_fp(ident_t *id_ref, int gtid, unsigned short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2_div_rev_fp(ident_t *id_ref, int gtid, short *lhs, _Quad rhs);
void __kmpc_atomic_fixed2u_div_rev_fp(ident_t *id_ref, int gtid, unsigned short *lhs, _Quad rhs);
void __kmpc_atomic_fixed4_sub_rev_fp(ident_t *id_ref, int gtid, kmp_int32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed4u_sub_rev_fp(ident_t *id_ref, int gtid, kmp_uint32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed4_div_rev_fp(ident_t *id_ref, int gtid, kmp_int32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed4u_div_rev_fp(ident_t *id_ref, int gtid, kmp_uint32 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8_sub_rev_fp(ident_t *id_ref, int gtid, kmp_int64 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8u_sub_rev_fp(ident_t *id_ref, int gtid, kmp_uint64 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8_div_rev_fp(ident_t *id_ref, int gtid, kmp_int64 *lhs, _Quad rhs);
void __kmpc_atomic_fixed8u_div_rev_fp(ident_t *id_ref, int gtid, kmp_uint64 *lhs, _Quad rhs);
void __kmpc_atomic_float4_sub_rev_fp(ident_t *id_ref, int gtid, float *lhs, _Quad rhs);
void __kmpc_atomic_float4_div_rev_fp(ident_t *id_ref, int gtid, float *lhs, _Quad rhs);
void __kmpc_atomic_float8_sub_rev_fp(ident_t *id_ref, int gtid, double *lhs, _Quad rhs);
void __kmpc_atomic_float8_div_rev_fp(ident_t *id_ref, int gtid, double *lhs, _Quad rhs);
void __kmpc_atomic_float10_sub_rev_fp(ident_t *id_ref, int gtid, long double *lhs, _Quad rhs);
void __kmpc_atomic_float10_div_rev_fp(ident_t *id_ref, int gtid, long double *lhs, _Quad rhs);
#endif // KMP_HAVE_QUAD
void __kmpc_atomic_cmplx4_add_cmplx8(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx4_sub_cmplx8(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx4_mul_cmplx8(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx4_div_cmplx8(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx64 rhs);
// generic atomic routines
void __kmpc_atomic_1(ident_t *id_ref, int gtid, void *lhs, void *rhs, void (*f)(void *, void *, void *));
void __kmpc_atomic_2(ident_t *id_ref, int gtid, void *lhs, void *rhs, void (*f)(void *, void *, void *));
void __kmpc_atomic_4(ident_t *id_ref, int gtid, void *lhs, void *rhs, void (*f)(void *, void *, void *));
void __kmpc_atomic_8(ident_t *id_ref, int gtid, void *lhs, void *rhs, void (*f)(void *, void *, void *));
void __kmpc_atomic_10(ident_t *id_ref, int gtid, void *lhs, void *rhs, void (*f)(void *, void *, void *));
void __kmpc_atomic_16(ident_t *id_ref, int gtid, void *lhs, void *rhs, void (*f)(void *, void *, void *));
void __kmpc_atomic_20(ident_t *id_ref, int gtid, void *lhs, void *rhs, void (*f)(void *, void *, void *));
void __kmpc_atomic_32(ident_t *id_ref, int gtid, void *lhs, void *rhs, void (*f)(void *, void *, void *));
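// Illustrative sketch (added, not in the original header): the generic
// routines take the operation as a callback and are used when no specialized
// entry point fits. A minimal sketch, assuming the callback is invoked as
// (*f)(dest, lhs, rhs) while the matching lock is held (treat that calling
// convention as an assumption):
//
//   static void my_add8(void *dest, void *lhs, void *rhs) { // hypothetical
//     *(double *)dest = *(double *)lhs + *(double *)rhs;
//   }
//   ...
//   double x, inc;
//   __kmpc_atomic_8(&loc, gtid, &x, &inc, my_add8); // x += inc atomically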
// READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

// Below routines for atomic READ are listed
char __kmpc_atomic_fixed1_rd(ident_t *id_ref, int gtid, char *loc);
short __kmpc_atomic_fixed2_rd(ident_t *id_ref, int gtid, short *loc);
kmp_int32 __kmpc_atomic_fixed4_rd(ident_t *id_ref, int gtid, kmp_int32 *loc);
kmp_int64 __kmpc_atomic_fixed8_rd(ident_t *id_ref, int gtid, kmp_int64 *loc);
kmp_real32 __kmpc_atomic_float4_rd(ident_t *id_ref, int gtid, kmp_real32 *loc);
kmp_real64 __kmpc_atomic_float8_rd(ident_t *id_ref, int gtid, kmp_real64 *loc);
long double __kmpc_atomic_float10_rd(ident_t *id_ref, int gtid, long double *loc);
QUAD_LEGACY __kmpc_atomic_float16_rd(ident_t *id_ref, int gtid, QUAD_LEGACY *loc);
// Fix for CQ220361: cmplx4 READ will return void on Windows* OS; read value
// will be returned through an additional parameter
#if (KMP_OS_WINDOWS)
void __kmpc_atomic_cmplx4_rd(kmp_cmplx32 *out, ident_t *id_ref, int gtid, kmp_cmplx32 *loc);
#else
kmp_cmplx32 __kmpc_atomic_cmplx4_rd(ident_t *id_ref, int gtid, kmp_cmplx32 *loc);
#endif
kmp_cmplx64 __kmpc_atomic_cmplx8_rd(ident_t *id_ref, int gtid, kmp_cmplx64 *loc);
kmp_cmplx80 __kmpc_atomic_cmplx10_rd(ident_t *id_ref, int gtid, kmp_cmplx80 *loc);
CPLX128_LEG __kmpc_atomic_cmplx16_rd(ident_t *id_ref, int gtid, CPLX128_LEG *loc);
// Routines with 16-byte arguments aligned to 16-byte boundary
Quad_a16_t __kmpc_atomic_float16_a16_rd(ident_t *id_ref, int gtid, Quad_a16_t *loc);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_a16_rd(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *loc);
// Below routines for atomic WRITE are listed
void __kmpc_atomic_fixed1_wr(ident_t *id_ref, int gtid, char *lhs, char rhs);
void __kmpc_atomic_fixed2_wr(ident_t *id_ref, int gtid, short *lhs, short rhs);
void __kmpc_atomic_fixed4_wr(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs);
void __kmpc_atomic_fixed8_wr(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs);
void __kmpc_atomic_float4_wr(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real32 rhs);
void __kmpc_atomic_float8_wr(ident_t *id_ref, int gtid, kmp_real64 *lhs, kmp_real64 rhs);
void __kmpc_atomic_float10_wr(ident_t *id_ref, int gtid, long double *lhs, long double rhs);
void __kmpc_atomic_float16_wr(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs);
void __kmpc_atomic_cmplx4_wr(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx32 rhs);
void __kmpc_atomic_cmplx8_wr(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs, kmp_cmplx64 rhs);
void __kmpc_atomic_cmplx10_wr(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs, kmp_cmplx80 rhs);
void __kmpc_atomic_cmplx16_wr(ident_t *id_ref, int gtid, CPLX128_LEG *lhs, CPLX128_LEG rhs);
// Routines with 16-byte arguments aligned to 16-byte boundary
void __kmpc_atomic_float16_a16_wr(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs);
void __kmpc_atomic_cmplx16_a16_wr(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *lhs, kmp_cmplx128_a16_t rhs);
// Below routines for atomic CAPTURE are listed
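// Illustrative sketch (added, not in the original header): a CAPTURE entry
// point performs the update and also returns a captured value; the trailing
// "flag" selects whether the old or the updated value is returned (assumed
// encoding: non-zero returns the new value). For example:
//
//   char c, v;
//   #pragma omp atomic capture
//   { v = c; c += 3; }
//   // roughly: v = __kmpc_atomic_fixed1_add_cpt(&loc, gtid, &c, 3, 0);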
char __kmpc_atomic_fixed1_add_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_andb_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_div_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_div_cpt(ident_t *id_ref, int gtid, unsigned char *lhs, unsigned char rhs, int flag);
char __kmpc_atomic_fixed1_mul_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_orb_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_shl_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_shr_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_shr_cpt(ident_t *id_ref, int gtid, unsigned char *lhs, unsigned char rhs, int flag);
char __kmpc_atomic_fixed1_sub_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_xor_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
short __kmpc_atomic_fixed2_add_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_andb_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_div_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_div_cpt(ident_t *id_ref, int gtid, unsigned short *lhs, unsigned short rhs, int flag);
short __kmpc_atomic_fixed2_mul_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_orb_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_shl_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_shr_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_shr_cpt(ident_t *id_ref, int gtid, unsigned short *lhs, unsigned short rhs, int flag);
short __kmpc_atomic_fixed2_sub_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_xor_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
// 4-byte add / sub fixed
kmp_int32 __kmpc_atomic_fixed4_add_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_sub_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
// 4-byte add / sub float
kmp_real32 __kmpc_atomic_float4_add_cpt(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real32 rhs, int flag);
kmp_real32 __kmpc_atomic_float4_sub_cpt(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real32 rhs, int flag);
// 8-byte add / sub fixed
kmp_int64 __kmpc_atomic_fixed8_add_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_sub_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
// 8-byte add / sub float
kmp_real64 __kmpc_atomic_float8_add_cpt(ident_t *id_ref, int gtid, kmp_real64 *lhs, kmp_real64 rhs, int flag);
kmp_real64 __kmpc_atomic_float8_sub_cpt(ident_t *id_ref, int gtid, kmp_real64 *lhs, kmp_real64 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_andb_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_div_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_uint32 __kmpc_atomic_fixed4u_div_cpt(ident_t *id_ref, int gtid, kmp_uint32 *lhs, kmp_uint32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_mul_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_orb_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_shl_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_shr_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_uint32 __kmpc_atomic_fixed4u_shr_cpt(ident_t *id_ref, int gtid, kmp_uint32 *lhs, kmp_uint32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_xor_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_andb_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_div_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_uint64 __kmpc_atomic_fixed8u_div_cpt(ident_t *id_ref, int gtid, kmp_uint64 *lhs, kmp_uint64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_mul_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_orb_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_shl_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_shr_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_uint64 __kmpc_atomic_fixed8u_shr_cpt(ident_t *id_ref, int gtid, kmp_uint64 *lhs, kmp_uint64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_xor_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_real32 __kmpc_atomic_float4_div_cpt(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real32 rhs, int flag);
kmp_real32 __kmpc_atomic_float4_mul_cpt(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real32 rhs, int flag);
kmp_real64 __kmpc_atomic_float8_div_cpt(ident_t *id_ref, int gtid, kmp_real64 *lhs, kmp_real64 rhs, int flag);
kmp_real64 __kmpc_atomic_float8_mul_cpt(ident_t *id_ref, int gtid, kmp_real64 *lhs, kmp_real64 rhs, int flag);
// 1-, 2-, 4-, 8-byte logical (&&, ||)
char __kmpc_atomic_fixed1_andl_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_orl_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
short __kmpc_atomic_fixed2_andl_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_orl_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_andl_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_orl_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_andl_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_orl_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
char __kmpc_atomic_fixed1_max_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_min_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
short __kmpc_atomic_fixed2_max_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_min_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_max_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_min_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_max_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_min_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_real32 __kmpc_atomic_float4_max_cpt(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real32 rhs, int flag);
kmp_real32 __kmpc_atomic_float4_min_cpt(ident_t *id_ref, int gtid, kmp_real32 *lhs, kmp_real32 rhs, int flag);
kmp_real64 __kmpc_atomic_float8_max_cpt(ident_t *id_ref, int gtid, kmp_real64 *lhs, kmp_real64 rhs, int flag);
kmp_real64 __kmpc_atomic_float8_min_cpt(ident_t *id_ref, int gtid, kmp_real64 *lhs, kmp_real64 rhs, int flag);
QUAD_LEGACY __kmpc_atomic_float16_max_cpt(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs, int flag);
QUAD_LEGACY __kmpc_atomic_float16_min_cpt(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs, int flag);
// .NEQV. (same as xor)
char __kmpc_atomic_fixed1_neqv_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
short __kmpc_atomic_fixed2_neqv_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_neqv_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_neqv_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
// .EQV. (same as ~xor)
char __kmpc_atomic_fixed1_eqv_cpt(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
short __kmpc_atomic_fixed2_eqv_cpt(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_eqv_cpt(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_eqv_cpt(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
long double __kmpc_atomic_float10_add_cpt(ident_t *id_ref, int gtid, long double *lhs, long double rhs, int flag);
long double __kmpc_atomic_float10_sub_cpt(ident_t *id_ref, int gtid, long double *lhs, long double rhs, int flag);
long double __kmpc_atomic_float10_mul_cpt(ident_t *id_ref, int gtid, long double *lhs, long double rhs, int flag);
long double __kmpc_atomic_float10_div_cpt(ident_t *id_ref, int gtid, long double *lhs, long double rhs, int flag);
QUAD_LEGACY __kmpc_atomic_float16_add_cpt(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs, int flag);
QUAD_LEGACY __kmpc_atomic_float16_sub_cpt(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs, int flag);
QUAD_LEGACY __kmpc_atomic_float16_mul_cpt(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs, int flag);
QUAD_LEGACY __kmpc_atomic_float16_div_cpt(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs, int flag);
// routines for complex types
// Workaround for cmplx4 routines - return void; captured value is returned
// via the argument
void __kmpc_atomic_cmplx4_add_cpt(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx32 rhs, kmp_cmplx32 *out, int flag);
void __kmpc_atomic_cmplx4_sub_cpt(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx32 rhs, kmp_cmplx32 *out, int flag);
void __kmpc_atomic_cmplx4_mul_cpt(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx32 rhs, kmp_cmplx32 *out, int flag);
void __kmpc_atomic_cmplx4_div_cpt(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx32 rhs, kmp_cmplx32 *out, int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_add_cpt(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs, kmp_cmplx64 rhs, int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_sub_cpt(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs, kmp_cmplx64 rhs, int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_mul_cpt(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs, kmp_cmplx64 rhs, int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_div_cpt(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs, kmp_cmplx64 rhs, int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_add_cpt(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs, kmp_cmplx80 rhs, int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_sub_cpt(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs, kmp_cmplx80 rhs, int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_mul_cpt(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs, kmp_cmplx80 rhs, int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_div_cpt(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs, kmp_cmplx80 rhs, int flag);
CPLX128_LEG __kmpc_atomic_cmplx16_add_cpt(ident_t *id_ref, int gtid, CPLX128_LEG *lhs, CPLX128_LEG rhs, int flag);
CPLX128_LEG __kmpc_atomic_cmplx16_sub_cpt(ident_t *id_ref, int gtid, CPLX128_LEG *lhs, CPLX128_LEG rhs, int flag);
CPLX128_LEG __kmpc_atomic_cmplx16_mul_cpt(ident_t *id_ref, int gtid, CPLX128_LEG *lhs, CPLX128_LEG rhs, int flag);
CPLX128_LEG __kmpc_atomic_cmplx16_div_cpt(ident_t *id_ref, int gtid, CPLX128_LEG *lhs, CPLX128_LEG rhs, int flag);
// Routines with 16-byte arguments aligned to 16-byte boundary
Quad_a16_t __kmpc_atomic_float16_add_a16_cpt(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs, int flag);
Quad_a16_t __kmpc_atomic_float16_sub_a16_cpt(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs, int flag);
Quad_a16_t __kmpc_atomic_float16_mul_a16_cpt(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs, int flag);
Quad_a16_t __kmpc_atomic_float16_div_a16_cpt(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs, int flag);
Quad_a16_t __kmpc_atomic_float16_max_a16_cpt(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs, int flag);
Quad_a16_t __kmpc_atomic_float16_min_a16_cpt(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs, int flag);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_add_a16_cpt(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *lhs, kmp_cmplx128_a16_t rhs, int flag);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_sub_a16_cpt(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *lhs, kmp_cmplx128_a16_t rhs, int flag);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_mul_a16_cpt(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *lhs, kmp_cmplx128_a16_t rhs, int flag);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_div_a16_cpt(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *lhs, kmp_cmplx128_a16_t rhs, int flag);
void __kmpc_atomic_start(void);
void __kmpc_atomic_end(void);
// OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr
// binop x; v = x; } for non-commutative operations.
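// Illustrative sketch (added, not in the original header): "_cpt_rev" combines
// the reversed (non-commutative) update with a capture, e.g.
//
//   #pragma omp atomic capture
//   { x = 10 - x; v = x; }
//   // roughly: v = __kmpc_atomic_fixed4_sub_cpt_rev(&loc, gtid, &x, 10, 1);
//
// with the final "flag" argument selecting old vs. new value as for the plain
// capture routines (assumed encoding: non-zero returns the new value).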
char __kmpc_atomic_fixed1_sub_cpt_rev(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_div_cpt_rev(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_div_cpt_rev(ident_t *id_ref, int gtid, unsigned char *lhs, unsigned char rhs, int flag);
char __kmpc_atomic_fixed1_shl_cpt_rev(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
char __kmpc_atomic_fixed1_shr_cpt_rev(ident_t *id_ref, int gtid, char *lhs, char rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_shr_cpt_rev(ident_t *id_ref, int gtid, unsigned char *lhs, unsigned char rhs, int flag);
short __kmpc_atomic_fixed2_sub_cpt_rev(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_div_cpt_rev(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_div_cpt_rev(ident_t *id_ref, int gtid, unsigned short *lhs, unsigned short rhs, int flag);
short __kmpc_atomic_fixed2_shl_cpt_rev(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
short __kmpc_atomic_fixed2_shr_cpt_rev(ident_t *id_ref, int gtid, short *lhs, short rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_shr_cpt_rev(ident_t *id_ref, int gtid, unsigned short *lhs, unsigned short rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_sub_cpt_rev(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_div_cpt_rev(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_uint32 __kmpc_atomic_fixed4u_div_cpt_rev(ident_t *id_ref, int gtid, kmp_uint32 *lhs, kmp_uint32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_shl_cpt_rev(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_shr_cpt_rev(ident_t *id_ref, int gtid, kmp_int32 *lhs, kmp_int32 rhs, int flag);
kmp_uint32 __kmpc_atomic_fixed4u_shr_cpt_rev(ident_t *id_ref, int gtid, kmp_uint32 *lhs, kmp_uint32 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_sub_cpt_rev(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_div_cpt_rev(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_uint64 __kmpc_atomic_fixed8u_div_cpt_rev(ident_t *id_ref, int gtid, kmp_uint64 *lhs, kmp_uint64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_shl_cpt_rev(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_shr_cpt_rev(ident_t *id_ref, int gtid, kmp_int64 *lhs, kmp_int64 rhs, int flag);
kmp_uint64 __kmpc_atomic_fixed8u_shr_cpt_rev(ident_t *id_ref, int gtid, kmp_uint64 *lhs, kmp_uint64 rhs, int flag);
float __kmpc_atomic_float4_sub_cpt_rev(ident_t *id_ref, int gtid, float *lhs, float rhs, int flag);
float __kmpc_atomic_float4_div_cpt_rev(ident_t *id_ref, int gtid, float *lhs, float rhs, int flag);
double __kmpc_atomic_float8_sub_cpt_rev(ident_t *id_ref, int gtid, double *lhs, double rhs, int flag);
double __kmpc_atomic_float8_div_cpt_rev(ident_t *id_ref, int gtid, double *lhs, double rhs, int flag);
long double __kmpc_atomic_float10_sub_cpt_rev(ident_t *id_ref, int gtid, long double *lhs, long double rhs, int flag);
long double __kmpc_atomic_float10_div_cpt_rev(ident_t *id_ref, int gtid, long double *lhs, long double rhs, int flag);
QUAD_LEGACY __kmpc_atomic_float16_sub_cpt_rev(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs, int flag);
QUAD_LEGACY __kmpc_atomic_float16_div_cpt_rev(ident_t *id_ref, int gtid, QUAD_LEGACY *lhs, QUAD_LEGACY rhs, int flag);
// Workaround for cmplx4 routines - return void; captured value is returned
// via the argument
void __kmpc_atomic_cmplx4_sub_cpt_rev(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx32 rhs, kmp_cmplx32 *out, int flag);
void __kmpc_atomic_cmplx4_div_cpt_rev(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs, kmp_cmplx32 rhs, kmp_cmplx32 *out, int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_sub_cpt_rev(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs, kmp_cmplx64 rhs, int flag);
kmp_cmplx64 __kmpc_atomic_cmplx8_div_cpt_rev(ident_t *id_ref, int gtid, kmp_cmplx64 *lhs, kmp_cmplx64 rhs, int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_sub_cpt_rev(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs, kmp_cmplx80 rhs, int flag);
kmp_cmplx80 __kmpc_atomic_cmplx10_div_cpt_rev(ident_t *id_ref, int gtid, kmp_cmplx80 *lhs, kmp_cmplx80 rhs, int flag);
CPLX128_LEG __kmpc_atomic_cmplx16_sub_cpt_rev(ident_t *id_ref, int gtid, CPLX128_LEG *lhs, CPLX128_LEG rhs, int flag);
CPLX128_LEG __kmpc_atomic_cmplx16_div_cpt_rev(ident_t *id_ref, int gtid, CPLX128_LEG *lhs, CPLX128_LEG rhs, int flag);
Quad_a16_t __kmpc_atomic_float16_sub_a16_cpt_rev(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs, int flag);
Quad_a16_t __kmpc_atomic_float16_div_a16_cpt_rev(ident_t *id_ref, int gtid, Quad_a16_t *lhs, Quad_a16_t rhs, int flag);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_sub_a16_cpt_rev(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *lhs, kmp_cmplx128_a16_t rhs, int flag);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_div_a16_cpt_rev(ident_t *id_ref, int gtid, kmp_cmplx128_a16_t *lhs, kmp_cmplx128_a16_t rhs, int flag);
// OpenMP 4.0 Capture-write (swap): {v = x; x = expr;}
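// Illustrative sketch (assumption; `loc`, `gtid`, `x`, `v` and `expr` are
// placeholder names): a capture-write of the form above, e.g.
//     #pragma omp atomic capture
//     { v = x; x = expr; }              // kmp_int32 x, v
// could map to the swap routines declared below,
//     v = __kmpc_atomic_fixed4_swp(&loc, gtid, &x, expr);
// i.e. the routine atomically stores `rhs` into *lhs and returns the value
// that *lhs held before the store.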
char __kmpc_atomic_fixed1_swp(ident_t *id_ref, int gtid, char *lhs, char rhs);
short __kmpc_atomic_fixed2_swp(ident_t *id_ref, int gtid, short *lhs,
                               short rhs);
kmp_int32 __kmpc_atomic_fixed4_swp(ident_t *id_ref, int gtid, kmp_int32 *lhs,
                                   kmp_int32 rhs);
kmp_int64 __kmpc_atomic_fixed8_swp(ident_t *id_ref, int gtid, kmp_int64 *lhs,
                                   kmp_int64 rhs);
float __kmpc_atomic_float4_swp(ident_t *id_ref, int gtid, float *lhs,
                               float rhs);
double __kmpc_atomic_float8_swp(ident_t *id_ref, int gtid, double *lhs,
                                double rhs);
long double __kmpc_atomic_float10_swp(ident_t *id_ref, int gtid,
                                      long double *lhs, long double rhs);
QUAD_LEGACY __kmpc_atomic_float16_swp(ident_t *id_ref, int gtid,
                                      QUAD_LEGACY *lhs, QUAD_LEGACY rhs);
// !!! TODO: check if we need a workaround here
void __kmpc_atomic_cmplx4_swp(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                              kmp_cmplx32 rhs, kmp_cmplx32 *out);
// kmp_cmplx32 __kmpc_atomic_cmplx4_swp( ident_t *id_ref, int gtid,
//                                       kmp_cmplx32 *lhs, kmp_cmplx32 rhs );
kmp_cmplx64 __kmpc_atomic_cmplx8_swp(ident_t *id_ref, int gtid,
                                     kmp_cmplx64 *lhs, kmp_cmplx64 rhs);
kmp_cmplx80 __kmpc_atomic_cmplx10_swp(ident_t *id_ref, int gtid,
                                      kmp_cmplx80 *lhs, kmp_cmplx80 rhs);
CPLX128_LEG __kmpc_atomic_cmplx16_swp(ident_t *id_ref, int gtid,
                                      CPLX128_LEG *lhs, CPLX128_LEG rhs);
Quad_a16_t __kmpc_atomic_float16_a16_swp(ident_t *id_ref, int gtid,
                                         Quad_a16_t *lhs, Quad_a16_t rhs);
kmp_cmplx128_a16_t __kmpc_atomic_cmplx16_a16_swp(ident_t *id_ref, int gtid,
                                                 kmp_cmplx128_a16_t *lhs,
                                                 kmp_cmplx128_a16_t rhs);
// Capture routines for mixed types (RHS=float16)
#if KMP_HAVE_QUAD
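// Illustrative sketch (assumption): these entry points cover a capture whose
// right-hand side is a _Quad while the atomic variable has a narrower type,
//     _Quad q = /* ... */;              // kmp_int32 x, v
//     #pragma omp atomic capture
//     { x = x + q; v = x; }
// which could be lowered to
//     v = __kmpc_atomic_fixed4_add_cpt_fp(&loc, gtid, &x, q, 1);
// with the update presumably computed against the _Quad right-hand side and
// the result converted back to the variable's type before it is stored.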
char __kmpc_atomic_fixed1_add_cpt_fp(ident_t *id_ref, int gtid, char *lhs,
                                     _Quad rhs, int flag);
char __kmpc_atomic_fixed1_sub_cpt_fp(ident_t *id_ref, int gtid, char *lhs,
                                     _Quad rhs, int flag);
char __kmpc_atomic_fixed1_mul_cpt_fp(ident_t *id_ref, int gtid, char *lhs,
                                     _Quad rhs, int flag);
char __kmpc_atomic_fixed1_div_cpt_fp(ident_t *id_ref, int gtid, char *lhs,
                                     _Quad rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_add_cpt_fp(ident_t *id_ref, int gtid,
                                               unsigned char *lhs, _Quad rhs,
                                               int flag);
unsigned char __kmpc_atomic_fixed1u_sub_cpt_fp(ident_t *id_ref, int gtid,
                                               unsigned char *lhs, _Quad rhs,
                                               int flag);
unsigned char __kmpc_atomic_fixed1u_mul_cpt_fp(ident_t *id_ref, int gtid,
                                               unsigned char *lhs, _Quad rhs,
                                               int flag);
unsigned char __kmpc_atomic_fixed1u_div_cpt_fp(ident_t *id_ref, int gtid,
                                               unsigned char *lhs, _Quad rhs,
                                               int flag);
short __kmpc_atomic_fixed2_add_cpt_fp(ident_t *id_ref, int gtid, short *lhs,
                                      _Quad rhs, int flag);
short __kmpc_atomic_fixed2_sub_cpt_fp(ident_t *id_ref, int gtid, short *lhs,
                                      _Quad rhs, int flag);
short __kmpc_atomic_fixed2_mul_cpt_fp(ident_t *id_ref, int gtid, short *lhs,
                                      _Quad rhs, int flag);
short __kmpc_atomic_fixed2_div_cpt_fp(ident_t *id_ref, int gtid, short *lhs,
                                      _Quad rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_add_cpt_fp(ident_t *id_ref, int gtid,
                                                unsigned short *lhs, _Quad rhs,
                                                int flag);
unsigned short __kmpc_atomic_fixed2u_sub_cpt_fp(ident_t *id_ref, int gtid,
                                                unsigned short *lhs, _Quad rhs,
                                                int flag);
unsigned short __kmpc_atomic_fixed2u_mul_cpt_fp(ident_t *id_ref, int gtid,
                                                unsigned short *lhs, _Quad rhs,
                                                int flag);
unsigned short __kmpc_atomic_fixed2u_div_cpt_fp(ident_t *id_ref, int gtid,
                                                unsigned short *lhs, _Quad rhs,
                                                int flag);
kmp_int32 __kmpc_atomic_fixed4_add_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int32 *lhs, _Quad rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_sub_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int32 *lhs, _Quad rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_mul_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int32 *lhs, _Quad rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_div_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int32 *lhs, _Quad rhs, int flag);
kmp_uint32 __kmpc_atomic_fixed4u_add_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint32 *lhs, _Quad rhs,
                                            int flag);
kmp_uint32 __kmpc_atomic_fixed4u_sub_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint32 *lhs, _Quad rhs,
                                            int flag);
kmp_uint32 __kmpc_atomic_fixed4u_mul_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint32 *lhs, _Quad rhs,
                                            int flag);
kmp_uint32 __kmpc_atomic_fixed4u_div_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint32 *lhs, _Quad rhs,
                                            int flag);
kmp_int64 __kmpc_atomic_fixed8_add_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int64 *lhs, _Quad rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_sub_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int64 *lhs, _Quad rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_mul_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int64 *lhs, _Quad rhs, int flag);
kmp_int64 __kmpc_atomic_fixed8_div_cpt_fp(ident_t *id_ref, int gtid,
                                          kmp_int64 *lhs, _Quad rhs, int flag);
kmp_uint64 __kmpc_atomic_fixed8u_add_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint64 *lhs, _Quad rhs,
                                            int flag);
kmp_uint64 __kmpc_atomic_fixed8u_sub_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint64 *lhs, _Quad rhs,
                                            int flag);
kmp_uint64 __kmpc_atomic_fixed8u_mul_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint64 *lhs, _Quad rhs,
                                            int flag);
kmp_uint64 __kmpc_atomic_fixed8u_div_cpt_fp(ident_t *id_ref, int gtid,
                                            kmp_uint64 *lhs, _Quad rhs,
                                            int flag);
float __kmpc_atomic_float4_add_cpt_fp(ident_t *id_ref, int gtid,
                                      kmp_real32 *lhs, _Quad rhs, int flag);
float __kmpc_atomic_float4_sub_cpt_fp(ident_t *id_ref, int gtid,
                                      kmp_real32 *lhs, _Quad rhs, int flag);
float __kmpc_atomic_float4_mul_cpt_fp(ident_t *id_ref, int gtid,
                                      kmp_real32 *lhs, _Quad rhs, int flag);
float __kmpc_atomic_float4_div_cpt_fp(ident_t *id_ref, int gtid,
                                      kmp_real32 *lhs, _Quad rhs, int flag);
double __kmpc_atomic_float8_add_cpt_fp(ident_t *id_ref, int gtid,
                                       kmp_real64 *lhs, _Quad rhs, int flag);
double __kmpc_atomic_float8_sub_cpt_fp(ident_t *id_ref, int gtid,
                                       kmp_real64 *lhs, _Quad rhs, int flag);
double __kmpc_atomic_float8_mul_cpt_fp(ident_t *id_ref, int gtid,
                                       kmp_real64 *lhs, _Quad rhs, int flag);
double __kmpc_atomic_float8_div_cpt_fp(ident_t *id_ref, int gtid,
                                       kmp_real64 *lhs, _Quad rhs, int flag);
long double __kmpc_atomic_float10_add_cpt_fp(ident_t *id_ref, int gtid,
                                             long double *lhs, _Quad rhs,
                                             int flag);
long double __kmpc_atomic_float10_sub_cpt_fp(ident_t *id_ref, int gtid,
                                             long double *lhs, _Quad rhs,
                                             int flag);
long double __kmpc_atomic_float10_mul_cpt_fp(ident_t *id_ref, int gtid,
                                             long double *lhs, _Quad rhs,
                                             int flag);
long double __kmpc_atomic_float10_div_cpt_fp(ident_t *id_ref, int gtid,
                                             long double *lhs, _Quad rhs,
                                             int flag);
char __kmpc_atomic_fixed1_sub_cpt_rev_fp(ident_t *id_ref, int gtid, char *lhs,
                                         _Quad rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                   unsigned char *lhs,
                                                   _Quad rhs, int flag);
char __kmpc_atomic_fixed1_div_cpt_rev_fp(ident_t *id_ref, int gtid, char *lhs,
                                         _Quad rhs, int flag);
unsigned char __kmpc_atomic_fixed1u_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                   unsigned char *lhs,
                                                   _Quad rhs, int flag);
short __kmpc_atomic_fixed2_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                          short *lhs, _Quad rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                    unsigned short *lhs,
                                                    _Quad rhs, int flag);
short __kmpc_atomic_fixed2_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                          short *lhs, _Quad rhs, int flag);
unsigned short __kmpc_atomic_fixed2u_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                    unsigned short *lhs,
                                                    _Quad rhs, int flag);
kmp_int32 __kmpc_atomic_fixed4_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                              kmp_int32 *lhs, _Quad rhs,
                                              int flag);
kmp_uint32 __kmpc_atomic_fixed4u_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                kmp_uint32 *lhs, _Quad rhs,
                                                int flag);
kmp_int32 __kmpc_atomic_fixed4_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                              kmp_int32 *lhs, _Quad rhs,
                                              int flag);
kmp_uint32 __kmpc_atomic_fixed4u_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                kmp_uint32 *lhs, _Quad rhs,
                                                int flag);
kmp_int64 __kmpc_atomic_fixed8_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                              kmp_int64 *lhs, _Quad rhs,
                                              int flag);
kmp_uint64 __kmpc_atomic_fixed8u_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                kmp_uint64 *lhs, _Quad rhs,
                                                int flag);
kmp_int64 __kmpc_atomic_fixed8_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                              kmp_int64 *lhs, _Quad rhs,
                                              int flag);
kmp_uint64 __kmpc_atomic_fixed8u_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                kmp_uint64 *lhs, _Quad rhs,
                                                int flag);
float __kmpc_atomic_float4_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                          float *lhs, _Quad rhs, int flag);
float __kmpc_atomic_float4_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                          float *lhs, _Quad rhs, int flag);
double __kmpc_atomic_float8_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                           double *lhs, _Quad rhs, int flag);
double __kmpc_atomic_float8_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                           double *lhs, _Quad rhs, int flag);
long double __kmpc_atomic_float10_sub_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                 long double *lhs, _Quad rhs,
                                                 int flag);
long double __kmpc_atomic_float10_div_cpt_rev_fp(ident_t *id_ref, int gtid,
                                                 long double *lhs, _Quad rhs,
                                                 int flag);
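// Illustrative sketch (assumption; `loc`, `gtid`, `x`, `v` and `q` are
// placeholder names): the *_cpt_rev_fp entry points combine the reversed
// operand order with a _Quad right-hand side, e.g.
//     _Quad q = /* ... */;              // double x, v
//     #pragma omp atomic capture
//     { v = x; x = q / x; }
// could map to
//     v = __kmpc_atomic_float8_div_cpt_rev_fp(&loc, gtid, &x, q, 0);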
#endif // KMP_HAVE_QUAD

// End of OpenMP 4.0 capture

#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64

/* ------------------------------------------------------------------------ */

#endif /* KMP_ATOMIC_H */