/*
 * kmp_os.h -- KPTS runtime header file.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_OS_H
#define KMP_OS_H

#include "kmp_config.h"
#include <atomic>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>

#define KMP_FTN_PLAIN 1
#define KMP_FTN_APPEND 2
#define KMP_FTN_UPPER 3
#define KMP_FTN_PREPEND 4
#define KMP_FTN_UAPPEND 5

#define KMP_PTR_SKIP (sizeof(void *))

/* -------------------------- Compiler variations ------------------------ */

#define KMP_OFF 0
#define KMP_ON 1

#define KMP_MEM_CONS_VOLATILE 0
#define KMP_MEM_CONS_FENCE 1

#ifndef KMP_MEM_CONS_MODEL
#define KMP_MEM_CONS_MODEL KMP_MEM_CONS_VOLATILE
#endif

#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

/* ------------------------- Compiler recognition ---------------------- */
#define KMP_COMPILER_ICC 0
#define KMP_COMPILER_GCC 0
#define KMP_COMPILER_CLANG 0
#define KMP_COMPILER_MSVC 0
#define KMP_COMPILER_ICX 0

#if __INTEL_CLANG_COMPILER
#undef KMP_COMPILER_ICX
#define KMP_COMPILER_ICX 1
#elif defined(__INTEL_COMPILER)
#undef KMP_COMPILER_ICC
#define KMP_COMPILER_ICC 1
#elif defined(__clang__)
#undef KMP_COMPILER_CLANG
#define KMP_COMPILER_CLANG 1
#elif defined(__GNUC__)
#undef KMP_COMPILER_GCC
#define KMP_COMPILER_GCC 1
#elif defined(_MSC_VER)
#undef KMP_COMPILER_MSVC
#define KMP_COMPILER_MSVC 1
#else
#error Unknown compiler
#endif

#if (KMP_OS_LINUX || KMP_OS_WINDOWS || KMP_OS_FREEBSD)
#define KMP_AFFINITY_SUPPORTED 1
#if KMP_OS_WINDOWS && KMP_ARCH_X86_64
#define KMP_GROUP_AFFINITY 1
#else
#define KMP_GROUP_AFFINITY 0
#endif
#else
#define KMP_AFFINITY_SUPPORTED 0
#define KMP_GROUP_AFFINITY 0
#endif

#if (KMP_OS_LINUX || (KMP_OS_FREEBSD && __FreeBSD_version >= 1301000))
#define KMP_HAVE_SCHED_GETCPU 1
#else
#define KMP_HAVE_SCHED_GETCPU 0
#endif

/* Check for quad-precision extension. */
#define KMP_HAVE_QUAD 0
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
/* _Quad is already defined for icc */
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#elif KMP_COMPILER_CLANG
/* Clang doesn't support a software-implemented
   128-bit extended precision type yet */
typedef long double _Quad;
#elif KMP_COMPILER_GCC
/* GCC on NetBSD lacks __multc3/__divtc3 builtins needed for quad */
#if !KMP_OS_NETBSD
typedef __float128 _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#elif KMP_COMPILER_MSVC
typedef long double _Quad;
#endif
#else
#if __LDBL_MAX_EXP__ >= 16384 && KMP_COMPILER_GCC
typedef long double _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#define KMP_USE_X87CONTROL 0
#if KMP_OS_WINDOWS
#define KMP_END_OF_LINE "\r\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#if KMP_MSVC_COMPAT
typedef __int64 kmp_int64;
typedef unsigned __int64 kmp_uint64;
#define KMP_INT64_SPEC "I64d"
#define KMP_UINT64_SPEC "I64u"
#else
struct kmp_struct64 {
  kmp_int32 a, b;
};
typedef struct kmp_struct64 kmp_int64;
typedef struct kmp_struct64 kmp_uint64;
/* Not sure what to use for KMP_[U]INT64_SPEC here */
#endif
#if KMP_ARCH_X86 && KMP_MSVC_COMPAT
#undef KMP_USE_X87CONTROL
#define KMP_USE_X87CONTROL 1
#endif
#if KMP_ARCH_X86_64 || KMP_ARCH_AARCH64
#define KMP_INTPTR 1
typedef __int64 kmp_intptr_t;
typedef unsigned __int64 kmp_uintptr_t;
#define KMP_INTPTR_SPEC "I64d"
#define KMP_UINTPTR_SPEC "I64u"
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_OS_UNIX
#define KMP_END_OF_LINE "\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
typedef long long kmp_int64;
typedef unsigned long long kmp_uint64;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#define KMP_INT64_SPEC "lld"
#define KMP_UINT64_SPEC "llu"
#endif /* KMP_OS_UNIX */

#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS
#define KMP_SIZE_T_SPEC KMP_UINT32_SPEC
#elif KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 ||                 \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 || KMP_ARCH_VE
#define KMP_SIZE_T_SPEC KMP_UINT64_SPEC
#else
#error "Can't determine size_t printf format specifier."
#endif

#if KMP_ARCH_X86 || KMP_ARCH_ARM
#define KMP_SIZE_T_MAX (0xFFFFFFFF)
#else
#define KMP_SIZE_T_MAX (0xFFFFFFFFFFFFFFFF)
#endif

typedef size_t kmp_size_t;
typedef float kmp_real32;
typedef double kmp_real64;

#ifndef KMP_INTPTR
#define KMP_INTPTR 1
typedef long kmp_intptr_t;
typedef unsigned long kmp_uintptr_t;
#define KMP_INTPTR_SPEC "ld"
#define KMP_UINTPTR_SPEC "lu"
#endif

#ifdef BUILD_I8
typedef kmp_int64 kmp_int;
typedef kmp_uint64 kmp_uint;
#else
typedef kmp_int32 kmp_int;
typedef kmp_uint32 kmp_uint;
#endif /* BUILD_I8 */
#define KMP_INT_MAX ((kmp_int32)0x7FFFFFFF)
#define KMP_INT_MIN ((kmp_int32)0x80000000)

// stdarg handling
#if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64) &&                   \
    (KMP_OS_FREEBSD || KMP_OS_LINUX)
typedef va_list *kmp_va_list;
#define kmp_va_deref(ap) (*(ap))
#define kmp_va_addr_of(ap) (&(ap))
#else
typedef va_list kmp_va_list;
#define kmp_va_deref(ap) (ap)
#define kmp_va_addr_of(ap) (ap)
#endif
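
// Illustrative sketch (hypothetical helper, not part of this header): on the
// ABIs above va_list is an array type, so the runtime passes it by address
// and unwraps it with kmp_va_deref; elsewhere it is passed by value. A caller
// is insulated from the difference:
//
//   void read_first_arg(kmp_va_list ap) {
//     int first = va_arg(kmp_va_deref(ap), int);
//     ...
//   }
//   // At a call site: va_list args; va_start(args, fmt);
//   //                 read_first_arg(kmp_va_addr_of(args));
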
#ifdef __cplusplus
// macros to cast out qualifiers and to re-interpret types
#define CCAST(type, var) const_cast<type>(var)
#define RCAST(type, var) reinterpret_cast<type>(var)
//-------------------------------------------------------------------------
// template for debug prints specification ( d, u, lld, llu ), and to obtain
// signed/unsigned flavors of a type
template <typename T> struct traits_t {};
// int
template <> struct traits_t<signed int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffff;
  static const signed_t min_value = 0x80000000;
  static const int type_size = sizeof(signed_t);
};
// unsigned int
template <> struct traits_t<unsigned int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffff;
  static const unsigned_t min_value = 0x00000000;
  static const int type_size = sizeof(unsigned_t);
};
// long
template <> struct traits_t<signed long> {
  typedef signed long signed_t;
  typedef unsigned long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const int type_size = sizeof(signed_t);
};
// long long
template <> struct traits_t<signed long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffffffffffffLL;
  static const signed_t min_value = 0x8000000000000000LL;
  static const int type_size = sizeof(signed_t);
};
// unsigned long long
template <> struct traits_t<unsigned long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffffffffffffLL;
  static const unsigned_t min_value = 0x0000000000000000LL;
  static const int type_size = sizeof(unsigned_t);
};
//-------------------------------------------------------------------------
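
// Illustrative sketch (hypothetical helper, not part of this header): the
// traits let debug code build a printf format without hard-coding the length
// modifier for each integer type:
//
//   template <typename T> void print_value(char const *name, T v) {
//     char fmt[32];
//     snprintf(fmt, sizeof(fmt), "%%s = %%%s\n", traits_t<T>::spec);
//     printf(fmt, name, v); // spec is "d" for int, "lld" for long long, ...
//   }
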
#else
#define CCAST(type, var) (type)(var)
#define RCAST(type, var) (type)(var)
#endif // __cplusplus

#define KMP_EXPORT extern /* export declaration in guide libraries */

#if __GNUC__ >= 4 && !defined(__MINGW32__)
#define __forceinline __inline
#endif

/* Check if the OS/arch can support user-level mwait */
// All mwait code tests for UMWAIT first, so it should only fall back to ring3
// MWAIT if UMWAIT isn't available or KMP_USER_LEVEL_MWAIT is turned off
#define KMP_HAVE_MWAIT                                                         \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) &&    \
   !KMP_MIC2)
#define KMP_HAVE_UMWAIT                                                        \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) &&    \
   !KMP_MIC)

#if KMP_OS_WINDOWS
static inline int KMP_GET_PAGE_SIZE(void) {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}
#else
#define KMP_GET_PAGE_SIZE() getpagesize()
#endif

#define PAGE_ALIGNED(_addr)                                                    \
  (!((size_t)_addr & (size_t)(KMP_GET_PAGE_SIZE() - 1)))
#define ALIGN_TO_PAGE(x)                                                       \
  (void *)(((size_t)(x)) & ~((size_t)(KMP_GET_PAGE_SIZE() - 1)))
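
// Illustrative values (assuming a 4096-byte page):
//   PAGE_ALIGNED((void *)0x2000)  -> true  (0x2000 & 0xfff == 0)
//   PAGE_ALIGNED((void *)0x2010)  -> false
//   ALIGN_TO_PAGE((void *)0x2010) -> (void *)0x2000, i.e. rounded down to the
//   start of the enclosing page
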
/* ---------- Support for cache alignment, padding, etc. ----------------*/

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

#define INTERNODE_CACHE_LINE 4096 /* for multi-node systems */

/* Define the default size of the cache line */
#ifndef CACHE_LINE
#define CACHE_LINE 128 /* cache line size in bytes */
#else
#if (CACHE_LINE < 64) && !defined(KMP_OS_DARWIN)
// 2006-02-13: This produces too many warnings on OS X*. Disable for now
#warning CACHE_LINE is too small.
#endif
#endif /* CACHE_LINE */

#define KMP_CACHE_PREFETCH(ADDR) /* nothing */

// Define attribute that indicates that the fall through from the previous
// case label is intentional and should not be diagnosed by a compiler
// Code from libcxx/include/__config
// Use a function-like macro to imply that it must be followed by a semicolon
#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
#define KMP_FALLTHROUGH() [[fallthrough]]
// icc cannot properly tell this attribute is absent so force off
#elif KMP_COMPILER_ICC
#define KMP_FALLTHROUGH() ((void)0)
#elif __has_cpp_attribute(clang::fallthrough)
#define KMP_FALLTHROUGH() [[clang::fallthrough]]
#elif __has_attribute(fallthrough) || __GNUC__ >= 7
#define KMP_FALLTHROUGH() __attribute__((__fallthrough__))
#else
#define KMP_FALLTHROUGH() ((void)0)
#endif
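
// Illustrative sketch (hypothetical switch): the macro documents intentional
// fall-through in a form every supported compiler accepts:
//
//   switch (kind) {
//   case 1:
//     handle_one();
//     KMP_FALLTHROUGH(); // deliberately continue into case 2
//   case 2:
//     handle_two();
//     break;
//   }
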
#if KMP_HAVE_ATTRIBUTE_WAITPKG
#define KMP_ATTRIBUTE_TARGET_WAITPKG __attribute__((target("waitpkg")))
#else
#define KMP_ATTRIBUTE_TARGET_WAITPKG /* Nothing */
#endif

#if KMP_HAVE_ATTRIBUTE_RTM
#define KMP_ATTRIBUTE_TARGET_RTM __attribute__((target("rtm")))
#else
#define KMP_ATTRIBUTE_TARGET_RTM /* Nothing */
#endif

// Define attribute that indicates a function does not return
#if __cplusplus >= 201103L
#define KMP_NORETURN [[noreturn]]
#elif KMP_OS_WINDOWS
#define KMP_NORETURN __declspec(noreturn)
#else
#define KMP_NORETURN __attribute__((noreturn))
#endif

#if KMP_OS_WINDOWS && KMP_MSVC_COMPAT
#define KMP_ALIGN(bytes) __declspec(align(bytes))
#define KMP_THREAD_LOCAL __declspec(thread)
#define KMP_ALIAS /* Nothing */
#else
#define KMP_ALIGN(bytes) __attribute__((aligned(bytes)))
#define KMP_THREAD_LOCAL __thread
#define KMP_ALIAS(alias_of) __attribute__((alias(alias_of)))
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE && !KMP_DYNAMIC_LIB
#define KMP_WEAK_ATTRIBUTE_EXTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_EXTERNAL /* Nothing */
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE
#define KMP_WEAK_ATTRIBUTE_INTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_INTERNAL /* Nothing */
#endif

// Define KMP_VERSION_SYMBOL and KMP_EXPAND_NAME
#ifndef KMP_STR
#define KMP_STR(x) _KMP_STR(x)
#define _KMP_STR(x) #x
#endif

#ifdef KMP_USE_VERSION_SYMBOLS
// If using versioned symbols, KMP_EXPAND_NAME prepends
// __kmp_api_ to the real API name
#define KMP_EXPAND_NAME(api_name) _KMP_EXPAND_NAME(api_name)
#define _KMP_EXPAND_NAME(api_name) __kmp_api_##api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str)                         \
  _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, default_ver)           \
  __typeof__(__kmp_api_##api_name) __kmp_api_##api_name##_##ver_num##_alias    \
      __attribute__((alias(KMP_STR(__kmp_api_##api_name))));                   \
  __asm__(                                                                     \
      ".symver " KMP_STR(__kmp_api_##api_name##_##ver_num##_alias) "," KMP_STR(\
          api_name) "@" ver_str "\n\t");                                       \
  __asm__(".symver " KMP_STR(__kmp_api_##api_name) "," KMP_STR(                \
      api_name) "@@" default_ver "\n\t")

#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str)         \
  _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str,        \
                                 default_ver)                                  \
  __typeof__(__kmp_api_##apic_name) __kmp_api_##apic_name##_##ver_num##_alias  \
      __attribute__((alias(KMP_STR(__kmp_api_##apic_name))));                  \
  __asm__(".symver " KMP_STR(__kmp_api_##apic_name) "," KMP_STR(               \
      apic_name) "@@" default_ver "\n\t");                                     \
  __asm__(                                                                     \
      ".symver " KMP_STR(__kmp_api_##apic_name##_##ver_num##_alias) "," KMP_STR(\
          api_name) "@" ver_str "\n\t")

#else // KMP_USE_VERSION_SYMBOLS
#define KMP_EXPAND_NAME(api_name) api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str) /* Nothing */
#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num,                  \
                                ver_str) /* Nothing */
#endif // KMP_USE_VERSION_SYMBOLS
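
// Illustrative sketch (hypothetical entry point and version string): for an
// API implemented as __kmp_api_foo, KMP_EXPAND_NAME(foo) names the internal
// definition, and
//   KMP_VERSION_SYMBOL(foo, 10, "VERSION_1.0");
// emits .symver directives so the object exports both foo@VERSION_1.0 (an
// alias bound to the old version) and foo@@VERSION (the default version).
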
/* Temporary note: if performance testing of this passes, we can remove
   all references to KMP_DO_ALIGN and replace with KMP_ALIGN. */
#define KMP_DO_ALIGN(bytes) KMP_ALIGN(bytes)
#define KMP_ALIGN_CACHE KMP_ALIGN(CACHE_LINE)
#define KMP_ALIGN_CACHE_INTERNODE KMP_ALIGN(INTERNODE_CACHE_LINE)

/* General purpose fence types for memory operations */
enum kmp_mem_fence_type {
  kmp_no_fence, /* No memory fence */
  kmp_acquire_fence, /* Acquire (read) memory fence */
  kmp_release_fence, /* Release (write) memory fence */
  kmp_full_fence /* Full (read+write) memory fence */
};

// Synchronization primitives

#if KMP_ASM_INTRINS && KMP_OS_WINDOWS &&                                       \
    !((KMP_ARCH_AARCH64 || KMP_ARCH_ARM) &&                                    \
      (KMP_COMPILER_CLANG || KMP_COMPILER_GCC))

#if KMP_MSVC_COMPAT && !KMP_COMPILER_CLANG
#pragma intrinsic(InterlockedExchangeAdd)
#pragma intrinsic(InterlockedCompareExchange)
#pragma intrinsic(InterlockedExchange)
#if !(KMP_COMPILER_ICX && KMP_32_BIT_ARCH)
#pragma intrinsic(InterlockedExchange64)
#endif
#endif

// Using InterlockedIncrement / InterlockedDecrement causes a library loading
// ordering problem, so we use InterlockedExchangeAdd instead.
#define KMP_TEST_THEN_INC32(p) InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_ADD4_32(p) InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_DEC32(p) InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  InterlockedExchangeAdd((volatile long *)(p), (v))
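
// Illustrative sketch (hypothetical counter): these are fetch-and-op macros,
// so they return the value the location held *before* the update:
//
//   volatile kmp_int32 count = 5;
//   kmp_int32 old = KMP_TEST_THEN_INC32(&count); // old == 5, count == 6
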
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  InterlockedCompareExchange((volatile long *)(p), (long)(sv), (long)(cv))

#define KMP_XCHG_FIXED32(p, v)                                                 \
  InterlockedExchange((volatile long *)(p), (long)(v))
#define KMP_XCHG_FIXED64(p, v)                                                 \
  InterlockedExchange64((volatile kmp_int64 *)(p), (kmp_int64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  kmp_int32 tmp = InterlockedExchange((volatile long *)p, *(long *)&v);
  return *(kmp_real32 *)&tmp;
}

#define KMP_TEST_THEN_OR8(p, v) __kmp_test_then_or8((p), (v))
#define KMP_TEST_THEN_AND8(p, v) __kmp_test_then_and8((p), (v))
#define KMP_TEST_THEN_OR32(p, v) __kmp_test_then_or32((p), (v))
#define KMP_TEST_THEN_AND32(p, v) __kmp_test_then_and32((p), (v))
#define KMP_TEST_THEN_OR64(p, v) __kmp_test_then_or64((p), (v))
#define KMP_TEST_THEN_AND64(p, v) __kmp_test_then_and64((p), (v))

extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

#if KMP_ARCH_AARCH64 && KMP_COMPILER_MSVC && !KMP_COMPILER_CLANG
#define KMP_TEST_THEN_INC64(p) _InterlockedExchangeAdd64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 1LL)
#define KMP_TEST_THEN_ADD4_64(p) _InterlockedExchangeAdd64((p), 4LL)
// #define KMP_TEST_THEN_ADD4_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 4LL)
// #define KMP_TEST_THEN_DEC64(p) _InterlockedExchangeAdd64((p), -1LL)
// #define KMP_TEST_THEN_DEC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), -1LL)
// #define KMP_TEST_THEN_ADD8(p, v) _InterlockedExchangeAdd8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) _InterlockedExchangeAdd64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store_acq8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store_rel8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store_acq16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store_rel16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store_acq32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store_rel32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store_acq64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store_rel64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store_ptr((void *volatile *)(p), (void *)(cv), (void *)(sv))

// KMP_COMPARE_AND_STORE expects this order:       pointer, compare, exchange
// _InterlockedCompareExchange expects this order: pointer, exchange, compare
// KMP_COMPARE_AND_STORE also returns a bool indicating a successful write. A
// write is successful if the return value of _InterlockedCompareExchange is the
// same as the compare value.
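
// Illustrative sketch (hypothetical flag): note the compare/exchange argument
// order relative to the intrinsics, and the boolean result:
//
//   volatile kmp_int32 flag = 0;
//   if (KMP_COMPARE_AND_STORE_ACQ32(&flag, 0, 1)) {
//     // this thread won the race: flag was 0 and is now 1
//   }
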
inline kmp_int8 __kmp_compare_and_store_acq8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_acq(p, sv, cv) == cv;
}

inline kmp_int8 __kmp_compare_and_store_rel8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_rel(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_acq16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_acq(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_rel16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_acq((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_rel((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_acq(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_ptr(void *volatile *p, void *cv,
                                             void *sv) {
  return _InterlockedCompareExchangePointer(p, sv, cv) == cv;
}

// The _RET versions return the value instead of a bool

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  _InterlockedCompareExchange8((p), (sv), (cv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  _InterlockedCompareExchange16((p), (sv), (cv))

#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  _InterlockedCompareExchange64((volatile kmp_int64 *)(p), (kmp_int64)(sv),    \
                                (kmp_int64)(cv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  _InterlockedExchange8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) _InterlockedExchange16((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));

inline kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v) {
  kmp_int64 tmp =
      _InterlockedExchange64((volatile kmp_int64 *)p, *(kmp_int64 *)&v);
  return *(kmp_real64 *)&tmp;
}

#else // !KMP_ARCH_AARCH64

// Routines that we still need to implement in assembly.
extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

//#define KMP_TEST_THEN_INC32(p) __kmp_test_then_add32((p), 1)
//#define KMP_TEST_THEN_INC_ACQ32(p) __kmp_test_then_add32((p), 1)
#define KMP_TEST_THEN_INC64(p) __kmp_test_then_add64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) __kmp_test_then_add64((p), 1LL)
//#define KMP_TEST_THEN_ADD4_32(p) __kmp_test_then_add32((p), 4)
//#define KMP_TEST_THEN_ADD4_ACQ32(p) __kmp_test_then_add32((p), 4)
#define KMP_TEST_THEN_ADD4_64(p) __kmp_test_then_add64((p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p) __kmp_test_then_add64((p), 4LL)
//#define KMP_TEST_THEN_DEC32(p) __kmp_test_then_add32((p), -1)
//#define KMP_TEST_THEN_DEC_ACQ32(p) __kmp_test_then_add32((p), -1)
#define KMP_TEST_THEN_DEC64(p) __kmp_test_then_add64((p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p) __kmp_test_then_add64((p), -1LL)
//#define KMP_TEST_THEN_ADD32(p, v) __kmp_test_then_add32((p), (v))
#define KMP_TEST_THEN_ADD8(p, v) __kmp_test_then_add8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) __kmp_test_then_add64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
//#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
//#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
//#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));

#endif

#elif (KMP_ASM_INTRINS && KMP_OS_UNIX) || !(KMP_ARCH_X86 || KMP_ARCH_X86_64)

/* cast p to correct type so that proper intrinsic will be used */
#define KMP_TEST_THEN_INC32(p)                                                 \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_INC64(p)                                                 \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_INC64(p)                                                 \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_ADD4_32(p)                                               \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_DEC32(p)                                                 \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_ADD8(p, v)                                               \
  __sync_fetch_and_add((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), (kmp_int32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __atomic_fetch_add((volatile kmp_uint64 *)(p), (kmp_uint64)(v),              \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), (kmp_int64)(v))
#endif

#define KMP_TEST_THEN_OR8(p, v)                                                \
  __sync_fetch_and_or((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v)                                               \
  __sync_fetch_and_and((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v)                                               \
  __sync_fetch_and_or((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v)                                              \
  __sync_fetch_and_and((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __atomic_fetch_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v),               \
                    __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __atomic_fetch_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v),              \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __sync_fetch_and_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __sync_fetch_and_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#endif

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),     \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),     \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),   \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),   \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),   \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),   \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __sync_bool_compare_and_swap((void *volatile *)(p), (void *)(cv),            \
                               (void *)(sv))

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __sync_val_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),      \
                              (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),    \
                              (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),    \
                              (kmp_uint32)(sv))

#if KMP_ARCH_MIPS
static inline bool mips_sync_bool_compare_and_swap(volatile kmp_uint64 *p,
                                                   kmp_uint64 cv,
                                                   kmp_uint64 sv) {
  return __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                                   __ATOMIC_SEQ_CST);
}
static inline kmp_uint64 mips_sync_val_compare_and_swap(volatile kmp_uint64 *p,
                                                        kmp_uint64 cv,
                                                        kmp_uint64 sv) {
  __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                            __ATOMIC_SEQ_CST);
  return cv;
}
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p),                  \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p),                  \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  mips_sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
                                 (kmp_uint64)(sv))
#else
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),   \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),   \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),    \
                              (kmp_uint64)(sv))
#endif

#if KMP_OS_DARWIN && defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1800
#define KMP_XCHG_FIXED8(p, v)                                                  \
  __atomic_exchange_1((volatile kmp_uint8 *)(p), (kmp_uint8)(v),               \
                      __ATOMIC_SEQ_CST)
#else
#define KMP_XCHG_FIXED8(p, v)                                                  \
  __sync_lock_test_and_set((volatile kmp_uint8 *)(p), (kmp_uint8)(v))
#endif
#define KMP_XCHG_FIXED16(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint16 *)(p), (kmp_uint16)(v))
#define KMP_XCHG_FIXED32(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_XCHG_FIXED64(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  volatile kmp_uint32 *up;
  kmp_uint32 uv;
  memcpy(&up, &p, sizeof(up));
  memcpy(&uv, &v, sizeof(uv));
  kmp_int32 tmp = __sync_lock_test_and_set(up, uv);
  kmp_real32 ftmp;
  memcpy(&ftmp, &tmp, sizeof(tmp));
  return ftmp;
}

inline kmp_real64 KMP_XCHG_REAL64(volatile kmp_real64 *p, kmp_real64 v) {
  volatile kmp_uint64 *up;
  kmp_uint64 uv;
  memcpy(&up, &p, sizeof(up));
  memcpy(&uv, &v, sizeof(uv));
  kmp_int64 tmp = __sync_lock_test_and_set(up, uv);
  kmp_real64 dtmp;
  memcpy(&dtmp, &tmp, sizeof(tmp));
  return dtmp;
}
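
// The memcpy round-trips above deliberately reinterpret the floating-point
// bits as integers (and back) without violating strict-aliasing rules, so an
// integer __sync_lock_test_and_set can implement a floating-point exchange.
// Illustrative sketch (hypothetical caller):
//
//   volatile kmp_real64 shared = 1.0;
//   kmp_real64 prev = KMP_XCHG_REAL64(&shared, 2.0); // prev == 1.0
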
#else

extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

#define KMP_TEST_THEN_INC32(p)                                                 \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC64(p)                                                 \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_ADD4_32(p)                                               \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC32(p)                                                 \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_ADD8(p, v)                                               \
  __kmp_test_then_add8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), (kmp_int32)(v))
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), (kmp_int64)(v))

#define KMP_TEST_THEN_OR8(p, v)                                                \
  __kmp_test_then_or8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v)                                               \
  __kmp_test_then_and8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v)                                               \
  __kmp_test_then_or32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v)                                              \
  __kmp_test_then_and32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __kmp_test_then_or64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __kmp_test_then_and64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv),           \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv),           \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv),        \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv),        \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  __kmp_compare_and_store_ret32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));

#endif /* KMP_ASM_INTRINS */

/* ------------- relaxed consistency memory model stuff ------------------ */

#if KMP_OS_WINDOWS
#ifdef __ABSOFT_WIN
#define KMP_MB() asm("nop")
#define KMP_IMB() asm("nop")
#else
#define KMP_MB() /* _asm{ nop } */
#define KMP_IMB() /* _asm{ nop } */
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_ARCH_PPC64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS ||     \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 || KMP_ARCH_VE
#if KMP_OS_WINDOWS
#undef KMP_MB
#define KMP_MB() std::atomic_thread_fence(std::memory_order_seq_cst)
#else /* !KMP_OS_WINDOWS */
#define KMP_MB() __sync_synchronize()
#endif
#endif

#ifndef KMP_MB
#define KMP_MB() /* nothing to do */
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_MIC
// fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used.
// We shouldn't need it, though, since the ABI rules require that
// * If the compiler generates NGO stores it also generates the fence
// * If users hand-code NGO stores they should insert the fence
// therefore no incomplete unordered stores should be visible.
#define KMP_MFENCE() /* Nothing */
#define KMP_SFENCE() /* Nothing */
#else
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
#define KMP_MFENCE_() _mm_mfence()
#define KMP_SFENCE_() _mm_sfence()
#elif KMP_COMPILER_MSVC
#define KMP_MFENCE_() MemoryBarrier()
#define KMP_SFENCE_() MemoryBarrier()
#else
#define KMP_MFENCE_() __sync_synchronize()
#define KMP_SFENCE_() __sync_synchronize()
#endif
#define KMP_MFENCE()                                                           \
  if (UNLIKELY(!__kmp_cpuinfo.initialized)) {                                  \
    __kmp_query_cpuid(&__kmp_cpuinfo);                                         \
  }                                                                            \
  if (__kmp_cpuinfo.flags.sse2) {                                              \
    KMP_MFENCE_();                                                             \
  }
#define KMP_SFENCE() KMP_SFENCE_()
#endif
#else
#define KMP_MFENCE() KMP_MB()
#define KMP_SFENCE() KMP_MB()
#endif

#ifndef KMP_IMB
#define KMP_IMB() /* nothing to do */
#endif

#ifndef KMP_ST_REL32
#define KMP_ST_REL32(A, D) (*(A) = (D))
#endif

#ifndef KMP_ST_REL64
#define KMP_ST_REL64(A, D) (*(A) = (D))
#endif

#ifndef KMP_LD_ACQ32
#define KMP_LD_ACQ32(A) (*(A))
#endif

#ifndef KMP_LD_ACQ64
#define KMP_LD_ACQ64(A) (*(A))
#endif

/* ------------------------------------------------------------------------ */
// FIXME - maybe this should be
//
// #define TCR_4(a) (*(volatile kmp_int32 *)(&a))
// #define TCW_4(a, b) (a) = (*(volatile kmp_int32 *)&(b))
//
// #define TCR_8(a) (*(volatile kmp_int64 *)(a))
// #define TCW_8(a, b) (a) = (*(volatile kmp_int64 *)(&b))
//
// I'm fairly certain this is the correct thing to do, but I'm afraid
// of performance regressions.

#define TCR_1(a) (a)
#define TCW_1(a, b) (a) = (b)
#define TCR_4(a) (a)
#define TCW_4(a, b) (a) = (b)
#define TCI_4(a) (++(a))
#define TCD_4(a) (--(a))
#define TCR_8(a) (a)
#define TCW_8(a, b) (a) = (b)
#define TCI_8(a) (++(a))
#define TCD_8(a) (--(a))
#define TCR_SYNC_4(a) (a)
#define TCW_SYNC_4(a, b) (a) = (b)
#define TCX_SYNC_4(a, b, c)                                                    \
  KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)(volatile void *)&(a),     \
                              (kmp_int32)(b), (kmp_int32)(c))
#define TCR_SYNC_8(a) (a)
#define TCW_SYNC_8(a, b) (a) = (b)
#define TCX_SYNC_8(a, b, c)                                                    \
  KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)(volatile void *)&(a),     \
                              (kmp_int64)(b), (kmp_int64)(c))

#if KMP_ARCH_X86 || KMP_ARCH_MIPS
// What about ARM?
#define TCR_PTR(a) ((void *)TCR_4(a))
#define TCW_PTR(a, b) TCW_4((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_4(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_4((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_4((a), (b), (c)))

#else /* 64 bit pointers */

#define TCR_PTR(a) ((void *)TCR_8(a))
#define TCW_PTR(a, b) TCW_8((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_8(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_8((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_8((a), (b), (c)))

#endif /* KMP_ARCH_X86 */
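
// Illustrative sketch (hypothetical shared field): TCR_*/TCW_* mark reads and
// writes of shared state; the _PTR forms select the 4- or 8-byte flavor to
// match the pointer width of the target:
//
//   void *old_task = TCR_PTR(team->t_task);  // hypothetical member
//   TCW_PTR(team->t_task, new_task);
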
/* If these FTN_{TRUE,FALSE} values change, may need to change several places
   where they are used to check that language is Fortran, not C. */

#ifndef FTN_TRUE
#define FTN_TRUE TRUE
#endif

#ifndef FTN_FALSE
#define FTN_FALSE FALSE
#endif

typedef void (*microtask_t)(int *gtid, int *npr, ...);
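
// Illustrative sketch (hypothetical outlined function): a compiler outlines a
// parallel-region body into a function of this shape, which the runtime then
// invokes once per thread:
//
//   void outlined_body(int *gtid, int *npr, ...) { /* region body */ }
//   microtask_t fn = &outlined_body;
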
#ifdef USE_VOLATILE_CAST
#define VOLATILE_CAST(x) (volatile x)
#else
#define VOLATILE_CAST(x) (x)
#endif

#define KMP_WAIT __kmp_wait_4
#define KMP_WAIT_PTR __kmp_wait_4_ptr
#define KMP_EQ __kmp_eq_4
#define KMP_NEQ __kmp_neq_4
#define KMP_LT __kmp_lt_4
#define KMP_GE __kmp_ge_4
#define KMP_LE __kmp_le_4

/* Workaround for Intel(R) 64 code gen bug when taking address of static array
 * (Intel(R) 64 Tracker #138) */
#if (KMP_ARCH_X86_64 || KMP_ARCH_PPC64) && KMP_OS_LINUX
#define STATIC_EFI2_WORKAROUND
#else
#define STATIC_EFI2_WORKAROUND static
#endif

// Support of BGET usage
#ifndef KMP_USE_BGET
#define KMP_USE_BGET 1
#endif

// Switches for OSS builds
#ifndef USE_CMPXCHG_FIX
#define USE_CMPXCHG_FIX 1
#endif

// Enable dynamic user lock
#define KMP_USE_DYNAMIC_LOCK 1

// Enable Intel(R) Transactional Synchronization Extensions (Intel(R) TSX) if
// dynamic user lock is turned on
#if KMP_USE_DYNAMIC_LOCK
// Visual studio can't handle the asm sections in this code
#define KMP_USE_TSX (KMP_ARCH_X86 || KMP_ARCH_X86_64) && !KMP_COMPILER_MSVC
#ifdef KMP_USE_ADAPTIVE_LOCKS
#undef KMP_USE_ADAPTIVE_LOCKS
#endif
#define KMP_USE_ADAPTIVE_LOCKS KMP_USE_TSX
#endif // KMP_USE_DYNAMIC_LOCK

// Enable tick time conversion of ticks to seconds
#if KMP_STATS_ENABLED
#define KMP_HAVE_TICK_TIME                                                     \
  (KMP_OS_LINUX && (KMP_MIC || KMP_ARCH_X86 || KMP_ARCH_X86_64))
#endif

enum kmp_warnings_level {
  kmp_warnings_off = 0, /* No warnings */
  kmp_warnings_low, /* Minimal warnings (default) */
  kmp_warnings_explicit = 6, /* Explicitly set to ON - more warnings */
  kmp_warnings_verbose /* reserved */
};

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

#include "kmp_safe_c_api.h"

// Macros for C++11 atomic functions
#define KMP_ATOMIC_LD(p, order) (p)->load(std::memory_order_##order)
#define KMP_ATOMIC_OP(op, p, v, order) (p)->op(v, std::memory_order_##order)

// For non-default load/store
#define KMP_ATOMIC_LD_ACQ(p) KMP_ATOMIC_LD(p, acquire)
#define KMP_ATOMIC_LD_RLX(p) KMP_ATOMIC_LD(p, relaxed)
#define KMP_ATOMIC_ST_REL(p, v) KMP_ATOMIC_OP(store, p, v, release)
#define KMP_ATOMIC_ST_RLX(p, v) KMP_ATOMIC_OP(store, p, v, relaxed)

// For non-default fetch_<op>
#define KMP_ATOMIC_ADD(p, v) KMP_ATOMIC_OP(fetch_add, p, v, acq_rel)
#define KMP_ATOMIC_SUB(p, v) KMP_ATOMIC_OP(fetch_sub, p, v, acq_rel)
#define KMP_ATOMIC_AND(p, v) KMP_ATOMIC_OP(fetch_and, p, v, acq_rel)
#define KMP_ATOMIC_OR(p, v) KMP_ATOMIC_OP(fetch_or, p, v, acq_rel)
#define KMP_ATOMIC_INC(p) KMP_ATOMIC_OP(fetch_add, p, 1, acq_rel)
#define KMP_ATOMIC_DEC(p) KMP_ATOMIC_OP(fetch_sub, p, 1, acq_rel)
#define KMP_ATOMIC_ADD_RLX(p, v) KMP_ATOMIC_OP(fetch_add, p, v, relaxed)
#define KMP_ATOMIC_INC_RLX(p) KMP_ATOMIC_OP(fetch_add, p, 1, relaxed)
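
// Illustrative sketch (hypothetical counter): the macros expand to C++11
// atomic operations with an explicit memory order:
//
//   std::atomic<kmp_int32> nwaiting;
//   kmp_int32 n = KMP_ATOMIC_LD_ACQ(&nwaiting); // load(memory_order_acquire)
//   KMP_ATOMIC_INC(&nwaiting);                  // fetch_add(1, acq_rel)
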
// Callers of the following functions cannot see the side effect on "expected".
template <typename T>
bool __kmp_atomic_compare_store(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acq_rel, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_acq(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acquire, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_rel(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_release, std::memory_order_relaxed);
}
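
// Illustrative sketch (hypothetical flag): because "expected" is passed by
// value, a failed exchange does not report the observed value back; the
// helpers only answer "did the swap happen?":
//
//   std::atomic<int> state{0};
//   if (__kmp_atomic_compare_store(&state, 0, 1)) {
//     // state was 0 and is now 1
//   }
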
// Symbol lookup on Linux/Windows
#if KMP_OS_WINDOWS
extern void *__kmp_lookup_symbol(const char *name, bool next = false);
#define KMP_DLSYM(name) __kmp_lookup_symbol(name)
#define KMP_DLSYM_NEXT(name) __kmp_lookup_symbol(name, true)
#else
#define KMP_DLSYM(name) dlsym(RTLD_DEFAULT, name)
#define KMP_DLSYM_NEXT(name) dlsym(RTLD_NEXT, name)
#endif
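
// Illustrative sketch (hypothetical lookup): KMP_DLSYM_NEXT asks for a
// definition later in the search order, e.g. to detect an overriding symbol:
//
//   typedef void (*fn_t)(void);
//   fn_t next = (fn_t)KMP_DLSYM_NEXT("some_entry_point"); // hypothetical name
//   if (next) { /* a later definition exists */ }
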
#endif /* KMP_OS_H */