/*
 * kmp_os.h -- KPTS runtime header file.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_OS_H
#define KMP_OS_H

#include "kmp_config.h"
#include <atomic>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#define KMP_FTN_PLAIN 1
#define KMP_FTN_APPEND 2
#define KMP_FTN_UPPER 3
/*
#define KMP_FTN_PREPEND 4
#define KMP_FTN_UAPPEND 5
*/

#define KMP_PTR_SKIP (sizeof(void *))
/* -------------------------- Compiler variations ------------------------ */

#define KMP_OFF 0
#define KMP_ON 1

#define KMP_MEM_CONS_VOLATILE 0
#define KMP_MEM_CONS_FENCE 1

#ifndef KMP_MEM_CONS_MODEL
#define KMP_MEM_CONS_MODEL KMP_MEM_CONS_VOLATILE
#endif

#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
/* ------------------------- Compiler recognition ---------------------- */
#define KMP_COMPILER_ICC 0
#define KMP_COMPILER_GCC 0
#define KMP_COMPILER_CLANG 0
#define KMP_COMPILER_MSVC 0
#define KMP_COMPILER_ICX 0

#if __INTEL_CLANG_COMPILER
#undef KMP_COMPILER_ICX
#define KMP_COMPILER_ICX 1
#elif defined(__INTEL_COMPILER)
#undef KMP_COMPILER_ICC
#define KMP_COMPILER_ICC 1
#elif defined(__clang__)
#undef KMP_COMPILER_CLANG
#define KMP_COMPILER_CLANG 1
#elif defined(__GNUC__)
#undef KMP_COMPILER_GCC
#define KMP_COMPILER_GCC 1
#elif defined(_MSC_VER)
#undef KMP_COMPILER_MSVC
#define KMP_COMPILER_MSVC 1
#else
#error Unknown compiler
#endif
#if (KMP_OS_LINUX || KMP_OS_WINDOWS || KMP_OS_FREEBSD) && !KMP_OS_WASI
#define KMP_AFFINITY_SUPPORTED 1
#if KMP_OS_WINDOWS && KMP_ARCH_X86_64
#define KMP_GROUP_AFFINITY 1
#else
#define KMP_GROUP_AFFINITY 0
#endif
#else
#define KMP_AFFINITY_SUPPORTED 0
#define KMP_GROUP_AFFINITY 0
#endif

#if (KMP_OS_LINUX || (KMP_OS_FREEBSD && __FreeBSD_version >= 1301000))
#define KMP_HAVE_SCHED_GETCPU 1
#else
#define KMP_HAVE_SCHED_GETCPU 0
#endif
/* Check for quad-precision extension. */
#define KMP_HAVE_QUAD 0
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
/* _Quad is already defined for icc */
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#elif KMP_COMPILER_CLANG
/* Clang doesn't support a software-implemented
   128-bit extended precision type yet */
typedef long double _Quad;
#elif KMP_COMPILER_GCC
/* GCC on NetBSD lacks __multc3/__divtc3 builtins needed for quad until
   NetBSD 10.0 which ships with GCC 10.5 */
#if (!KMP_OS_NETBSD || __GNUC__ >= 10)
typedef __float128 _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#elif KMP_COMPILER_MSVC
typedef long double _Quad;
#endif
#else
#if __LDBL_MAX_EXP__ >= 16384 && KMP_COMPILER_GCC
typedef long double _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#define KMP_USE_X87CONTROL 0
#if KMP_OS_WINDOWS
#define KMP_END_OF_LINE "\r\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#ifdef _MSC_VER
typedef __int64 kmp_int64;
typedef unsigned __int64 kmp_uint64;
#define KMP_INT64_SPEC "I64d"
#define KMP_UINT64_SPEC "I64u"
#else
struct kmp_struct64 {
  kmp_int32 a, b;
};
typedef struct kmp_struct64 kmp_int64;
typedef struct kmp_struct64 kmp_uint64;
/* Not sure what to use for KMP_[U]INT64_SPEC here */
#endif
#if KMP_ARCH_X86 && KMP_MSVC_COMPAT
#undef KMP_USE_X87CONTROL
#define KMP_USE_X87CONTROL 1
#endif
#if KMP_ARCH_X86_64 || KMP_ARCH_AARCH64
#define KMP_INTPTR 1
typedef __int64 kmp_intptr_t;
typedef unsigned __int64 kmp_uintptr_t;
#define KMP_INTPTR_SPEC "I64d"
#define KMP_UINTPTR_SPEC "I64u"
#endif
#endif /* KMP_OS_WINDOWS */
#if KMP_OS_UNIX
#define KMP_END_OF_LINE "\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
typedef long long kmp_int64;
typedef unsigned long long kmp_uint64;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#define KMP_INT64_SPEC "lld"
#define KMP_UINT64_SPEC "llu"
#endif /* KMP_OS_UNIX */
#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS || KMP_ARCH_WASM ||          \
    KMP_ARCH_PPC
#define KMP_SIZE_T_SPEC KMP_UINT32_SPEC
#elif KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 ||                 \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||             \
    KMP_ARCH_VE || KMP_ARCH_S390X
#define KMP_SIZE_T_SPEC KMP_UINT64_SPEC
#else
#error "Can't determine size_t printf format specifier."
#endif

#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_WASM || KMP_ARCH_PPC
#define KMP_SIZE_T_MAX (0xFFFFFFFF)
#else
#define KMP_SIZE_T_MAX (0xFFFFFFFFFFFFFFFF)
#endif
typedef size_t kmp_size_t;
typedef float kmp_real32;
typedef double kmp_real64;

#ifndef KMP_INTPTR
#define KMP_INTPTR 1
typedef long kmp_intptr_t;
typedef unsigned long kmp_uintptr_t;
#define KMP_INTPTR_SPEC "ld"
#define KMP_UINTPTR_SPEC "lu"
#endif

#ifdef BUILD_I8
typedef kmp_int64 kmp_int;
typedef kmp_uint64 kmp_uint;
#else
typedef kmp_int32 kmp_int;
typedef kmp_uint32 kmp_uint;
#endif /* BUILD_I8 */
#define KMP_INT_MAX ((kmp_int32)0x7FFFFFFF)
#define KMP_INT_MIN ((kmp_int32)0x80000000)
#if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64 || KMP_ARCH_WASM) &&  \
    (KMP_OS_FREEBSD || KMP_OS_LINUX || KMP_OS_WASI)
typedef va_list *kmp_va_list;
#define kmp_va_deref(ap) (*(ap))
#define kmp_va_addr_of(ap) (&(ap))
#else
typedef va_list kmp_va_list;
#define kmp_va_deref(ap) (ap)
#define kmp_va_addr_of(ap) (ap)
#endif
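// Usage sketch (illustrative only; consume/entry are hypothetical helpers):
// kmp_va_deref/kmp_va_addr_of let a va_list cross an internal call boundary
// uniformly, whether kmp_va_list is va_list itself or a pointer to it.
//
//   void consume(kmp_va_list ap) { int x = va_arg(kmp_va_deref(ap), int); }
//
//   void entry(int n, ...) {
//     va_list args;
//     va_start(args, n);
//     consume(kmp_va_addr_of(args));
//     va_end(args);
//   }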
#ifdef __cplusplus
// macros to cast out qualifiers and to re-interpret types
#define CCAST(type, var) const_cast<type>(var)
#define RCAST(type, var) reinterpret_cast<type>(var)
//-------------------------------------------------------------------------
// template for debug prints specification ( d, u, lld, llu ), and to obtain
// signed/unsigned flavors of a type
template <typename T> struct traits_t {};
// int
template <> struct traits_t<signed int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffff;
  static const signed_t min_value = 0x80000000;
  static const int type_size = sizeof(signed_t);
};
// unsigned int
template <> struct traits_t<unsigned int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffff;
  static const unsigned_t min_value = 0x00000000;
  static const int type_size = sizeof(unsigned_t);
};
// long
template <> struct traits_t<signed long> {
  typedef signed long signed_t;
  typedef unsigned long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const int type_size = sizeof(signed_t);
};
// long long
template <> struct traits_t<signed long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffffffffffffLL;
  static const signed_t min_value = 0x8000000000000000LL;
  static const int type_size = sizeof(signed_t);
};
// unsigned long long
template <> struct traits_t<unsigned long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffffffffffffLL;
  static const unsigned_t min_value = 0x0000000000000000LL;
  static const int type_size = sizeof(unsigned_t);
};
//-------------------------------------------------------------------------
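// Usage sketch (illustrative only): the spec member carries the printf
// length specifier for the specialized type, so generic code can build
// format strings at runtime.
//
//   char fmt[8];
//   snprintf(fmt, sizeof(fmt), "%%%s", traits_t<kmp_int64>::spec);
//   // fmt is "%lld" where kmp_int64 is long long; usable as printf(fmt, v).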
#else
#define CCAST(type, var) (type)(var)
#define RCAST(type, var) (type)(var)
#endif // __cplusplus

#define KMP_EXPORT extern /* export declaration in guide libraries */

#if __GNUC__ >= 4 && !defined(__MINGW32__)
#define __forceinline __inline
#endif
/* Check if the OS/arch can support user-level mwait */
// All mwait code tests for UMWAIT first, so it should only fall back to ring3
// MWAIT if KMP_USER_LEVEL_MWAIT is turned off
#define KMP_HAVE_MWAIT                                                         \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) &&    \
   !KMP_MIC2)
#define KMP_HAVE_UMWAIT                                                        \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) &&    \
   !KMP_MIC)

#if KMP_OS_WINDOWS
// Don't include everything related to NT status code, we'll do that explicitly
#define WIN32_NO_STATUS
#include <windows.h>

static inline int KMP_GET_PAGE_SIZE(void) {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}
#else
#define KMP_GET_PAGE_SIZE() getpagesize()
#endif

#define PAGE_ALIGNED(_addr)                                                    \
  (!((size_t)_addr & (size_t)(KMP_GET_PAGE_SIZE() - 1)))
#define ALIGN_TO_PAGE(x)                                                       \
  (void *)(((size_t)(x)) & ~((size_t)(KMP_GET_PAGE_SIZE() - 1)))
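// Worked example (illustrative only; the address is hypothetical): with
// 4 KiB pages, ALIGN_TO_PAGE rounds an address down to its page base and
// PAGE_ALIGNED tests for an exact page boundary.
//
//   void *addr = (void *)0x12345;
//   void *base = ALIGN_TO_PAGE(addr); // 0x12000 for a 4096-byte page
//   int ok = PAGE_ALIGNED(base);      // nonzero: base sits on a boundary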
/* ---------- Support for cache alignment, padding, etc. ----------------*/

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

#define INTERNODE_CACHE_LINE 4096 /* for multi-node systems */

/* Define the default size of the cache line */
#ifndef CACHE_LINE
#define CACHE_LINE 128 /* cache line size in bytes */
#else
#if (CACHE_LINE < 64) && !defined(KMP_OS_DARWIN)
// 2006-02-13: This produces too many warnings on OS X*. Disable for now
//#warning CACHE_LINE is too small.
#endif
#endif /* CACHE_LINE */

#define KMP_CACHE_PREFETCH(ADDR) /* nothing */
// Define attribute that indicates that the fall through from the previous
// case label is intentional and should not be diagnosed by a compiler
//   Code from libcxx/include/__config
// Use a function like macro to imply that it must be followed by a semicolon
#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
#define KMP_FALLTHROUGH() [[fallthrough]]
// icc cannot properly tell this attribute is absent so force off
#elif KMP_COMPILER_ICC
#define KMP_FALLTHROUGH() ((void)0)
#elif __has_cpp_attribute(clang::fallthrough)
#define KMP_FALLTHROUGH() [[clang::fallthrough]]
#elif __has_attribute(fallthrough) || __GNUC__ >= 7
#define KMP_FALLTHROUGH() __attribute__((__fallthrough__))
#else
#define KMP_FALLTHROUGH() ((void)0)
#endif
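// Usage sketch (illustrative only; prepare/run are hypothetical helpers):
//
//   switch (kind) {
//   case 0:
//     prepare();
//     KMP_FALLTHROUGH(); // intentional: case 0 also executes the case 1 body
//   case 1:
//     run();
//     break;
//   }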
#if KMP_HAVE_ATTRIBUTE_WAITPKG
#define KMP_ATTRIBUTE_TARGET_WAITPKG __attribute__((target("waitpkg")))
#else
#define KMP_ATTRIBUTE_TARGET_WAITPKG /* Nothing */
#endif

#if KMP_HAVE_ATTRIBUTE_RTM
#define KMP_ATTRIBUTE_TARGET_RTM __attribute__((target("rtm")))
#else
#define KMP_ATTRIBUTE_TARGET_RTM /* Nothing */
#endif

// Define attribute that indicates a function does not return
#if __cplusplus >= 201103L
#define KMP_NORETURN [[noreturn]]
#elif KMP_OS_WINDOWS
#define KMP_NORETURN __declspec(noreturn)
#else
#define KMP_NORETURN __attribute__((noreturn))
#endif
#if KMP_OS_WINDOWS && KMP_MSVC_COMPAT
#define KMP_ALIGN(bytes) __declspec(align(bytes))
#define KMP_THREAD_LOCAL __declspec(thread)
#define KMP_ALIAS /* Nothing */
#else
#define KMP_ALIGN(bytes) __attribute__((aligned(bytes)))
#define KMP_THREAD_LOCAL __thread
#define KMP_ALIAS(alias_of) __attribute__((alias(alias_of)))
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE && !KMP_DYNAMIC_LIB
#define KMP_WEAK_ATTRIBUTE_EXTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_EXTERNAL /* Nothing */
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE
#define KMP_WEAK_ATTRIBUTE_INTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_INTERNAL /* Nothing */
#endif
// Define KMP_VERSION_SYMBOL and KMP_EXPAND_NAME
#ifndef KMP_STR
#define KMP_STR(x) _KMP_STR(x)
#define _KMP_STR(x) #x
#endif

#ifdef KMP_USE_VERSION_SYMBOLS
// If using versioned symbols, KMP_EXPAND_NAME prepends
// __kmp_api_ to the real API name
#define KMP_EXPAND_NAME(api_name) _KMP_EXPAND_NAME(api_name)
#define _KMP_EXPAND_NAME(api_name) __kmp_api_##api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str)                        \
  _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, default_ver)          \
  __typeof__(__kmp_api_##api_name) __kmp_api_##api_name##_##ver_num##_alias   \
      __attribute__((alias(KMP_STR(__kmp_api_##api_name))));                  \
  __asm__(                                                                    \
      ".symver " KMP_STR(__kmp_api_##api_name##_##ver_num##_alias) "," KMP_STR( \
          api_name) "@" ver_str "\n\t");                                      \
  __asm__(".symver " KMP_STR(__kmp_api_##api_name) "," KMP_STR(               \
      api_name) "@@" default_ver "\n\t")

#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str)        \
  _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str,       \
                                 default_ver)                                 \
  __typeof__(__kmp_api_##apic_name) __kmp_api_##apic_name##_##ver_num##_alias \
      __attribute__((alias(KMP_STR(__kmp_api_##apic_name))));                 \
  __asm__(".symver " KMP_STR(__kmp_api_##apic_name) "," KMP_STR(              \
      apic_name) "@@" default_ver "\n\t");                                    \
  __asm__(                                                                    \
      ".symver " KMP_STR(__kmp_api_##apic_name##_##ver_num##_alias) "," KMP_STR( \
          api_name) "@" ver_str "\n\t")

#else // KMP_USE_VERSION_SYMBOLS
#define KMP_EXPAND_NAME(api_name) api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str) /* Nothing */
#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num,                 \
                                ver_str) /* Nothing */
#endif // KMP_USE_VERSION_SYMBOLS
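// Expansion sketch (illustrative only; omp_get_num_threads, 30 and "OMP_3.0"
// are hypothetical arguments): KMP_VERSION_SYMBOL(omp_get_num_threads, 30,
// "OMP_3.0") emits an alias of __kmp_api_omp_get_num_threads plus roughly:
//
//   .symver __kmp_api_omp_get_num_threads_30_alias, omp_get_num_threads@OMP_3.0
//   .symver __kmp_api_omp_get_num_threads, omp_get_num_threads@@VERSION
//
// so old binaries keep binding to the versioned alias while new links resolve
// the unversioned name to the default "VERSION" definition.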
/* Temporary note: if performance testing of this passes, we can remove
   all references to KMP_DO_ALIGN and replace with KMP_ALIGN. */
#define KMP_DO_ALIGN(bytes) KMP_ALIGN(bytes)
#define KMP_ALIGN_CACHE KMP_ALIGN(CACHE_LINE)
#define KMP_ALIGN_CACHE_INTERNODE KMP_ALIGN(INTERNODE_CACHE_LINE)

/* General purpose fence types for memory operations */
enum kmp_mem_fence_type {
  kmp_no_fence, /* No memory fence */
  kmp_acquire_fence, /* Acquire (read) memory fence */
  kmp_release_fence, /* Release (write) memory fence */
  kmp_full_fence /* Full (read+write) memory fence */
};
// Synchronization primitives

#if KMP_ASM_INTRINS && KMP_OS_WINDOWS &&                                       \
    !((KMP_ARCH_AARCH64 || KMP_ARCH_ARM) &&                                    \
      (KMP_COMPILER_CLANG || KMP_COMPILER_GCC))

#if KMP_MSVC_COMPAT && !KMP_COMPILER_CLANG
#pragma intrinsic(InterlockedExchangeAdd)
#pragma intrinsic(InterlockedCompareExchange)
#pragma intrinsic(InterlockedExchange)
#if !(KMP_COMPILER_ICX && KMP_32_BIT_ARCH)
#pragma intrinsic(InterlockedExchange64)
#endif
#endif

// Using InterlockedIncrement / InterlockedDecrement causes a library loading
// ordering problem, so we use InterlockedExchangeAdd instead.
#define KMP_TEST_THEN_INC32(p) InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_ADD4_32(p) InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_DEC32(p) InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  InterlockedExchangeAdd((volatile long *)(p), (v))

#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  InterlockedCompareExchange((volatile long *)(p), (long)(sv), (long)(cv))

#define KMP_XCHG_FIXED32(p, v)                                                 \
  InterlockedExchange((volatile long *)(p), (long)(v))
#define KMP_XCHG_FIXED64(p, v)                                                 \
  InterlockedExchange64((volatile kmp_int64 *)(p), (kmp_int64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  kmp_int32 tmp = InterlockedExchange((volatile long *)p, *(long *)&v);
  return *(kmp_real32 *)&tmp;
}
#define KMP_TEST_THEN_OR8(p, v) __kmp_test_then_or8((p), (v))
#define KMP_TEST_THEN_AND8(p, v) __kmp_test_then_and8((p), (v))
#define KMP_TEST_THEN_OR32(p, v) __kmp_test_then_or32((p), (v))
#define KMP_TEST_THEN_AND32(p, v) __kmp_test_then_and32((p), (v))
#define KMP_TEST_THEN_OR64(p, v) __kmp_test_then_or64((p), (v))
#define KMP_TEST_THEN_AND64(p, v) __kmp_test_then_and64((p), (v))

extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);
#if KMP_ARCH_AARCH64 && KMP_COMPILER_MSVC && !KMP_COMPILER_CLANG
#define KMP_TEST_THEN_INC64(p) _InterlockedExchangeAdd64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 1LL)
#define KMP_TEST_THEN_ADD4_64(p) _InterlockedExchangeAdd64((p), 4LL)
// #define KMP_TEST_THEN_ADD4_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 4LL)
// #define KMP_TEST_THEN_DEC64(p) _InterlockedExchangeAdd64((p), -1LL)
// #define KMP_TEST_THEN_DEC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), -1LL)
// #define KMP_TEST_THEN_ADD8(p, v) _InterlockedExchangeAdd8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) _InterlockedExchangeAdd64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store_acq8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store_rel8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store_acq16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store_rel16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store_acq32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store_rel32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store_acq64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store_rel64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store_ptr((void *volatile *)(p), (void *)(cv), (void *)(sv))

// KMP_COMPARE_AND_STORE expects this order:       pointer, compare, exchange
// _InterlockedCompareExchange expects this order: pointer, exchange, compare
// KMP_COMPARE_AND_STORE also returns a bool indicating a successful write. A
// write is successful if the return value of _InterlockedCompareExchange is the
// same as the compare value.
inline kmp_int8 __kmp_compare_and_store_acq8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_acq(p, sv, cv) == cv;
}

inline kmp_int8 __kmp_compare_and_store_rel8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_rel(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_acq16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_acq(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_rel16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_acq((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_rel((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_acq(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_ptr(void *volatile *p, void *cv,
                                             void *sv) {
  return _InterlockedCompareExchangePointer(p, sv, cv) == cv;
}
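// Semantics sketch (illustrative only; flag is a hypothetical variable): the
// wrappers above swap the (compare, exchange) argument order expected by the
// intrinsics and convert the returned old value into a success bool.
//
//   kmp_int32 flag = 0;
//   if (KMP_COMPARE_AND_STORE_ACQ32(&flag, 0, 1)) {
//     // this thread observed 0 and stored 1, i.e. it won the race
//   }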
// The _RET versions return the value instead of a bool

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  _InterlockedCompareExchange8((p), (sv), (cv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  _InterlockedCompareExchange16((p), (sv), (cv))

#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  _InterlockedCompareExchange64((volatile kmp_int64 *)(p), (kmp_int64)(sv),    \
                                (kmp_int64)(cv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  _InterlockedExchange8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) _InterlockedExchange16((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));

inline kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v) {
  kmp_int64 tmp =
      _InterlockedExchange64((volatile kmp_int64 *)p, *(kmp_int64 *)&v);
  return *(kmp_real64 *)&tmp;
}
#else // !KMP_ARCH_AARCH64

// Routines that we still need to implement in assembly.
extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
//#define KMP_TEST_THEN_INC32(p) __kmp_test_then_add32((p), 1)
//#define KMP_TEST_THEN_INC_ACQ32(p) __kmp_test_then_add32((p), 1)
#define KMP_TEST_THEN_INC64(p) __kmp_test_then_add64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) __kmp_test_then_add64((p), 1LL)
//#define KMP_TEST_THEN_ADD4_32(p) __kmp_test_then_add32((p), 4)
//#define KMP_TEST_THEN_ADD4_ACQ32(p) __kmp_test_then_add32((p), 4)
#define KMP_TEST_THEN_ADD4_64(p) __kmp_test_then_add64((p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p) __kmp_test_then_add64((p), 4LL)
//#define KMP_TEST_THEN_DEC32(p) __kmp_test_then_add32((p), -1)
//#define KMP_TEST_THEN_DEC_ACQ32(p) __kmp_test_then_add32((p), -1)
#define KMP_TEST_THEN_DEC64(p) __kmp_test_then_add64((p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p) __kmp_test_then_add64((p), -1LL)
//#define KMP_TEST_THEN_ADD32(p, v) __kmp_test_then_add32((p), (v))
#define KMP_TEST_THEN_ADD8(p, v) __kmp_test_then_add8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) __kmp_test_then_add64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
//#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
//#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
//#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));

#endif // KMP_ARCH_AARCH64
#elif (KMP_ASM_INTRINS && KMP_OS_UNIX) ||                                      \
    !(KMP_ARCH_X86 || KMP_ARCH_X86_64)

/* cast p to correct type so that proper intrinsic will be used */
#define KMP_TEST_THEN_INC32(p)                                                 \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_INC64(p)                                                 \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_INC64(p)                                                 \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_ADD4_32(p)                                               \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_DEC32(p)                                                 \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_ADD8(p, v)                                               \
  __sync_fetch_and_add((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), (kmp_int32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __atomic_fetch_add((volatile kmp_uint64 *)(p), (kmp_uint64)(v),              \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), (kmp_int64)(v))
#endif

#define KMP_TEST_THEN_OR8(p, v)                                                \
  __sync_fetch_and_or((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v)                                               \
  __sync_fetch_and_and((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v)                                               \
  __sync_fetch_and_or((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v)                                              \
  __sync_fetch_and_and((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __atomic_fetch_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v),               \
                    __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __atomic_fetch_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v),              \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __sync_fetch_and_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __sync_fetch_and_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#endif

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),     \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),     \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),   \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),   \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),   \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),   \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __sync_bool_compare_and_swap((void *volatile *)(p), (void *)(cv),            \
                               (void *)(sv))

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __sync_val_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),      \
                              (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),    \
                              (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),    \
                              (kmp_uint32)(sv))
#if KMP_ARCH_MIPS
static inline bool mips_sync_bool_compare_and_swap(volatile kmp_uint64 *p,
                                                   kmp_uint64 cv,
                                                   kmp_uint64 sv) {
  return __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                                   __ATOMIC_SEQ_CST);
}
static inline bool mips_sync_val_compare_and_swap(volatile kmp_uint64 *p,
                                                  kmp_uint64 cv,
                                                  kmp_uint64 sv) {
  __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                            __ATOMIC_SEQ_CST);
  return cv;
}
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p),                  \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p),                  \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  mips_sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
                                 (kmp_uint64)(sv))
#else
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),   \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),   \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),    \
                              (kmp_uint64)(sv))
#endif
#if KMP_OS_DARWIN && defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1800
#define KMP_XCHG_FIXED8(p, v)                                                  \
  __atomic_exchange_1((volatile kmp_uint8 *)(p), (kmp_uint8)(v),               \
                      __ATOMIC_SEQ_CST)
#else
#define KMP_XCHG_FIXED8(p, v)                                                  \
  __sync_lock_test_and_set((volatile kmp_uint8 *)(p), (kmp_uint8)(v))
#endif
#define KMP_XCHG_FIXED16(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint16 *)(p), (kmp_uint16)(v))
#define KMP_XCHG_FIXED32(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_XCHG_FIXED64(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  volatile kmp_uint32 *up;
  kmp_uint32 uv;
  memcpy(&up, &p, sizeof(up));
  memcpy(&uv, &v, sizeof(uv));
  kmp_int32 tmp = __sync_lock_test_and_set(up, uv);
  kmp_real32 ftmp;
  memcpy(&ftmp, &tmp, sizeof(tmp));
  return ftmp;
}

inline kmp_real64 KMP_XCHG_REAL64(volatile kmp_real64 *p, kmp_real64 v) {
  volatile kmp_uint64 *up;
  kmp_uint64 uv;
  memcpy(&up, &p, sizeof(up));
  memcpy(&uv, &v, sizeof(uv));
  kmp_int64 tmp = __sync_lock_test_and_set(up, uv);
  kmp_real64 dtmp;
  memcpy(&dtmp, &tmp, sizeof(tmp));
  return dtmp;
}
#else

extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
#define KMP_TEST_THEN_INC32(p)                                                 \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC64(p)                                                 \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_ADD4_32(p)                                               \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC32(p)                                                 \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_ADD8(p, v)                                               \
  __kmp_test_then_add8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), (kmp_int32)(v))
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), (kmp_int64)(v))

#define KMP_TEST_THEN_OR8(p, v)                                                \
  __kmp_test_then_or8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v)                                               \
  __kmp_test_then_and8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v)                                               \
  __kmp_test_then_or32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v)                                              \
  __kmp_test_then_and32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __kmp_test_then_or64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __kmp_test_then_and64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv),           \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv),           \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv),        \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv),        \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  __kmp_compare_and_store_ret32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));

#endif /* KMP_ASM_INTRINS */
/* ------------- relaxed consistency memory model stuff ------------------ */

#if KMP_OS_WINDOWS
#ifdef __ABSOFT_WIN
#define KMP_MB() asm("nop")
#define KMP_IMB() asm("nop")
#else
#define KMP_MB() /* _asm{ nop } */
#define KMP_IMB() /* _asm{ nop } */
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_ARCH_PPC64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS ||     \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||             \
    KMP_ARCH_VE || KMP_ARCH_S390X || KMP_ARCH_PPC
#if KMP_OS_WINDOWS
#undef KMP_MB
#define KMP_MB() std::atomic_thread_fence(std::memory_order_seq_cst)
#else /* !KMP_OS_WINDOWS */
#define KMP_MB() __sync_synchronize()
#endif
#endif

#ifndef KMP_MB
#define KMP_MB() /* nothing to do */
#endif
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_MIC
// fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used.
// We shouldn't need it, though, since the ABI rules require that
// * If the compiler generates NGO stores it also generates the fence
// * If users hand-code NGO stores they should insert the fence
// therefore no incomplete unordered stores should be visible.
#define KMP_MFENCE() /* Nothing */
#define KMP_SFENCE() /* Nothing */
#else
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
#define KMP_MFENCE_() _mm_mfence()
#define KMP_SFENCE_() _mm_sfence()
#elif KMP_COMPILER_MSVC
#define KMP_MFENCE_() MemoryBarrier()
#define KMP_SFENCE_() MemoryBarrier()
#else
#define KMP_MFENCE_() __sync_synchronize()
#define KMP_SFENCE_() __sync_synchronize()
#endif
#define KMP_MFENCE()                                                           \
  if (UNLIKELY(!__kmp_cpuinfo.initialized)) {                                  \
    __kmp_query_cpuid(&__kmp_cpuinfo);                                         \
  }                                                                            \
  if (__kmp_cpuinfo.flags.sse2) {                                              \
    KMP_MFENCE_();                                                             \
  }
#define KMP_SFENCE() KMP_SFENCE_()
#endif
#else
#define KMP_MFENCE() KMP_MB()
#define KMP_SFENCE() KMP_MB()
#endif

#ifndef KMP_IMB
#define KMP_IMB() /* nothing to do */
#endif
#ifndef KMP_ST_REL32
#define KMP_ST_REL32(A, D) (*(A) = (D))
#endif

#ifndef KMP_ST_REL64
#define KMP_ST_REL64(A, D) (*(A) = (D))
#endif

#ifndef KMP_LD_ACQ32
#define KMP_LD_ACQ32(A) (*(A))
#endif

#ifndef KMP_LD_ACQ64
#define KMP_LD_ACQ64(A) (*(A))
#endif
/* ------------------------------------------------------------------------ */
// FIXME - maybe this should be
//
// #define TCR_4(a) (*(volatile kmp_int32 *)(&a))
// #define TCW_4(a, b) (a) = (*(volatile kmp_int32 *)&(b))
//
// #define TCR_8(a) (*(volatile kmp_int64 *)(a))
// #define TCW_8(a, b) (a) = (*(volatile kmp_int64 *)(&b))
//
// I'm fairly certain this is the correct thing to do, but I'm afraid
// of performance regressions.

#define TCR_1(a) (a)
#define TCW_1(a, b) (a) = (b)
#define TCR_4(a) (a)
#define TCW_4(a, b) (a) = (b)
#define TCI_4(a) (++(a))
#define TCD_4(a) (--(a))
#define TCR_8(a) (a)
#define TCW_8(a, b) (a) = (b)
#define TCI_8(a) (++(a))
#define TCD_8(a) (--(a))
#define TCR_SYNC_4(a) (a)
#define TCW_SYNC_4(a, b) (a) = (b)
#define TCX_SYNC_4(a, b, c)                                                    \
  KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)(volatile void *)&(a),     \
                              (kmp_int32)(b), (kmp_int32)(c))
#define TCR_SYNC_8(a) (a)
#define TCW_SYNC_8(a, b) (a) = (b)
#define TCX_SYNC_8(a, b, c)                                                    \
  KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)(volatile void *)&(a),     \
                              (kmp_int64)(b), (kmp_int64)(c))
#if KMP_ARCH_X86 || KMP_ARCH_MIPS || KMP_ARCH_WASM || KMP_ARCH_PPC

#define TCR_PTR(a) ((void *)TCR_4(a))
#define TCW_PTR(a, b) TCW_4((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_4(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_4((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_4((a), (b), (c)))

#else /* 64 bit pointers */

#define TCR_PTR(a) ((void *)TCR_8(a))
#define TCW_PTR(a, b) TCW_8((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_8(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_8((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_8((a), (b), (c)))

#endif /* KMP_ARCH_X86 */
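// Usage sketch (illustrative only; shared_ptr_loc is a hypothetical shared
// location): TCR_PTR/TCW_PTR select the 4- or 8-byte coherent read/write to
// match the pointer width of the target.
//
//   void *shared_ptr_loc;
//   void *seen = TCR_PTR(shared_ptr_loc); // coherent read
//   TCW_PTR(shared_ptr_loc, seen);        // coherent write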
/* If these FTN_{TRUE,FALSE} values change, may need to change several places
   where they are used to check that language is Fortran, not C. */

#ifndef FTN_TRUE
#define FTN_TRUE TRUE
#endif

#ifndef FTN_FALSE
#define FTN_FALSE FALSE
#endif

typedef void (*microtask_t)(int *gtid, int *npr, ...);

#ifdef USE_VOLATILE_CAST
#define VOLATILE_CAST(x) (volatile x)
#else
#define VOLATILE_CAST(x) (x)
#endif
#define KMP_WAIT __kmp_wait_4
#define KMP_WAIT_PTR __kmp_wait_4_ptr
#define KMP_EQ __kmp_eq_4
#define KMP_NEQ __kmp_neq_4
#define KMP_LT __kmp_lt_4
#define KMP_GE __kmp_ge_4
#define KMP_LE __kmp_le_4

/* Workaround for Intel(R) 64 code gen bug when taking address of static array
 * (Intel(R) 64 Tracker #138) */
#if (KMP_ARCH_X86_64 || KMP_ARCH_PPC64) && KMP_OS_LINUX
#define STATIC_EFI2_WORKAROUND
#else
#define STATIC_EFI2_WORKAROUND static
#endif
// Support of BGET usage
#ifndef KMP_USE_BGET
#define KMP_USE_BGET 1
#endif

// Switches for OSS builds
#ifndef USE_CMPXCHG_FIX
#define USE_CMPXCHG_FIX 1
#endif

// Enable dynamic user lock
#define KMP_USE_DYNAMIC_LOCK 1

// Enable Intel(R) Transactional Synchronization Extensions (Intel(R) TSX) if
// dynamic user lock is turned on
#if KMP_USE_DYNAMIC_LOCK
// Visual studio can't handle the asm sections in this code
#define KMP_USE_TSX (KMP_ARCH_X86 || KMP_ARCH_X86_64) && !KMP_COMPILER_MSVC
#ifdef KMP_USE_ADAPTIVE_LOCKS
#undef KMP_USE_ADAPTIVE_LOCKS
#endif
#define KMP_USE_ADAPTIVE_LOCKS KMP_USE_TSX
#endif // KMP_USE_DYNAMIC_LOCK

// Enable tick time conversion of ticks to seconds
#if KMP_STATS_ENABLED
#define KMP_HAVE_TICK_TIME                                                     \
  (KMP_OS_LINUX && (KMP_MIC || KMP_ARCH_X86 || KMP_ARCH_X86_64))
#endif
enum kmp_warnings_level {
  kmp_warnings_off = 0, /* No warnings */
  kmp_warnings_low, /* Minimal warnings (default) */
  kmp_warnings_explicit = 6, /* Explicitly set to ON - more warnings */
  kmp_warnings_verbose /* reserved */
};

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

#include "kmp_safe_c_api.h"
// Macros for C++11 atomic functions
#define KMP_ATOMIC_LD(p, order) (p)->load(std::memory_order_##order)
#define KMP_ATOMIC_OP(op, p, v, order) (p)->op(v, std::memory_order_##order)

// For non-default load/store
#define KMP_ATOMIC_LD_ACQ(p) KMP_ATOMIC_LD(p, acquire)
#define KMP_ATOMIC_LD_RLX(p) KMP_ATOMIC_LD(p, relaxed)
#define KMP_ATOMIC_ST_REL(p, v) KMP_ATOMIC_OP(store, p, v, release)
#define KMP_ATOMIC_ST_RLX(p, v) KMP_ATOMIC_OP(store, p, v, relaxed)

// For non-default fetch_<op>
#define KMP_ATOMIC_ADD(p, v) KMP_ATOMIC_OP(fetch_add, p, v, acq_rel)
#define KMP_ATOMIC_SUB(p, v) KMP_ATOMIC_OP(fetch_sub, p, v, acq_rel)
#define KMP_ATOMIC_AND(p, v) KMP_ATOMIC_OP(fetch_and, p, v, acq_rel)
#define KMP_ATOMIC_OR(p, v) KMP_ATOMIC_OP(fetch_or, p, v, acq_rel)
#define KMP_ATOMIC_INC(p) KMP_ATOMIC_OP(fetch_add, p, 1, acq_rel)
#define KMP_ATOMIC_DEC(p) KMP_ATOMIC_OP(fetch_sub, p, 1, acq_rel)
#define KMP_ATOMIC_ADD_RLX(p, v) KMP_ATOMIC_OP(fetch_add, p, v, relaxed)
#define KMP_ATOMIC_INC_RLX(p) KMP_ATOMIC_OP(fetch_add, p, 1, relaxed)
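// Usage sketch (illustrative only; counter is a hypothetical variable): the
// KMP_ATOMIC_* macros are thin spellings of C++11 atomic operations with
// explicit memory orders.
//
//   std::atomic<kmp_int32> counter(0);
//   KMP_ATOMIC_INC(&counter);                  // fetch_add(1, acq_rel)
//   kmp_int32 n = KMP_ATOMIC_LD_ACQ(&counter); // load(acquire)
//   KMP_ATOMIC_ST_REL(&counter, n);            // store(release)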
// Callers of the following functions cannot see the side effect on "expected".
template <typename T>
bool __kmp_atomic_compare_store(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acq_rel, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_acq(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acquire, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_rel(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_release, std::memory_order_relaxed);
}
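// Usage sketch (illustrative only; state is a hypothetical variable): because
// "expected" is taken by value, a failed exchange is visible only through the
// return value, unlike raw compare_exchange_strong.
//
//   std::atomic<kmp_int32> state(0);
//   if (__kmp_atomic_compare_store(&state, 0, 1)) {
//     // state was 0 and is now 1
//   }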
// Symbol lookup on Linux/Windows
#if KMP_OS_WINDOWS
extern void *__kmp_lookup_symbol(const char *name, bool next = false);
#define KMP_DLSYM(name) __kmp_lookup_symbol(name)
#define KMP_DLSYM_NEXT(name) __kmp_lookup_symbol(name, true)
#elif KMP_OS_WASI
#define KMP_DLSYM(name) nullptr
#define KMP_DLSYM_NEXT(name) nullptr
#else
#define KMP_DLSYM(name) dlsym(RTLD_DEFAULT, name)
#define KMP_DLSYM_NEXT(name) dlsym(RTLD_NEXT, name)
#endif
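// Usage sketch (illustrative only; some_symbol is a hypothetical name):
// KMP_DLSYM_NEXT performs interposition-friendly lookup, resolving the next
// definition of a symbol after the current module (RTLD_NEXT on Unix).
//
//   typedef int (*fn_t)(void);
//   fn_t next_fn = (fn_t)KMP_DLSYM_NEXT("some_symbol");
//   if (next_fn)
//     next_fn();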
// MSVC doesn't have this, but clang/clang-cl does.
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif

// Same as LLVM_BUILTIN_UNREACHABLE. States that it is UB to reach this point.
#if __has_builtin(__builtin_unreachable) || defined(__GNUC__)
#define KMP_BUILTIN_UNREACHABLE __builtin_unreachable()
#elif defined(_MSC_VER)
#define KMP_BUILTIN_UNREACHABLE __assume(false)
#else
#define KMP_BUILTIN_UNREACHABLE
#endif

#endif /* KMP_OS_H */