/*
 * kmp_os.h -- KPTS runtime header file.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_OS_H
#define KMP_OS_H

#include "kmp_config.h"
#include <atomic>
#include <stdarg.h>
#include <stdlib.h>

#define KMP_FTN_PLAIN 1
#define KMP_FTN_APPEND 2
#define KMP_FTN_UPPER 3

#define KMP_FTN_PREPEND 4
#define KMP_FTN_UAPPEND 5

#define KMP_PTR_SKIP (sizeof(void *))

/* -------------------------- Compiler variations ------------------------ */

#define KMP_OFF 0
#define KMP_ON 1

#define KMP_MEM_CONS_VOLATILE 0
#define KMP_MEM_CONS_FENCE 1

#ifndef KMP_MEM_CONS_MODEL
#define KMP_MEM_CONS_MODEL KMP_MEM_CONS_VOLATILE
#endif

#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

/* ------------------------- Compiler recognition ---------------------- */
#define KMP_COMPILER_ICC 0
#define KMP_COMPILER_GCC 0
#define KMP_COMPILER_CLANG 0
#define KMP_COMPILER_MSVC 0

#if defined(__INTEL_COMPILER)
#undef KMP_COMPILER_ICC
#define KMP_COMPILER_ICC 1
#elif defined(__clang__)
#undef KMP_COMPILER_CLANG
#define KMP_COMPILER_CLANG 1
#elif defined(__GNUC__)
#undef KMP_COMPILER_GCC
#define KMP_COMPILER_GCC 1
#elif defined(_MSC_VER)
#undef KMP_COMPILER_MSVC
#define KMP_COMPILER_MSVC 1
#else
#error Unknown compiler
#endif

#if (KMP_OS_LINUX || KMP_OS_WINDOWS || KMP_OS_FREEBSD)
#define KMP_AFFINITY_SUPPORTED 1
#if KMP_OS_WINDOWS && KMP_ARCH_X86_64
#define KMP_GROUP_AFFINITY 1
#else
#define KMP_GROUP_AFFINITY 0
#endif
#else
#define KMP_AFFINITY_SUPPORTED 0
#define KMP_GROUP_AFFINITY 0
#endif

/* Check for quad-precision extension. */
#define KMP_HAVE_QUAD 0
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_COMPILER_ICC
/* _Quad is already defined for icc */
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#elif KMP_COMPILER_CLANG
/* Clang doesn't support a software-implemented
   128-bit extended precision type yet */
typedef long double _Quad;
#elif KMP_COMPILER_GCC
/* GCC on NetBSD lacks __multc3/__divtc3 builtins needed for quad */
#if !KMP_OS_NETBSD
typedef __float128 _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#elif KMP_COMPILER_MSVC
typedef long double _Quad;
#endif
#else
#if __LDBL_MAX_EXP__ >= 16384 && KMP_COMPILER_GCC
typedef long double _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
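
// Editorial note (illustration, not part of the original header): note that on
// some compilers above, _Quad is merely an alias for long double, so any code
// that needs true 128-bit arithmetic should be guarded on KMP_HAVE_QUAD, e.g.:
//
//   #if KMP_HAVE_QUAD
//   static _Quad __kmp_quad_add(_Quad a, _Quad b) { return a + b; } // hypothetical helper
//   #endif
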
#define KMP_USE_X87CONTROL 0
#if KMP_OS_WINDOWS
#define KMP_END_OF_LINE "\r\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#ifndef KMP_STRUCT64
typedef __int64 kmp_int64;
typedef unsigned __int64 kmp_uint64;
#define KMP_INT64_SPEC "I64d"
#define KMP_UINT64_SPEC "I64u"
#else
struct kmp_struct64 {
  kmp_int32 a, b;
};
typedef struct kmp_struct64 kmp_int64;
typedef struct kmp_struct64 kmp_uint64;
/* Not sure what to use for KMP_[U]INT64_SPEC here */
#endif
#if KMP_ARCH_X86 && KMP_MSVC_COMPAT
#undef KMP_USE_X87CONTROL
#define KMP_USE_X87CONTROL 1
#endif
#if KMP_ARCH_X86_64 || KMP_ARCH_AARCH64
#define KMP_INTPTR 1
typedef __int64 kmp_intptr_t;
typedef unsigned __int64 kmp_uintptr_t;
#define KMP_INTPTR_SPEC "I64d"
#define KMP_UINTPTR_SPEC "I64u"
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_OS_UNIX
#define KMP_END_OF_LINE "\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
typedef long long kmp_int64;
typedef unsigned long long kmp_uint64;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#define KMP_INT64_SPEC "lld"
#define KMP_UINT64_SPEC "llu"
#endif /* KMP_OS_UNIX */

#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS
#define KMP_SIZE_T_SPEC KMP_UINT32_SPEC
#elif KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 ||                 \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64
#define KMP_SIZE_T_SPEC KMP_UINT64_SPEC
#else
#error "Can't determine size_t printf format specifier."
#endif

#if KMP_ARCH_X86
#define KMP_SIZE_T_MAX (0xFFFFFFFF)
#else
#define KMP_SIZE_T_MAX (0xFFFFFFFFFFFFFFFF)
#endif

typedef size_t kmp_size_t;
typedef float kmp_real32;
typedef double kmp_real64;

#ifndef KMP_INTPTR
#define KMP_INTPTR 1
typedef long kmp_intptr_t;
typedef unsigned long kmp_uintptr_t;
#define KMP_INTPTR_SPEC "ld"
#define KMP_UINTPTR_SPEC "lu"
#endif

#ifdef BUILD_I8
typedef kmp_int64 kmp_int;
typedef kmp_uint64 kmp_uint;
#else
typedef kmp_int32 kmp_int;
typedef kmp_uint32 kmp_uint;
#endif /* BUILD_I8 */
#define KMP_INT_MAX ((kmp_int32)0x7FFFFFFF)
#define KMP_INT_MIN ((kmp_int32)0x80000000)

// stdarg handling
#if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64) &&                   \
    (KMP_OS_FREEBSD || KMP_OS_LINUX)
typedef va_list *kmp_va_list;
#define kmp_va_deref(ap) (*(ap))
#define kmp_va_addr_of(ap) (&(ap))
#else
typedef va_list kmp_va_list;
#define kmp_va_deref(ap) (ap)
#define kmp_va_addr_of(ap) (ap)
#endif
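
// Illustrative sketch (not from the original header): kmp_va_list lets a
// va_list be passed across function boundaries uniformly. On ABIs where
// va_list is an array type, a pointer to it is passed instead, and
// kmp_va_addr_of / kmp_va_deref hide the difference. Hypothetical usage
// (both function names are invented for illustration):
//
//   void __kmp_vprint(char const *fmt, kmp_va_list ap) {
//     vprintf(fmt, kmp_va_deref(ap));
//   }
//   void __kmp_print(char const *fmt, ...) {
//     va_list ap;
//     va_start(ap, fmt);
//     __kmp_vprint(fmt, kmp_va_addr_of(ap));
//     va_end(ap);
//   }
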
#ifdef __cplusplus
// macros to cast out qualifiers and to re-interpret types
#define CCAST(type, var) const_cast<type>(var)
#define RCAST(type, var) reinterpret_cast<type>(var)
//-------------------------------------------------------------------------
// template for debug prints specification ( d, u, lld, llu ), and to obtain
// signed/unsigned flavors of a type
template <typename T> struct traits_t {};
// int
template <> struct traits_t<signed int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffff;
  static const signed_t min_value = 0x80000000;
  static const int type_size = sizeof(signed_t);
};
// unsigned int
template <> struct traits_t<unsigned int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffff;
  static const unsigned_t min_value = 0x00000000;
  static const int type_size = sizeof(unsigned_t);
};
// long
template <> struct traits_t<signed long> {
  typedef signed long signed_t;
  typedef unsigned long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const int type_size = sizeof(signed_t);
};
// long long
template <> struct traits_t<signed long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffffffffffffLL;
  static const signed_t min_value = 0x8000000000000000LL;
  static const int type_size = sizeof(signed_t);
};
// unsigned long long
template <> struct traits_t<unsigned long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffffffffffffLL;
  static const unsigned_t min_value = 0x0000000000000000LL;
  static const int type_size = sizeof(unsigned_t);
};
//-------------------------------------------------------------------------
#else
#define CCAST(type, var) (type)(var)
#define RCAST(type, var) (type)(var)
#endif // __cplusplus
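
// Illustrative sketch (not from the original header): traits_t<T>::spec is
// defined elsewhere to hold the printf length/conversion string ("d", "u",
// "lld", "llu"), so a debug print can be written generically. Assuming
// KMP_SNPRINTF from kmp_safe_c_api.h (included below), a hypothetical helper:
//
//   template <typename T> void __kmp_debug_print_value(T v) {
//     char format[16];
//     KMP_SNPRINTF(format, sizeof(format), "%%%s\n", traits_t<T>::spec);
//     printf(format, v);
//   }
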
#define KMP_EXPORT extern /* export declaration in guide libraries */

#if __GNUC__ >= 4 && !defined(__MINGW32__)
#define __forceinline __inline
#endif

/* Check if the OS/arch can support user-level mwait */
// All mwait code tests for UMWAIT first, so it should only fall back to ring3
// MWAIT for KNL.
#define KMP_HAVE_MWAIT                                                         \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) &&    \
   !KMP_MIC2)
#define KMP_HAVE_UMWAIT                                                        \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) &&    \
   !KMP_MIC)

#if KMP_OS_WINDOWS
#include <windows.h>

static inline int KMP_GET_PAGE_SIZE(void) {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}
#else
#define KMP_GET_PAGE_SIZE() getpagesize()
#endif

#define PAGE_ALIGNED(_addr)                                                    \
  (!((size_t)_addr & (size_t)(KMP_GET_PAGE_SIZE() - 1)))
#define ALIGN_TO_PAGE(x)                                                       \
  (void *)(((size_t)(x)) & ~((size_t)(KMP_GET_PAGE_SIZE() - 1)))
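
// Worked example (illustration only): with a 4096-byte page size,
// KMP_GET_PAGE_SIZE() - 1 is 0xFFF. An address is page-aligned exactly when
// its low 12 bits are zero, and masking with ~0xFFF rounds down to the start
// of the enclosing page:
//
//   PAGE_ALIGNED((void *)0x7f0000001000)  // true:  0x...1000 & 0xFFF == 0
//   PAGE_ALIGNED((void *)0x7f0000001234)  // false: low bits are 0x234
//   ALIGN_TO_PAGE((void *)0x7f0000001234) // (void *)0x7f0000001000
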
/* ---------- Support for cache alignment, padding, etc. ----------------*/

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

#define INTERNODE_CACHE_LINE 4096 /* for multi-node systems */

/* Define the default size of the cache line */
#ifndef CACHE_LINE
#define CACHE_LINE 128 /* cache line size in bytes */
#else
#if (CACHE_LINE < 64) && !defined(KMP_OS_DARWIN)
// 2006-02-13: This produces too many warnings on OS X*. Disable for now
#warning CACHE_LINE is too small.
#endif
#endif /* CACHE_LINE */

#define KMP_CACHE_PREFETCH(ADDR) /* nothing */

// Define attribute that indicates that the fall through from the previous
// case label is intentional and should not be diagnosed by a compiler
// Code from libcxx/include/__config
// Use a function like macro to imply that it must be followed by a semicolon
#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
#define KMP_FALLTHROUGH() [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define KMP_FALLTHROUGH() [[clang::fallthrough]]
#elif __has_attribute(fallthrough) || __GNUC__ >= 7
#define KMP_FALLTHROUGH() __attribute__((__fallthrough__))
#else
#define KMP_FALLTHROUGH() ((void)0)
#endif
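
// Illustrative sketch (not from the original header): KMP_FALLTHROUGH() goes
// where one case label intentionally runs into the next, silencing
// -Wimplicit-fallthrough on compilers that support the attribute:
//
//   switch (kind) {
//   case 2:
//     step_two();
//     KMP_FALLTHROUGH();
//   case 1:
//     step_one();
//     break;
//   }
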
#if KMP_HAVE_ATTRIBUTE_WAITPKG
#define KMP_ATTRIBUTE_TARGET_WAITPKG __attribute__((target("waitpkg")))
#else
#define KMP_ATTRIBUTE_TARGET_WAITPKG /* Nothing */
#endif

#if KMP_HAVE_ATTRIBUTE_RTM
#define KMP_ATTRIBUTE_TARGET_RTM __attribute__((target("rtm")))
#else
#define KMP_ATTRIBUTE_TARGET_RTM /* Nothing */
#endif

// Define attribute that indicates a function does not return
#if __cplusplus >= 201103L
#define KMP_NORETURN [[noreturn]]
#elif KMP_OS_WINDOWS
#define KMP_NORETURN __declspec(noreturn)
#else
#define KMP_NORETURN __attribute__((noreturn))
#endif

#if KMP_OS_WINDOWS && KMP_MSVC_COMPAT
#define KMP_ALIGN(bytes) __declspec(align(bytes))
#define KMP_THREAD_LOCAL __declspec(thread)
#define KMP_ALIAS /* Nothing */
#else
#define KMP_ALIGN(bytes) __attribute__((aligned(bytes)))
#define KMP_THREAD_LOCAL __thread
#define KMP_ALIAS(alias_of) __attribute__((alias(alias_of)))
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE && !KMP_DYNAMIC_LIB
#define KMP_WEAK_ATTRIBUTE_EXTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_EXTERNAL /* Nothing */
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE
#define KMP_WEAK_ATTRIBUTE_INTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_INTERNAL /* Nothing */
#endif

// Define KMP_VERSION_SYMBOL and KMP_EXPAND_NAME
#ifndef KMP_STR
#define KMP_STR(x) _KMP_STR(x)
#define _KMP_STR(x) #x
#endif

#ifdef KMP_USE_VERSION_SYMBOLS
// If using versioned symbols, KMP_EXPAND_NAME prepends
// __kmp_api_ to the real API name
#define KMP_EXPAND_NAME(api_name) _KMP_EXPAND_NAME(api_name)
#define _KMP_EXPAND_NAME(api_name) __kmp_api_##api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str)                         \
  _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, default_ver)           \
  __typeof__(__kmp_api_##api_name) __kmp_api_##api_name##_##ver_num##_alias    \
      __attribute__((alias(KMP_STR(__kmp_api_##api_name))));                   \
  __asm__(                                                                     \
      ".symver " KMP_STR(__kmp_api_##api_name##_##ver_num##_alias) "," KMP_STR(\
          api_name) "@" ver_str "\n\t");                                       \
  __asm__(".symver " KMP_STR(__kmp_api_##api_name) "," KMP_STR(                \
      api_name) "@@" default_ver "\n\t")

#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str)         \
  _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str,        \
                                 default_ver)                                  \
  __typeof__(__kmp_api_##apic_name) __kmp_api_##apic_name##_##ver_num##_alias  \
      __attribute__((alias(KMP_STR(__kmp_api_##apic_name))));                  \
  __asm__(".symver " KMP_STR(__kmp_api_##apic_name) "," KMP_STR(               \
      apic_name) "@@" default_ver "\n\t");                                     \
  __asm__(                                                                     \
      ".symver " KMP_STR(__kmp_api_##apic_name##_##ver_num##_alias) "," KMP_STR(\
          api_name) "@" ver_str "\n\t")

#else // KMP_USE_VERSION_SYMBOLS
#define KMP_EXPAND_NAME(api_name) api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str) /* Nothing */
#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num,                  \
                                ver_str) /* Nothing */
#endif // KMP_USE_VERSION_SYMBOLS
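
// Illustrative expansion (not from the original header): for a hypothetical
// entry point foo, KMP_VERSION_SYMBOL(foo, 10, "VERSION_1.0") emits roughly:
//
//   __typeof__(__kmp_api_foo) __kmp_api_foo_10_alias
//       __attribute__((alias("__kmp_api_foo")));
//   __asm__(".symver __kmp_api_foo_10_alias,foo@VERSION_1.0\n\t");
//   __asm__(".symver __kmp_api_foo,foo@@VERSION\n\t");
//
// i.e. the implementation __kmp_api_foo is exported both under the old
// version tag (single @) and as the default version (@@ "VERSION").
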
/* Temporary note: if performance testing of this passes, we can remove
   all references to KMP_DO_ALIGN and replace with KMP_ALIGN. */
#define KMP_DO_ALIGN(bytes) KMP_ALIGN(bytes)
#define KMP_ALIGN_CACHE KMP_ALIGN(CACHE_LINE)
#define KMP_ALIGN_CACHE_INTERNODE KMP_ALIGN(INTERNODE_CACHE_LINE)
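
// Illustrative sketch (not from the original header): KMP_ALIGN_CACHE is
// typically applied to fields written by different threads, padding each onto
// its own cache line to avoid false sharing. Hypothetical struct:
//
//   typedef struct hypothetical_counters {
//     KMP_ALIGN_CACHE kmp_int32 producer_count;
//     KMP_ALIGN_CACHE kmp_int32 consumer_count;
//   } hypothetical_counters_t;
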
/* General purpose fence types for memory operations */
enum kmp_mem_fence_type {
  kmp_no_fence, /* No memory fence */
  kmp_acquire_fence, /* Acquire (read) memory fence */
  kmp_release_fence, /* Release (write) memory fence */
  kmp_full_fence /* Full (read+write) memory fence */
};

// Synchronization primitives

#if KMP_ASM_INTRINS && KMP_OS_WINDOWS

#if KMP_MSVC_COMPAT && !KMP_COMPILER_CLANG
#pragma intrinsic(InterlockedExchangeAdd)
#pragma intrinsic(InterlockedCompareExchange)
#pragma intrinsic(InterlockedExchange)
#pragma intrinsic(InterlockedExchange64)
#endif

// Using InterlockedIncrement / InterlockedDecrement causes a library loading
// ordering problem, so we use InterlockedExchangeAdd instead.
#define KMP_TEST_THEN_INC32(p) InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_ADD4_32(p) InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_DEC32(p) InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  InterlockedExchangeAdd((volatile long *)(p), (v))

#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  InterlockedCompareExchange((volatile long *)(p), (long)(sv), (long)(cv))

#define KMP_XCHG_FIXED32(p, v)                                                 \
  InterlockedExchange((volatile long *)(p), (long)(v))
#define KMP_XCHG_FIXED64(p, v)                                                 \
  InterlockedExchange64((volatile kmp_int64 *)(p), (kmp_int64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  kmp_int32 tmp = InterlockedExchange((volatile long *)p, *(long *)&v);
  return *(kmp_real32 *)&tmp;
}

#define KMP_TEST_THEN_OR8(p, v) __kmp_test_then_or8((p), (v))
#define KMP_TEST_THEN_AND8(p, v) __kmp_test_then_and8((p), (v))
#define KMP_TEST_THEN_OR32(p, v) __kmp_test_then_or32((p), (v))
#define KMP_TEST_THEN_AND32(p, v) __kmp_test_then_and32((p), (v))
#define KMP_TEST_THEN_OR64(p, v) __kmp_test_then_or64((p), (v))
#define KMP_TEST_THEN_AND64(p, v) __kmp_test_then_and64((p), (v))

extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

#if KMP_ARCH_AARCH64 && KMP_COMPILER_MSVC && !KMP_COMPILER_CLANG
#define KMP_TEST_THEN_INC64(p) _InterlockedExchangeAdd64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 1LL)
#define KMP_TEST_THEN_ADD4_64(p) _InterlockedExchangeAdd64((p), 4LL)
// #define KMP_TEST_THEN_ADD4_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 4LL)
// #define KMP_TEST_THEN_DEC64(p) _InterlockedExchangeAdd64((p), -1LL)
// #define KMP_TEST_THEN_DEC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), -1LL)
// #define KMP_TEST_THEN_ADD8(p, v) _InterlockedExchangeAdd8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) _InterlockedExchangeAdd64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store_acq8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store_rel8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store_acq16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store_rel16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store_acq32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store_rel32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store_acq64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store_rel64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store_ptr((void *volatile *)(p), (void *)(cv), (void *)(sv))

// KMP_COMPARE_AND_STORE expects this order:       pointer, compare, exchange
// _InterlockedCompareExchange expects this order: pointer, exchange, compare
// KMP_COMPARE_AND_STORE also returns a bool indicating a successful write. A
// write is successful if the return value of _InterlockedCompareExchange is the
// same as the compare value.
inline kmp_int8 __kmp_compare_and_store_acq8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_acq(p, sv, cv) == cv;
}

inline kmp_int8 __kmp_compare_and_store_rel8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_rel(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_acq16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_acq(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_rel16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_acq((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_rel((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_acq(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_ptr(void *volatile *p, void *cv,
                                             void *sv) {
  return _InterlockedCompareExchangePointer(p, sv, cv) == cv;
}

// The _RET versions return the value instead of a bool
#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  _InterlockedCompareExchange8((p), (sv), (cv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  _InterlockedCompareExchange16((p), (sv), (cv))

#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  _InterlockedCompareExchange64((volatile kmp_int64 *)(p), (kmp_int64)(sv),    \
                                (kmp_int64)(cv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  _InterlockedExchange8((volatile kmp_int8 *)(p), (kmp_int8)(v));

// #define KMP_XCHG_FIXED16(p, v) _InterlockedExchange16((p), (v));
// #define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v)));

// inline kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v) {
//   kmp_int64 tmp = _InterlockedExchange64((volatile kmp_int64 *)p,
//                                          *(kmp_int64 *)&v);
//   return *(kmp_real64 *)&tmp;
// }

#else // !KMP_ARCH_AARCH64

// Routines that we still need to implement in assembly.
extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

//#define KMP_TEST_THEN_INC32(p) __kmp_test_then_add32((p), 1)
//#define KMP_TEST_THEN_INC_ACQ32(p) __kmp_test_then_add32((p), 1)
#define KMP_TEST_THEN_INC64(p) __kmp_test_then_add64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) __kmp_test_then_add64((p), 1LL)
//#define KMP_TEST_THEN_ADD4_32(p) __kmp_test_then_add32((p), 4)
//#define KMP_TEST_THEN_ADD4_ACQ32(p) __kmp_test_then_add32((p), 4)
#define KMP_TEST_THEN_ADD4_64(p) __kmp_test_then_add64((p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p) __kmp_test_then_add64((p), 4LL)
//#define KMP_TEST_THEN_DEC32(p) __kmp_test_then_add32((p), -1)
//#define KMP_TEST_THEN_DEC_ACQ32(p) __kmp_test_then_add32((p), -1)
#define KMP_TEST_THEN_DEC64(p) __kmp_test_then_add64((p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p) __kmp_test_then_add64((p), -1LL)
//#define KMP_TEST_THEN_ADD32(p, v) __kmp_test_then_add32((p), (v))
#define KMP_TEST_THEN_ADD8(p, v) __kmp_test_then_add8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) __kmp_test_then_add64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
//#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
//#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
//#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));
#endif

#elif (KMP_ASM_INTRINS && KMP_OS_UNIX) || !(KMP_ARCH_X86 || KMP_ARCH_X86_64)

/* cast p to correct type so that proper intrinsic will be used */
#define KMP_TEST_THEN_INC32(p)                                                 \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_INC64(p)                                                 \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_INC64(p)                                                 \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_ADD4_32(p)                                               \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_DEC32(p)                                                 \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_ADD8(p, v)                                               \
  __sync_fetch_and_add((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), (kmp_int32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __atomic_fetch_add((volatile kmp_uint64 *)(p), (kmp_uint64)(v),              \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), (kmp_int64)(v))
#endif

#define KMP_TEST_THEN_OR8(p, v)                                                \
  __sync_fetch_and_or((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v)                                               \
  __sync_fetch_and_and((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v)                                               \
  __sync_fetch_and_or((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v)                                              \
  __sync_fetch_and_and((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __atomic_fetch_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v),               \
                    __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __atomic_fetch_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v),              \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __sync_fetch_and_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __sync_fetch_and_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#endif

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),     \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),     \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),   \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),   \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),   \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),   \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __sync_bool_compare_and_swap((void *volatile *)(p), (void *)(cv),            \
                               (void *)(sv))

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __sync_val_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),      \
                              (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),    \
                              (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),    \
                              (kmp_uint32)(sv))
#if KMP_ARCH_MIPS
static inline bool mips_sync_bool_compare_and_swap(volatile kmp_uint64 *p,
                                                   kmp_uint64 cv,
                                                   kmp_uint64 sv) {
  return __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                                   __ATOMIC_SEQ_CST);
}
static inline bool mips_sync_val_compare_and_swap(volatile kmp_uint64 *p,
                                                  kmp_uint64 cv,
                                                  kmp_uint64 sv) {
  __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                            __ATOMIC_SEQ_CST);
  return cv;
}
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p),                  \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p),                  \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  mips_sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
                                 (kmp_uint64)(sv))
#else
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),   \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),   \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),    \
                              (kmp_uint64)(sv))
#endif

#define KMP_XCHG_FIXED8(p, v)                                                  \
  __sync_lock_test_and_set((volatile kmp_uint8 *)(p), (kmp_uint8)(v))
#define KMP_XCHG_FIXED16(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint16 *)(p), (kmp_uint16)(v))
#define KMP_XCHG_FIXED32(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_XCHG_FIXED64(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  kmp_int32 tmp =
      __sync_lock_test_and_set((volatile kmp_uint32 *)(p), *(kmp_uint32 *)&v);
  return *(kmp_real32 *)&tmp;
}

inline kmp_real64 KMP_XCHG_REAL64(volatile kmp_real64 *p, kmp_real64 v) {
  kmp_int64 tmp =
      __sync_lock_test_and_set((volatile kmp_uint64 *)(p), *(kmp_uint64 *)&v);
  return *(kmp_real64 *)&tmp;
}

#else

extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

#define KMP_TEST_THEN_INC32(p)                                                 \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC64(p)                                                 \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_ADD4_32(p)                                               \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC32(p)                                                 \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_ADD8(p, v)                                               \
  __kmp_test_then_add8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), (kmp_int32)(v))
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), (kmp_int64)(v))

#define KMP_TEST_THEN_OR8(p, v)                                                \
  __kmp_test_then_or8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v)                                               \
  __kmp_test_then_and8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v)                                               \
  __kmp_test_then_or32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v)                                              \
  __kmp_test_then_and32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __kmp_test_then_or64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __kmp_test_then_and64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv),           \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv),           \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv),        \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv),        \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  __kmp_compare_and_store_ret32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));

#endif /* KMP_ASM_INTRINS */
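
// Illustrative usage (not from the original header): whichever branch above
// was selected, the wrappers behave uniformly. KMP_TEST_THEN_ADD* returns the
// value held *before* the addition, and KMP_COMPARE_AND_STORE_ACQ/REL*
// returns nonzero iff the stored value equalled cv and sv was written:
//
//   kmp_int32 old = KMP_TEST_THEN_ADD32(&counter, 1); // old value; counter += 1
//   if (KMP_COMPARE_AND_STORE_ACQ32(&flag, 0, 1)) {
//     /* this thread transitioned flag 0 -> 1 and won the race */
//   }
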
/* ------------- relaxed consistency memory model stuff ------------------ */

#if KMP_OS_WINDOWS
#ifdef __ABSOFT_WIN
#define KMP_MB() asm("nop")
#define KMP_IMB() asm("nop")
#else
#define KMP_MB() /* _asm{ nop } */
#define KMP_IMB() /* _asm{ nop } */
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_ARCH_PPC64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS ||     \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64
#if KMP_OS_WINDOWS
#undef KMP_MB
#define KMP_MB() std::atomic_thread_fence(std::memory_order_seq_cst)
#else /* !KMP_OS_WINDOWS */
#define KMP_MB() __sync_synchronize()
#endif
#endif

#ifndef KMP_MB
#define KMP_MB() /* nothing to do */
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_COMPILER_ICC
#define KMP_MFENCE_() _mm_mfence()
#define KMP_SFENCE_() _mm_sfence()
#elif KMP_COMPILER_MSVC
#define KMP_MFENCE_() MemoryBarrier()
#define KMP_SFENCE_() MemoryBarrier()
#else
#define KMP_MFENCE_() __sync_synchronize()
#define KMP_SFENCE_() __sync_synchronize()
#endif
#define KMP_MFENCE()                                                           \
  if (UNLIKELY(!__kmp_cpuinfo.initialized)) {                                  \
    __kmp_query_cpuid(&__kmp_cpuinfo);                                         \
  }                                                                            \
  if (__kmp_cpuinfo.sse2) {                                                    \
    KMP_MFENCE_();                                                             \
  }
#define KMP_SFENCE() KMP_SFENCE_()
#else
#define KMP_MFENCE() KMP_MB()
#define KMP_SFENCE() KMP_MB()
#endif

#ifndef KMP_IMB
#define KMP_IMB() /* nothing to do */
#endif

#ifndef KMP_ST_REL32
#define KMP_ST_REL32(A, D) (*(A) = (D))
#endif

#ifndef KMP_ST_REL64
#define KMP_ST_REL64(A, D) (*(A) = (D))
#endif

#ifndef KMP_LD_ACQ32
#define KMP_LD_ACQ32(A) (*(A))
#endif

#ifndef KMP_LD_ACQ64
#define KMP_LD_ACQ64(A) (*(A))
#endif

/* ------------------------------------------------------------------------ */
// FIXME - maybe this should be:
//
// #define TCR_4(a) (*(volatile kmp_int32 *)(&a))
// #define TCW_4(a, b) (a) = (*(volatile kmp_int32 *)&(b))
//
// #define TCR_8(a) (*(volatile kmp_int64 *)(a))
// #define TCW_8(a, b) (a) = (*(volatile kmp_int64 *)(&b))
//
// I'm fairly certain this is the correct thing to do, but I'm afraid
// of performance regressions.

#define TCR_1(a) (a)
#define TCW_1(a, b) (a) = (b)
#define TCR_4(a) (a)
#define TCW_4(a, b) (a) = (b)
#define TCI_4(a) (++(a))
#define TCD_4(a) (--(a))
#define TCR_8(a) (a)
#define TCW_8(a, b) (a) = (b)
#define TCI_8(a) (++(a))
#define TCD_8(a) (--(a))
#define TCR_SYNC_4(a) (a)
#define TCW_SYNC_4(a, b) (a) = (b)
#define TCX_SYNC_4(a, b, c)                                                    \
  KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)(volatile void *)&(a),     \
                              (kmp_int32)(b), (kmp_int32)(c))
#define TCR_SYNC_8(a) (a)
#define TCW_SYNC_8(a, b) (a) = (b)
#define TCX_SYNC_8(a, b, c)                                                    \
  KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)(volatile void *)&(a),     \
                              (kmp_int64)(b), (kmp_int64)(c))
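
// Editorial note (an assumption, not stated in the original header): TCR/TCW
// are conventionally read as "thread-consistent read/write", with the _SYNC
// variants marking accesses that require synchronization and TCX_SYNC_*
// performing a compare-and-exchange. Hypothetical usage:
//
//   volatile kmp_int32 go = 0;
//   TCW_4(go, 1);               // plain thread-consistent write
//   while (TCR_4(go) != 2) {}   // plain thread-consistent read
//   TCX_SYNC_4(go, 2, 3);       // CAS: if go == 2, set it to 3
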
#if KMP_ARCH_X86 || KMP_ARCH_MIPS
// What about ARM?
#define TCR_PTR(a) ((void *)TCR_4(a))
#define TCW_PTR(a, b) TCW_4((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_4(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_4((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_4((a), (b), (c)))

#else /* 64 bit pointers */

#define TCR_PTR(a) ((void *)TCR_8(a))
#define TCW_PTR(a, b) TCW_8((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_8(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_8((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_8((a), (b), (c)))

#endif /* KMP_ARCH_X86 */

/* If these FTN_{TRUE,FALSE} values change, may need to change several places
   where they are used to check that language is Fortran, not C. */

#ifndef FTN_TRUE
#define FTN_TRUE TRUE
#endif

#ifndef FTN_FALSE
#define FTN_FALSE FALSE
#endif

typedef void (*microtask_t)(int *gtid, int *npr, ...);

#ifdef USE_VOLATILE_CAST
#define VOLATILE_CAST(x) (volatile x)
#else
#define VOLATILE_CAST(x) (x)
#endif

#define KMP_WAIT __kmp_wait_4
#define KMP_WAIT_PTR __kmp_wait_4_ptr
#define KMP_EQ __kmp_eq_4
#define KMP_NEQ __kmp_neq_4
#define KMP_LT __kmp_lt_4
#define KMP_GE __kmp_ge_4
#define KMP_LE __kmp_le_4

/* Workaround for Intel(R) 64 code gen bug when taking address of static array
 * (Intel(R) 64 Tracker #138) */
#if (KMP_ARCH_X86_64 || KMP_ARCH_PPC64) && KMP_OS_LINUX
#define STATIC_EFI2_WORKAROUND
#else
#define STATIC_EFI2_WORKAROUND static
#endif

// Support of BGET usage
#ifndef KMP_USE_BGET
#define KMP_USE_BGET 1
#endif

// Switches for OSS builds
#ifndef USE_CMPXCHG_FIX
#define USE_CMPXCHG_FIX 1
#endif

// Enable dynamic user lock
#define KMP_USE_DYNAMIC_LOCK 1

// Enable Intel(R) Transactional Synchronization Extensions (Intel(R) TSX) if
// dynamic user lock is turned on
#if KMP_USE_DYNAMIC_LOCK
// Visual Studio can't handle the asm sections in this code.
// Note: the value is fully parenthesized so the macro expands safely inside
// larger preprocessor expressions.
#define KMP_USE_TSX ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && !KMP_COMPILER_MSVC)
#ifdef KMP_USE_ADAPTIVE_LOCKS
#undef KMP_USE_ADAPTIVE_LOCKS
#endif
#define KMP_USE_ADAPTIVE_LOCKS KMP_USE_TSX
#endif

// Enable tick time conversion of ticks to seconds
#if KMP_STATS_ENABLED
#define KMP_HAVE_TICK_TIME                                                     \
  (KMP_OS_LINUX && (KMP_MIC || KMP_ARCH_X86 || KMP_ARCH_X86_64))
#endif

// Warning levels
enum kmp_warnings_level {
  kmp_warnings_off = 0, /* No warnings */
  kmp_warnings_low, /* Minimal warnings (default) */
  kmp_warnings_explicit = 6, /* Explicitly set to ON - more warnings */
  kmp_warnings_verbose /* reserved */
};

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

// Safe C API
#include "kmp_safe_c_api.h"

// Macros for C++11 atomic functions
#define KMP_ATOMIC_LD(p, order) (p)->load(std::memory_order_##order)
#define KMP_ATOMIC_OP(op, p, v, order) (p)->op(v, std::memory_order_##order)

// For non-default load/store
#define KMP_ATOMIC_LD_ACQ(p) KMP_ATOMIC_LD(p, acquire)
#define KMP_ATOMIC_LD_RLX(p) KMP_ATOMIC_LD(p, relaxed)
#define KMP_ATOMIC_ST_REL(p, v) KMP_ATOMIC_OP(store, p, v, release)
#define KMP_ATOMIC_ST_RLX(p, v) KMP_ATOMIC_OP(store, p, v, relaxed)

// For non-default fetch_<op>
#define KMP_ATOMIC_ADD(p, v) KMP_ATOMIC_OP(fetch_add, p, v, acq_rel)
#define KMP_ATOMIC_SUB(p, v) KMP_ATOMIC_OP(fetch_sub, p, v, acq_rel)
#define KMP_ATOMIC_AND(p, v) KMP_ATOMIC_OP(fetch_and, p, v, acq_rel)
#define KMP_ATOMIC_OR(p, v) KMP_ATOMIC_OP(fetch_or, p, v, acq_rel)
#define KMP_ATOMIC_INC(p) KMP_ATOMIC_OP(fetch_add, p, 1, acq_rel)
#define KMP_ATOMIC_DEC(p) KMP_ATOMIC_OP(fetch_sub, p, 1, acq_rel)
#define KMP_ATOMIC_ADD_RLX(p, v) KMP_ATOMIC_OP(fetch_add, p, v, relaxed)
#define KMP_ATOMIC_INC_RLX(p) KMP_ATOMIC_OP(fetch_add, p, 1, relaxed)
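
// Illustrative sketch (not from the original header): the macros paste the
// memory-order suffix, so KMP_ATOMIC_LD(p, acquire) becomes
// p->load(std::memory_order_acquire). Hypothetical usage:
//
//   std::atomic<kmp_int32> ready{0};
//   KMP_ATOMIC_ST_REL(&ready, 1);            // ready.store(1, release)
//   kmp_int32 r = KMP_ATOMIC_LD_ACQ(&ready); // ready.load(acquire)
//   KMP_ATOMIC_INC(&ready);                  // ready.fetch_add(1, acq_rel)
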

// "expected" is passed by value, so callers of the following functions cannot
// see compare_exchange_strong's side effect on it.
template <typename T>
bool __kmp_atomic_compare_store(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acq_rel, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_acq(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acquire, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_rel(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_release, std::memory_order_relaxed);
}
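
// Hypothetical usage (illustration only): claim a slot exactly once; the
// function returns true only for the thread whose CAS succeeded.
//
//   std::atomic<kmp_int32> owner{-1};
//   if (__kmp_atomic_compare_store(&owner, (kmp_int32)-1, my_gtid)) {
//     /* owner was -1 and is now my_gtid; this thread won the race */
//   }
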
// Symbol lookup on Linux/Windows
#if KMP_OS_WINDOWS
extern void *__kmp_lookup_symbol(const char *name);
#define KMP_DLSYM(name) __kmp_lookup_symbol(name)
#define KMP_DLSYM_NEXT(name) nullptr
#else
#define KMP_DLSYM(name) dlsym(RTLD_DEFAULT, name)
#define KMP_DLSYM_NEXT(name) dlsym(RTLD_NEXT, name)
#endif
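
// Illustrative sketch (not from the original header): KMP_DLSYM_NEXT is the
// usual idiom for reaching the "next" definition of an interposed symbol;
// Windows has no RTLD_NEXT equivalent, hence the nullptr fallback. The
// function type and symbol name below are invented for illustration:
//
//   typedef int (*hypothetical_fn_t)(void);
//   hypothetical_fn_t next = (hypothetical_fn_t)KMP_DLSYM_NEXT("some_symbol");
//   if (next)
//     next();
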
#endif /* KMP_OS_H */