/*
 * z_Windows_NT-586_util.cpp -- platform specific routines.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64 || KMP_ARCH_ARM)
/* Only a 32-bit "add-exchange" instruction is available on the IA-32
   architecture, which is why these routines fall back on compare_and_store. */

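// All of the test_then_* routines below follow the same compare-and-store
// retry loop: read the old value, compute the new value, and attempt the
// exchange until no other thread has raced in between. A minimal sketch of
// that pattern, written against std::atomic purely for illustration (the real
// routines use the kmp_* volatile types and KMP_COMPARE_AND_STORE_* macros):
//
//   #include <atomic>
//
//   template <class T> T test_then_or_sketch(std::atomic<T> &loc, T mask) {
//     T old_value = loc.load(std::memory_order_relaxed);
//     // compare_exchange_weak refreshes old_value with the current contents
//     // of loc whenever the exchange fails, so the loop simply retries.
//     while (!loc.compare_exchange_weak(old_value, old_value | mask,
//                                       std::memory_order_release,
//                                       std::memory_order_relaxed)) {
//     }
//     return old_value;
//   }
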
kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

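// Hypothetical usage sketch (the flag variable and bit value are made up for
// illustration, not taken from the runtime): atomically set bit 0 of a shared
// byte and get back its previous contents in one step:
//
//   volatile kmp_int8 shared_flags = 0;
//   kmp_int8 prev = __kmp_test_then_or8(&shared_flags, (kmp_int8)1);
//   // prev holds the value shared_flags had before bit 0 was set.
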
kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)p, old_value,
                                      new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)p, old_value,
                                      new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;
  while (!__kmp_compare_and_store8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

#if KMP_ARCH_X86
kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;
  while (!__kmp_compare_and_store64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}
#endif /* KMP_ARCH_X86 */
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)p, old_value,
                                      new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }

  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)p, old_value,
                                      new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }

  return old_value;
}

#if KMP_ARCH_AARCH64 && KMP_COMPILER_MSVC
// For !KMP_COMPILER_MSVC, this function is provided in assembly form
// by z_Linux_asm.S.
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  switch (argc) {
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  default: {
    // p_argv[6] and onwards must be passed on the stack since 8 registers are
    // already used.
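    // (Those eight integer argument registers, x0-x7 in the AArch64 calling
    // convention, are consumed by &gtid, &tid and p_argv[0]..p_argv[5] in the
    // case-6 call that this default case falls through to.)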
    size_t len = (argc - 6) * sizeof(void *);
    void *argbuf = alloca(len);
    memcpy(argbuf, &p_argv[6], len);
  }
    [[fallthrough]];
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  }

#if OMPT_SUPPORT
  *exit_frame_ptr = 0;
#endif

  return 1;
}
#endif

#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64 || KMP_ARCH_ARM */