// clang/test/CodeGen/ms-intrinsics-other.c
// (source snapshot; blob fa8422e5bf19fb1f2ec653b564e3ca3cf9338c53,
//  from "[clang] Implement lifetime analysis for lifetime_capture_by(X) (#115921)")
// RUN: %clang_cc1 -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN:         -triple x86_64--darwin -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s
// RUN: %clang_cc1 -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN:         -triple x86_64--linux -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s
// RUN: %clang_cc1 -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN:         -triple aarch64--darwin -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefix=CHECK-ARM-ARM64
// RUN: %clang_cc1 -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN:         -triple aarch64--darwin -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefix=CHECK-ARM
// RUN: %clang_cc1 -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN:         -triple armv7--darwin -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefix=CHECK-ARM

// RUN: %clang_cc1 -x c++ -std=c++11 \
// RUN:         -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN:         -triple x86_64--darwin -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s
// RUN: %clang_cc1 -x c++ -std=c++11 \
// RUN:         -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN:         -triple x86_64--linux -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s
// RUN: %clang_cc1 -x c++ -std=c++11 \
// RUN:         -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN:         -triple aarch64--darwin -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefix=CHECK-ARM-ARM64
// RUN: %clang_cc1 -x c++ -std=c++11 \
// RUN:         -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN:         -triple aarch64--darwin -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefix=CHECK-ARM
// RUN: %clang_cc1 -x c++ -std=c++11 \
// RUN:         -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN:         -triple armv7--darwin -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefix=CHECK-ARM
// LP64 targets use 'long' as 'int' for MS intrinsics (-fms-extensions)
#ifdef __LP64__
#define LONG int
#else
#define LONG long
#endif

// Give the test functions C linkage when compiled as C++ (the RUN lines
// above compile this file both as C and as C++11).
#ifdef __cplusplus
extern "C" {
#endif
49 unsigned char test_BitScanForward(unsigned LONG *Index, unsigned LONG Mask) {
50 return _BitScanForward(Index, Mask);
52 // CHECK: define{{.*}}i8 @test_BitScanForward(ptr {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
53 // CHECK: [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i32 %Mask, 0
54 // CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
55 // CHECK: [[END_LABEL]]:
56 // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
57 // CHECK: ret i8 [[RESULT]]
58 // CHECK: [[ISNOTZERO_LABEL]]:
59 // CHECK: [[INDEX:%[0-9]+]] = tail call range(i32 0, 33) i32 @llvm.cttz.i32(i32 %Mask, i1 true)
60 // CHECK: store i32 [[INDEX]], ptr %Index, align 4
61 // CHECK: br label %[[END_LABEL]]
63 unsigned char test_BitScanReverse(unsigned LONG *Index, unsigned LONG Mask) {
64 return _BitScanReverse(Index, Mask);
66 // CHECK: define{{.*}}i8 @test_BitScanReverse(ptr {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
67 // CHECK: [[ISNOTZERO:%[0-9]+]] = icmp eq i32 %Mask, 0
68 // CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
69 // CHECK: [[END_LABEL]]:
70 // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
71 // CHECK: ret i8 [[RESULT]]
72 // CHECK: [[ISNOTZERO_LABEL]]:
73 // CHECK: [[REVINDEX:%[0-9]+]] = tail call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 %Mask, i1 true)
74 // CHECK: [[INDEX:%[0-9]+]] = xor i32 [[REVINDEX]], 31
75 // CHECK: store i32 [[INDEX]], ptr %Index, align 4
76 // CHECK: br label %[[END_LABEL]]
78 #if defined(__x86_64__)
79 unsigned char test_BitScanForward64(unsigned LONG *Index, unsigned __int64 Mask) {
80 return _BitScanForward64(Index, Mask);
82 // CHECK: define{{.*}}i8 @test_BitScanForward64(ptr {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
83 // CHECK: [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i64 %Mask, 0
84 // CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
85 // CHECK: [[END_LABEL]]:
86 // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
87 // CHECK: ret i8 [[RESULT]]
88 // CHECK: [[ISNOTZERO_LABEL]]:
89 // CHECK: [[INDEX:%[0-9]+]] = tail call range(i64 0, 65) i64 @llvm.cttz.i64(i64 %Mask, i1 true)
90 // CHECK: [[TRUNC_INDEX:%[0-9]+]] = trunc nuw nsw i64 [[INDEX]] to i32
91 // CHECK: store i32 [[TRUNC_INDEX]], ptr %Index, align 4
92 // CHECK: br label %[[END_LABEL]]
94 unsigned char test_BitScanReverse64(unsigned LONG *Index, unsigned __int64 Mask) {
95 return _BitScanReverse64(Index, Mask);
97 // CHECK: define{{.*}}i8 @test_BitScanReverse64(ptr {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
98 // CHECK: [[ISNOTZERO:%[0-9]+]] = icmp eq i64 %Mask, 0
99 // CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
100 // CHECK: [[END_LABEL]]:
101 // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
102 // CHECK: ret i8 [[RESULT]]
103 // CHECK: [[ISNOTZERO_LABEL]]:
104 // CHECK: [[REVINDEX:%[0-9]+]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %Mask, i1 true)
105 // CHECK: [[TRUNC_REVINDEX:%[0-9]+]] = trunc nuw nsw i64 [[REVINDEX]] to i32
106 // CHECK: [[INDEX:%[0-9]+]] = xor i32 [[TRUNC_REVINDEX]], 63
107 // CHECK: store i32 [[INDEX]], ptr %Index, align 4
108 // CHECK: br label %[[END_LABEL]]
109 #endif
111 LONG test_InterlockedExchange(LONG volatile *value, LONG mask) {
112 return _InterlockedExchange(value, mask);
114 // CHECK: define{{.*}}i32 @test_InterlockedExchange(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
115 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask seq_cst, align 4
116 // CHECK: ret i32 [[RESULT:%[0-9]+]]
117 // CHECK: }
119 LONG test_InterlockedExchangeAdd(LONG volatile *value, LONG mask) {
120 return _InterlockedExchangeAdd(value, mask);
122 // CHECK: define{{.*}}i32 @test_InterlockedExchangeAdd(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
123 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask seq_cst, align 4
124 // CHECK: ret i32 [[RESULT:%[0-9]+]]
125 // CHECK: }
127 LONG test_InterlockedExchangeSub(LONG volatile *value, LONG mask) {
128 return _InterlockedExchangeSub(value, mask);
130 // CHECK: define{{.*}}i32 @test_InterlockedExchangeSub(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
131 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i32 %mask seq_cst, align 4
132 // CHECK: ret i32 [[RESULT:%[0-9]+]]
133 // CHECK: }
135 LONG test_InterlockedOr(LONG volatile *value, LONG mask) {
136 return _InterlockedOr(value, mask);
138 // CHECK: define{{.*}}i32 @test_InterlockedOr(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
139 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask seq_cst, align 4
140 // CHECK: ret i32 [[RESULT:%[0-9]+]]
141 // CHECK: }
143 LONG test_InterlockedXor(LONG volatile *value, LONG mask) {
144 return _InterlockedXor(value, mask);
146 // CHECK: define{{.*}}i32 @test_InterlockedXor(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
147 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask seq_cst, align 4
148 // CHECK: ret i32 [[RESULT:%[0-9]+]]
149 // CHECK: }
151 LONG test_InterlockedAnd(LONG volatile *value, LONG mask) {
152 return _InterlockedAnd(value, mask);
154 // CHECK: define{{.*}}i32 @test_InterlockedAnd(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
155 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask seq_cst, align 4
156 // CHECK: ret i32 [[RESULT:%[0-9]+]]
157 // CHECK: }
159 LONG test_InterlockedCompareExchange(LONG volatile *Destination, LONG Exchange, LONG Comperand) {
160 return _InterlockedCompareExchange(Destination, Exchange, Comperand);
162 // CHECK: define{{.*}}i32 @test_InterlockedCompareExchange(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
163 // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange seq_cst seq_cst, align 4
164 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
165 // CHECK: ret i32 [[RESULT]]
166 // CHECK: }
168 LONG test_InterlockedIncrement(LONG volatile *Addend) {
169 return _InterlockedIncrement(Addend);
171 // CHECK: define{{.*}}i32 @test_InterlockedIncrement(ptr{{[a-z_ ]*}}%Addend){{.*}}{
172 // CHECK: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i32 1 seq_cst, align 4
173 // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
174 // CHECK: ret i32 [[RESULT]]
175 // CHECK: }
177 LONG test_InterlockedDecrement(LONG volatile *Addend) {
178 return _InterlockedDecrement(Addend);
180 // CHECK: define{{.*}}i32 @test_InterlockedDecrement(ptr{{[a-z_ ]*}}%Addend){{.*}}{
181 // CHECK: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 seq_cst, align 4
182 // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
183 // CHECK: ret i32 [[RESULT]]
184 // CHECK: }
186 unsigned short test__lzcnt16(unsigned short x) {
187 return __lzcnt16(x);
189 // CHECK: i16 @test__lzcnt16
190 // CHECK: [[RESULT:%[0-9]+]] = tail call range(i16 0, 17) i16 @llvm.ctlz.i16(i16 %x, i1 false)
191 // CHECK: ret i16 [[RESULT]]
192 // CHECK: }
194 unsigned int test__lzcnt(unsigned int x) {
195 return __lzcnt(x);
197 // CHECK: i32 @test__lzcnt
198 // CHECK: [[RESULT:%[0-9]+]] = tail call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 %x, i1 false)
199 // CHECK: ret i32 [[RESULT]]
200 // CHECK: }
202 unsigned __int64 test__lzcnt64(unsigned __int64 x) {
203 return __lzcnt64(x);
205 // CHECK: i64 @test__lzcnt64
206 // CHECK: [[RESULT:%[0-9]+]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %x, i1 false)
207 // CHECK: ret i64 [[RESULT]]
208 // CHECK: }
210 unsigned short test__popcnt16(unsigned short x) {
211 return __popcnt16(x);
213 // CHECK: i16 @test__popcnt16
214 // CHECK: [[RESULT:%[0-9]+]] = tail call range(i16 0, 17) i16 @llvm.ctpop.i16(i16 %x)
215 // CHECK: ret i16 [[RESULT]]
216 // CHECK: }
218 unsigned int test__popcnt(unsigned int x) {
219 return __popcnt(x);
221 // CHECK: i32 @test__popcnt
222 // CHECK: [[RESULT:%[0-9]+]] = tail call range(i32 0, 33) i32 @llvm.ctpop.i32(i32 %x)
223 // CHECK: ret i32 [[RESULT]]
224 // CHECK: }
226 unsigned __int64 test__popcnt64(unsigned __int64 x) {
227 return __popcnt64(x);
229 // CHECK: i64 @test__popcnt64
230 // CHECK: [[RESULT:%[0-9]+]] = tail call range(i64 0, 65) i64 @llvm.ctpop.i64(i64 %x)
231 // CHECK: ret i64 [[RESULT]]
232 // CHECK: }
// _InterlockedAdd/_InterlockedAdd64 are AArch64-only; they return the new
// (post-add) value, hence the extra add after the atomicrmw.
#if defined(__aarch64__)
LONG test_InterlockedAdd(LONG volatile *Addend, LONG Value) {
  return _InterlockedAdd(Addend, Value);
}

// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAdd(ptr{{[a-z_ ]*}}%Addend, i32 noundef %Value) {{.*}} {
// CHECK-ARM-ARM64: %[[OLDVAL:[0-9]+]] = atomicrmw add ptr %Addend, i32 %Value seq_cst, align 4
// CHECK-ARM-ARM64: %[[NEWVAL:[0-9]+]] = add i32 %[[OLDVAL:[0-9]+]], %Value
// CHECK-ARM-ARM64: ret i32 %[[NEWVAL:[0-9]+]]

__int64 test_InterlockedAdd64(__int64 volatile *Addend, __int64 Value) {
  return _InterlockedAdd64(Addend, Value);
}

// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAdd64(ptr{{[a-z_ ]*}}%Addend, i64 noundef %Value) {{.*}} {
// CHECK-ARM-ARM64: %[[OLDVAL:[0-9]+]] = atomicrmw add ptr %Addend, i64 %Value seq_cst, align 8
// CHECK-ARM-ARM64: %[[NEWVAL:[0-9]+]] = add i64 %[[OLDVAL:[0-9]+]], %Value
// CHECK-ARM-ARM64: ret i64 %[[NEWVAL:[0-9]+]]
#endif
254 #if defined(__arm__) || defined(__aarch64__)
255 LONG test_InterlockedExchangeAdd_acq(LONG volatile *value, LONG mask) {
256 return _InterlockedExchangeAdd_acq(value, mask);
258 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchangeAdd_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
259 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask acquire, align 4
260 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
261 // CHECK-ARM: }
262 LONG test_InterlockedExchangeAdd_rel(LONG volatile *value, LONG mask) {
263 return _InterlockedExchangeAdd_rel(value, mask);
265 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchangeAdd_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
266 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask release, align 4
267 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
268 // CHECK-ARM: }
269 LONG test_InterlockedExchangeAdd_nf(LONG volatile *value, LONG mask) {
270 return _InterlockedExchangeAdd_nf(value, mask);
272 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchangeAdd_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
273 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask monotonic, align 4
274 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
275 // CHECK-ARM: }
277 LONG test_InterlockedExchange_acq(LONG volatile *value, LONG mask) {
278 return _InterlockedExchange_acq(value, mask);
280 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchange_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
281 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask acquire, align 4
282 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
283 // CHECK-ARM: }
284 LONG test_InterlockedExchange_rel(LONG volatile *value, LONG mask) {
285 return _InterlockedExchange_rel(value, mask);
287 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchange_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
288 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask release, align 4
289 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
290 // CHECK-ARM: }
291 LONG test_InterlockedExchange_nf(LONG volatile *value, LONG mask) {
292 return _InterlockedExchange_nf(value, mask);
294 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchange_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
295 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask monotonic, align 4
296 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
297 // CHECK-ARM: }
299 LONG test_InterlockedCompareExchange_acq(LONG volatile *Destination, LONG Exchange, LONG Comperand) {
300 return _InterlockedCompareExchange_acq(Destination, Exchange, Comperand);
302 // CHECK-ARM: define{{.*}}i32 @test_InterlockedCompareExchange_acq(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
303 // CHECK-ARM: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange acquire acquire, align 4
304 // CHECK-ARM: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
305 // CHECK-ARM: ret i32 [[RESULT]]
306 // CHECK-ARM: }
308 LONG test_InterlockedCompareExchange_rel(LONG volatile *Destination, LONG Exchange, LONG Comperand) {
309 return _InterlockedCompareExchange_rel(Destination, Exchange, Comperand);
311 // CHECK-ARM: define{{.*}}i32 @test_InterlockedCompareExchange_rel(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
312 // CHECK-ARM: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange release monotonic, align 4
313 // CHECK-ARM: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
314 // CHECK-ARM: ret i32 [[RESULT]]
315 // CHECK-ARM: }
317 LONG test_InterlockedCompareExchange_nf(LONG volatile *Destination, LONG Exchange, LONG Comperand) {
318 return _InterlockedCompareExchange_nf(Destination, Exchange, Comperand);
320 // CHECK-ARM: define{{.*}}i32 @test_InterlockedCompareExchange_nf(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
321 // CHECK-ARM: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange monotonic monotonic, align 4
322 // CHECK-ARM: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
323 // CHECK-ARM: ret i32 [[RESULT]]
324 // CHECK-ARM: }
326 LONG test_InterlockedOr_acq(LONG volatile *value, LONG mask) {
327 return _InterlockedOr_acq(value, mask);
329 // CHECK-ARM: define{{.*}}i32 @test_InterlockedOr_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
330 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask acquire, align 4
331 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
332 // CHECK-ARM: }
334 LONG test_InterlockedOr_rel(LONG volatile *value, LONG mask) {
335 return _InterlockedOr_rel(value, mask);
337 // CHECK-ARM: define{{.*}}i32 @test_InterlockedOr_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
338 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask release, align 4
339 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
340 // CHECK-ARM: }
342 LONG test_InterlockedOr_nf(LONG volatile *value, LONG mask) {
343 return _InterlockedOr_nf(value, mask);
345 // CHECK-ARM: define{{.*}}i32 @test_InterlockedOr_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
346 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask monotonic, align 4
347 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
348 // CHECK-ARM: }
350 LONG test_InterlockedXor_acq(LONG volatile *value, LONG mask) {
351 return _InterlockedXor_acq(value, mask);
353 // CHECK-ARM: define{{.*}}i32 @test_InterlockedXor_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
354 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask acquire, align 4
355 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
356 // CHECK-ARM: }
358 LONG test_InterlockedXor_rel(LONG volatile *value, LONG mask) {
359 return _InterlockedXor_rel(value, mask);
361 // CHECK-ARM: define{{.*}}i32 @test_InterlockedXor_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
362 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask release, align 4
363 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
364 // CHECK-ARM: }
366 LONG test_InterlockedXor_nf(LONG volatile *value, LONG mask) {
367 return _InterlockedXor_nf(value, mask);
369 // CHECK-ARM: define{{.*}}i32 @test_InterlockedXor_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
370 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask monotonic, align 4
371 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
372 // CHECK-ARM: }
374 LONG test_InterlockedAnd_acq(LONG volatile *value, LONG mask) {
375 return _InterlockedAnd_acq(value, mask);
377 // CHECK-ARM: define{{.*}}i32 @test_InterlockedAnd_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
378 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask acquire, align 4
379 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
380 // CHECK-ARM: }
382 LONG test_InterlockedAnd_rel(LONG volatile *value, LONG mask) {
383 return _InterlockedAnd_rel(value, mask);
385 // CHECK-ARM: define{{.*}}i32 @test_InterlockedAnd_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
386 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask release, align 4
387 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
388 // CHECK-ARM: }
390 LONG test_InterlockedAnd_nf(LONG volatile *value, LONG mask) {
391 return _InterlockedAnd_nf(value, mask);
393 // CHECK-ARM: define{{.*}}i32 @test_InterlockedAnd_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
394 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask monotonic, align 4
395 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
396 // CHECK-ARM: }
399 LONG test_InterlockedIncrement_acq(LONG volatile *Addend) {
400 return _InterlockedIncrement_acq(Addend);
402 // CHECK-ARM: define{{.*}}i32 @test_InterlockedIncrement_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
403 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i32 1 acquire, align 4
404 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
405 // CHECK-ARM: ret i32 [[RESULT]]
406 // CHECK-ARM: }
408 LONG test_InterlockedIncrement_rel(LONG volatile *Addend) {
409 return _InterlockedIncrement_rel(Addend);
411 // CHECK-ARM: define{{.*}}i32 @test_InterlockedIncrement_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
412 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i32 1 release, align 4
413 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
414 // CHECK-ARM: ret i32 [[RESULT]]
415 // CHECK-ARM: }
417 LONG test_InterlockedIncrement_nf(LONG volatile *Addend) {
418 return _InterlockedIncrement_nf(Addend);
420 // CHECK-ARM: define{{.*}}i32 @test_InterlockedIncrement_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
421 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i32 1 monotonic, align 4
422 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
423 // CHECK-ARM: ret i32 [[RESULT]]
424 // CHECK-ARM: }
426 LONG test_InterlockedDecrement_acq(LONG volatile *Addend) {
427 return _InterlockedDecrement_acq(Addend);
429 // CHECK-ARM: define{{.*}}i32 @test_InterlockedDecrement_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
430 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 acquire, align 4
431 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
432 // CHECK-ARM: ret i32 [[RESULT]]
433 // CHECK-ARM: }
435 LONG test_InterlockedDecrement_rel(LONG volatile *Addend) {
436 return _InterlockedDecrement_rel(Addend);
438 // CHECK-ARM: define{{.*}}i32 @test_InterlockedDecrement_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
439 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 release, align 4
440 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
441 // CHECK-ARM: ret i32 [[RESULT]]
442 // CHECK-ARM: }
444 LONG test_InterlockedDecrement_nf(LONG volatile *Addend) {
445 return _InterlockedDecrement_nf(Addend);
447 // CHECK-ARM: define{{.*}}i32 @test_InterlockedDecrement_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
448 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 monotonic, align 4
449 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
450 // CHECK-ARM: ret i32 [[RESULT]]
451 // CHECK-ARM: }
452 #endif
// Close the extern "C" block opened near the top of the file (C++ builds only).
#ifdef __cplusplus
}
#endif
// Test constexpr handling: each intrinsic must fold at compile time in C++11;
// a wrong value would select the -1 arm and produce a negative array size.
#if defined(__cplusplus) && (__cplusplus >= 201103L)

char popcnt16_0[__popcnt16(0x0000) == 0 ? 1 : -1];
char popcnt16_1[__popcnt16(0x10F0) == 5 ? 1 : -1];

char popcnt_0[__popcnt(0x00000000) == 0 ? 1 : -1];
char popcnt_1[__popcnt(0x100000F0) == 5 ? 1 : -1];

char popcnt64_0[__popcnt64(0x0000000000000000ULL) == 0 ? 1 : -1];
char popcnt64_1[__popcnt64(0xF00000F000000001ULL) == 9 ? 1 : -1];

#define BITSIZE(x) (sizeof(x) * 8)
char lzcnt16_0[__lzcnt16(1) == BITSIZE(short) - 1 ? 1 : -1];
char lzcnt16_1[__lzcnt16(1 << (BITSIZE(short) - 1)) == 0 ? 1 : -1];
char lzcnt16_2[__lzcnt16(0) == BITSIZE(short) ? 1 : -1];

char lzcnt_0[__lzcnt(1) == BITSIZE(int) - 1 ? 1 : -1];
char lzcnt_1[__lzcnt(1 << (BITSIZE(int) - 1)) == 0 ? 1 : -1];
char lzcnt_2[__lzcnt(0) == BITSIZE(int) ? 1 : -1];

char lzcnt64_0[__lzcnt64(1ULL) == BITSIZE(__int64) - 1 ? 1 : -1];
char lzcnt64_1[__lzcnt64(1ULL << (BITSIZE(__int64) - 1)) == 0 ? 1 : -1];
char lzcnt64_2[__lzcnt64(0ULL) == BITSIZE(__int64) ? 1 : -1];
#undef BITSIZE

#endif