// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN: -triple x86_64--darwin -Oz -emit-llvm %s -o - \
// RUN: | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN: -triple x86_64--linux -Oz -emit-llvm %s -o - \
// RUN: | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN: -triple aarch64--darwin -Oz -emit-llvm %s -o - \
// RUN: | FileCheck %s --check-prefix=CHECK-ARM-ARM64
// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN: -triple aarch64--darwin -Oz -emit-llvm %s -o - \
// RUN: | FileCheck %s --check-prefix=CHECK-ARM
// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -fms-extensions -Wno-implicit-function-declaration \
// RUN: -triple armv7--darwin -Oz -emit-llvm %s -o - \
// RUN: | FileCheck %s --check-prefix=CHECK-ARM
// LP64 targets use 'long' as 'int' for MS intrinsics (-fms-extensions).
// Every test below expects i32 in the IR, so LONG must be a 32-bit type
// on both LP64 (where 'long' is 64-bit) and non-LP64 targets.
#ifdef __LP64__
#define LONG int
#else
#define LONG long
#endif
24 unsigned char test_BitScanForward(unsigned LONG
*Index
, unsigned LONG Mask
) {
25 return _BitScanForward(Index
, Mask
);
27 // CHECK: define{{.*}}i8 @test_BitScanForward(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
28 // CHECK: [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i32 %Mask, 0
29 // CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
30 // CHECK: [[END_LABEL]]:
31 // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
32 // CHECK: ret i8 [[RESULT]]
33 // CHECK: [[ISNOTZERO_LABEL]]:
34 // CHECK: [[INDEX:%[0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %Mask, i1 true)
35 // CHECK: store i32 [[INDEX]], i32* %Index, align 4
36 // CHECK: br label %[[END_LABEL]]
38 unsigned char test_BitScanReverse(unsigned LONG
*Index
, unsigned LONG Mask
) {
39 return _BitScanReverse(Index
, Mask
);
41 // CHECK: define{{.*}}i8 @test_BitScanReverse(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
42 // CHECK: [[ISNOTZERO:%[0-9]+]] = icmp eq i32 %Mask, 0
43 // CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
44 // CHECK: [[END_LABEL]]:
45 // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
46 // CHECK: ret i8 [[RESULT]]
47 // CHECK: [[ISNOTZERO_LABEL]]:
48 // CHECK: [[REVINDEX:%[0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %Mask, i1 true)
49 // CHECK: [[INDEX:%[0-9]+]] = xor i32 [[REVINDEX]], 31
50 // CHECK: store i32 [[INDEX]], i32* %Index, align 4
51 // CHECK: br label %[[END_LABEL]]
53 #if defined(__x86_64__)
54 unsigned char test_BitScanForward64(unsigned LONG
*Index
, unsigned __int64 Mask
) {
55 return _BitScanForward64(Index
, Mask
);
57 // CHECK: define{{.*}}i8 @test_BitScanForward64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
58 // CHECK: [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i64 %Mask, 0
59 // CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
60 // CHECK: [[END_LABEL]]:
61 // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
62 // CHECK: ret i8 [[RESULT]]
63 // CHECK: [[ISNOTZERO_LABEL]]:
64 // CHECK: [[INDEX:%[0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %Mask, i1 true)
65 // CHECK: [[TRUNC_INDEX:%[0-9]+]] = trunc i64 [[INDEX]] to i32
66 // CHECK: store i32 [[TRUNC_INDEX]], i32* %Index, align 4
67 // CHECK: br label %[[END_LABEL]]
69 unsigned char test_BitScanReverse64(unsigned LONG
*Index
, unsigned __int64 Mask
) {
70 return _BitScanReverse64(Index
, Mask
);
72 // CHECK: define{{.*}}i8 @test_BitScanReverse64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
73 // CHECK: [[ISNOTZERO:%[0-9]+]] = icmp eq i64 %Mask, 0
74 // CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
75 // CHECK: [[END_LABEL]]:
76 // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
77 // CHECK: ret i8 [[RESULT]]
78 // CHECK: [[ISNOTZERO_LABEL]]:
79 // CHECK: [[REVINDEX:%[0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %Mask, i1 true)
80 // CHECK: [[TRUNC_REVINDEX:%[0-9]+]] = trunc i64 [[REVINDEX]] to i32
81 // CHECK: [[INDEX:%[0-9]+]] = xor i32 [[TRUNC_REVINDEX]], 63
82 // CHECK: store i32 [[INDEX]], i32* %Index, align 4
83 // CHECK: br label %[[END_LABEL]]
86 LONG
test_InterlockedExchange(LONG
volatile *value
, LONG mask
) {
87 return _InterlockedExchange(value
, mask
);
89 // CHECK: define{{.*}}i32 @test_InterlockedExchange(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
90 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask seq_cst, align 4
91 // CHECK: ret i32 [[RESULT:%[0-9]+]]
94 LONG
test_InterlockedExchangeAdd(LONG
volatile *value
, LONG mask
) {
95 return _InterlockedExchangeAdd(value
, mask
);
97 // CHECK: define{{.*}}i32 @test_InterlockedExchangeAdd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
98 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask seq_cst, align 4
99 // CHECK: ret i32 [[RESULT:%[0-9]+]]
102 LONG
test_InterlockedExchangeSub(LONG
volatile *value
, LONG mask
) {
103 return _InterlockedExchangeSub(value
, mask
);
105 // CHECK: define{{.*}}i32 @test_InterlockedExchangeSub(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
106 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i32* %value, i32 %mask seq_cst, align 4
107 // CHECK: ret i32 [[RESULT:%[0-9]+]]
110 LONG
test_InterlockedOr(LONG
volatile *value
, LONG mask
) {
111 return _InterlockedOr(value
, mask
);
113 // CHECK: define{{.*}}i32 @test_InterlockedOr(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
114 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask seq_cst, align 4
115 // CHECK: ret i32 [[RESULT:%[0-9]+]]
118 LONG
test_InterlockedXor(LONG
volatile *value
, LONG mask
) {
119 return _InterlockedXor(value
, mask
);
121 // CHECK: define{{.*}}i32 @test_InterlockedXor(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
122 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask seq_cst, align 4
123 // CHECK: ret i32 [[RESULT:%[0-9]+]]
126 LONG
test_InterlockedAnd(LONG
volatile *value
, LONG mask
) {
127 return _InterlockedAnd(value
, mask
);
129 // CHECK: define{{.*}}i32 @test_InterlockedAnd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
130 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask seq_cst, align 4
131 // CHECK: ret i32 [[RESULT:%[0-9]+]]
134 LONG
test_InterlockedCompareExchange(LONG
volatile *Destination
, LONG Exchange
, LONG Comperand
) {
135 return _InterlockedCompareExchange(Destination
, Exchange
, Comperand
);
137 // CHECK: define{{.*}}i32 @test_InterlockedCompareExchange(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
138 // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange seq_cst seq_cst, align 4
139 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
140 // CHECK: ret i32 [[RESULT]]
143 LONG
test_InterlockedIncrement(LONG
volatile *Addend
) {
144 return _InterlockedIncrement(Addend
);
146 // CHECK: define{{.*}}i32 @test_InterlockedIncrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
147 // CHECK: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 seq_cst, align 4
148 // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
149 // CHECK: ret i32 [[RESULT]]
152 LONG
test_InterlockedDecrement(LONG
volatile *Addend
) {
153 return _InterlockedDecrement(Addend
);
155 // CHECK: define{{.*}}i32 @test_InterlockedDecrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
156 // CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 seq_cst, align 4
157 // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
158 // CHECK: ret i32 [[RESULT]]
// __lzcnt16 lowers to llvm.ctlz.i16 (defined for a zero input: i1 false).
unsigned short test__lzcnt16(unsigned short x) {
  return __lzcnt16(x);
}
// CHECK: i16 @test__lzcnt16
// CHECK: [[RESULT:%[0-9]+]] = tail call i16 @llvm.ctlz.i16(i16 %x, i1 false)
// CHECK: ret i16 [[RESULT]]
// __lzcnt lowers to llvm.ctlz.i32 (defined for a zero input: i1 false).
unsigned int test__lzcnt(unsigned int x) {
  return __lzcnt(x);
}
// CHECK: i32 @test__lzcnt
// CHECK: [[RESULT:%[0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
// CHECK: ret i32 [[RESULT]]
177 unsigned __int64
test__lzcnt64(unsigned __int64 x
) {
180 // CHECK: i64 @test__lzcnt64
181 // CHECK: [[RESULT:%[0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %x, i1 false)
182 // CHECK: ret i64 [[RESULT]]
// __popcnt16 lowers to llvm.ctpop.i16.
unsigned short test__popcnt16(unsigned short x) {
  return __popcnt16(x);
}
// CHECK: i16 @test__popcnt16
// CHECK: [[RESULT:%[0-9]+]] = tail call i16 @llvm.ctpop.i16(i16 %x)
// CHECK: ret i16 [[RESULT]]
// __popcnt lowers to llvm.ctpop.i32.
unsigned int test__popcnt(unsigned int x) {
  return __popcnt(x);
}
// CHECK: i32 @test__popcnt
// CHECK: [[RESULT:%[0-9]+]] = tail call i32 @llvm.ctpop.i32(i32 %x)
// CHECK: ret i32 [[RESULT]]
201 unsigned __int64
test__popcnt64(unsigned __int64 x
) {
202 return __popcnt64(x
);
204 // CHECK: i64 @test__popcnt64
205 // CHECK: [[RESULT:%[0-9]+]] = tail call i64 @llvm.ctpop.i64(i64 %x)
206 // CHECK: ret i64 [[RESULT]]
#if defined(__aarch64__)
// AArch64-only _InterlockedAdd: atomic add, seq_cst; returns the NEW value.
LONG test_InterlockedAdd(LONG volatile *Addend, LONG Value) {
  return _InterlockedAdd(Addend, Value);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAdd(i32*{{[a-z_ ]*}}%Addend, i32 noundef %Value) {{.*}} {
// CHECK-ARM-ARM64: %[[OLDVAL:[0-9]+]] = atomicrmw add i32* %Addend, i32 %Value seq_cst, align 4
// CHECK-ARM-ARM64: %[[NEWVAL:[0-9]+]] = add i32 %[[OLDVAL:[0-9]+]], %Value
// CHECK-ARM-ARM64: ret i32 %[[NEWVAL:[0-9]+]]
#endif
220 #if defined(__arm__) || defined(__aarch64__)
221 LONG
test_InterlockedExchangeAdd_acq(LONG
volatile *value
, LONG mask
) {
222 return _InterlockedExchangeAdd_acq(value
, mask
);
224 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchangeAdd_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
225 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask acquire, align 4
226 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
228 LONG
test_InterlockedExchangeAdd_rel(LONG
volatile *value
, LONG mask
) {
229 return _InterlockedExchangeAdd_rel(value
, mask
);
231 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchangeAdd_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
232 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask release, align 4
233 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
235 LONG
test_InterlockedExchangeAdd_nf(LONG
volatile *value
, LONG mask
) {
236 return _InterlockedExchangeAdd_nf(value
, mask
);
238 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchangeAdd_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
239 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask monotonic, align 4
240 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
243 LONG
test_InterlockedExchange_acq(LONG
volatile *value
, LONG mask
) {
244 return _InterlockedExchange_acq(value
, mask
);
246 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchange_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
247 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask acquire, align 4
248 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
250 LONG
test_InterlockedExchange_rel(LONG
volatile *value
, LONG mask
) {
251 return _InterlockedExchange_rel(value
, mask
);
253 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchange_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
254 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask release, align 4
255 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
257 LONG
test_InterlockedExchange_nf(LONG
volatile *value
, LONG mask
) {
258 return _InterlockedExchange_nf(value
, mask
);
260 // CHECK-ARM: define{{.*}}i32 @test_InterlockedExchange_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
261 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask monotonic, align 4
262 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
265 LONG
test_InterlockedCompareExchange_acq(LONG
volatile *Destination
, LONG Exchange
, LONG Comperand
) {
266 return _InterlockedCompareExchange_acq(Destination
, Exchange
, Comperand
);
268 // CHECK-ARM: define{{.*}}i32 @test_InterlockedCompareExchange_acq(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
269 // CHECK-ARM: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange acquire acquire, align 4
270 // CHECK-ARM: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
271 // CHECK-ARM: ret i32 [[RESULT]]
274 LONG
test_InterlockedCompareExchange_rel(LONG
volatile *Destination
, LONG Exchange
, LONG Comperand
) {
275 return _InterlockedCompareExchange_rel(Destination
, Exchange
, Comperand
);
277 // CHECK-ARM: define{{.*}}i32 @test_InterlockedCompareExchange_rel(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
278 // CHECK-ARM: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange release monotonic, align 4
279 // CHECK-ARM: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
280 // CHECK-ARM: ret i32 [[RESULT]]
283 LONG
test_InterlockedCompareExchange_nf(LONG
volatile *Destination
, LONG Exchange
, LONG Comperand
) {
284 return _InterlockedCompareExchange_nf(Destination
, Exchange
, Comperand
);
286 // CHECK-ARM: define{{.*}}i32 @test_InterlockedCompareExchange_nf(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
287 // CHECK-ARM: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange monotonic monotonic, align 4
288 // CHECK-ARM: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
289 // CHECK-ARM: ret i32 [[RESULT]]
292 LONG
test_InterlockedOr_acq(LONG
volatile *value
, LONG mask
) {
293 return _InterlockedOr_acq(value
, mask
);
295 // CHECK-ARM: define{{.*}}i32 @test_InterlockedOr_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
296 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask acquire, align 4
297 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
300 LONG
test_InterlockedOr_rel(LONG
volatile *value
, LONG mask
) {
301 return _InterlockedOr_rel(value
, mask
);
303 // CHECK-ARM: define{{.*}}i32 @test_InterlockedOr_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
304 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask release, align 4
305 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
308 LONG
test_InterlockedOr_nf(LONG
volatile *value
, LONG mask
) {
309 return _InterlockedOr_nf(value
, mask
);
311 // CHECK-ARM: define{{.*}}i32 @test_InterlockedOr_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
312 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask monotonic, align 4
313 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
316 LONG
test_InterlockedXor_acq(LONG
volatile *value
, LONG mask
) {
317 return _InterlockedXor_acq(value
, mask
);
319 // CHECK-ARM: define{{.*}}i32 @test_InterlockedXor_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
320 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask acquire, align 4
321 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
324 LONG
test_InterlockedXor_rel(LONG
volatile *value
, LONG mask
) {
325 return _InterlockedXor_rel(value
, mask
);
327 // CHECK-ARM: define{{.*}}i32 @test_InterlockedXor_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
328 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask release, align 4
329 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
332 LONG
test_InterlockedXor_nf(LONG
volatile *value
, LONG mask
) {
333 return _InterlockedXor_nf(value
, mask
);
335 // CHECK-ARM: define{{.*}}i32 @test_InterlockedXor_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
336 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask monotonic, align 4
337 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
340 LONG
test_InterlockedAnd_acq(LONG
volatile *value
, LONG mask
) {
341 return _InterlockedAnd_acq(value
, mask
);
343 // CHECK-ARM: define{{.*}}i32 @test_InterlockedAnd_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
344 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask acquire, align 4
345 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
348 LONG
test_InterlockedAnd_rel(LONG
volatile *value
, LONG mask
) {
349 return _InterlockedAnd_rel(value
, mask
);
351 // CHECK-ARM: define{{.*}}i32 @test_InterlockedAnd_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
352 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask release, align 4
353 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
356 LONG
test_InterlockedAnd_nf(LONG
volatile *value
, LONG mask
) {
357 return _InterlockedAnd_nf(value
, mask
);
359 // CHECK-ARM: define{{.*}}i32 @test_InterlockedAnd_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
360 // CHECK-ARM: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask monotonic, align 4
361 // CHECK-ARM: ret i32 [[RESULT:%[0-9]+]]
365 LONG
test_InterlockedIncrement_acq(LONG
volatile *Addend
) {
366 return _InterlockedIncrement_acq(Addend
);
368 // CHECK-ARM: define{{.*}}i32 @test_InterlockedIncrement_acq(i32*{{[a-z_ ]*}}%Addend){{.*}}{
369 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 acquire, align 4
370 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
371 // CHECK-ARM: ret i32 [[RESULT]]
374 LONG
test_InterlockedIncrement_rel(LONG
volatile *Addend
) {
375 return _InterlockedIncrement_rel(Addend
);
377 // CHECK-ARM: define{{.*}}i32 @test_InterlockedIncrement_rel(i32*{{[a-z_ ]*}}%Addend){{.*}}{
378 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 release, align 4
379 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
380 // CHECK-ARM: ret i32 [[RESULT]]
383 LONG
test_InterlockedIncrement_nf(LONG
volatile *Addend
) {
384 return _InterlockedIncrement_nf(Addend
);
386 // CHECK-ARM: define{{.*}}i32 @test_InterlockedIncrement_nf(i32*{{[a-z_ ]*}}%Addend){{.*}}{
387 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 monotonic, align 4
388 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
389 // CHECK-ARM: ret i32 [[RESULT]]
392 LONG
test_InterlockedDecrement_acq(LONG
volatile *Addend
) {
393 return _InterlockedDecrement_acq(Addend
);
395 // CHECK-ARM: define{{.*}}i32 @test_InterlockedDecrement_acq(i32*{{[a-z_ ]*}}%Addend){{.*}}{
396 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 acquire, align 4
397 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
398 // CHECK-ARM: ret i32 [[RESULT]]
401 LONG
test_InterlockedDecrement_rel(LONG
volatile *Addend
) {
402 return _InterlockedDecrement_rel(Addend
);
404 // CHECK-ARM: define{{.*}}i32 @test_InterlockedDecrement_rel(i32*{{[a-z_ ]*}}%Addend){{.*}}{
405 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 release, align 4
406 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
407 // CHECK-ARM: ret i32 [[RESULT]]
410 LONG
test_InterlockedDecrement_nf(LONG
volatile *Addend
) {
411 return _InterlockedDecrement_nf(Addend
);
413 // CHECK-ARM: define{{.*}}i32 @test_InterlockedDecrement_nf(i32*{{[a-z_ ]*}}%Addend){{.*}}{
414 // CHECK-ARM: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 monotonic, align 4
415 // CHECK-ARM: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
416 // CHECK-ARM: ret i32 [[RESULT]]