// RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
// RUN:         -triple i686--windows -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s -check-prefixes CHECK,CHECK-I386,CHECK-INTEL
// RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
// RUN:         -triple thumbv7--windows -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefixes CHECK,CHECK-ARM,CHECK-ARM-ARM64,CHECK-ARM-X64
// RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
// RUN:         -triple x86_64--windows -Oz -emit-llvm -target-feature +cx16 %s -o - \
// RUN:         | FileCheck %s --check-prefixes CHECK,CHECK-X64,CHECK-ARM-X64,CHECK-INTEL,CHECK-64
// RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
// RUN:         -triple aarch64-windows -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefixes CHECK-ARM-ARM64,CHECK-ARM-X64,CHECK-ARM64,CHECK-64
// intrin.h needs size_t, but -ffreestanding prevents us from getting it from
// stddef.h. Work around it with this typedef.
typedef __SIZE_TYPE__ size_t;

#include <intrin.h>
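
// The x86-only rep-string intrinsics below lower either to a volatile
// @llvm.memset (__stosb) or to inline "rep movs"/"rep stos" assembly; on
// i386 the movs variants additionally preserve ESI by swapping it in and
// out around the string operation, as the i386 checks verify.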
#if defined(__i386__) || defined(__x86_64__)
void test__stosb(unsigned char *Dest, unsigned char Data, size_t Count) {
  return __stosb(Dest, Data, Count);
}

// CHECK-I386: define{{.*}}void @test__stosb
// CHECK-I386: tail call void @llvm.memset.p0.i32(ptr align 1 %Dest, i8 %Data, i32 %Count, i1 true)
// CHECK-I386: ret void
// CHECK-I386: }

// CHECK-X64: define{{.*}}void @test__stosb
// CHECK-X64: tail call void @llvm.memset.p0.i64(ptr align 1 %Dest, i8 %Data, i64 %Count, i1 true)
// CHECK-X64: ret void
// CHECK-X64: }
void test__movsb(unsigned char *Dest, unsigned char *Src, size_t Count) {
  return __movsb(Dest, Src, Count);
}

// CHECK-I386-LABEL: define{{.*}} void @test__movsb
// CHECK-I386: tail call { ptr, ptr, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movsb\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i32 %Count)
// CHECK-I386: ret void
// CHECK-I386: }

// CHECK-X64-LABEL: define{{.*}} void @test__movsb
// CHECK-X64: call { ptr, ptr, i64 } asm sideeffect "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }
void test__stosw(unsigned short *Dest, unsigned short Data, size_t Count) {
  return __stosw(Dest, Data, Count);
}

// CHECK-I386-LABEL: define{{.*}} void @test__stosw
// CHECK-I386: call { ptr, i32 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, ptr %Dest, i32 %Count)
// CHECK-I386: ret void
// CHECK-I386: }

// CHECK-X64-LABEL: define{{.*}} void @test__stosw
// CHECK-X64: call { ptr, i64 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, ptr %Dest, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }
void test__movsw(unsigned short *Dest, unsigned short *Src, size_t Count) {
  return __movsw(Dest, Src, Count);
}

// CHECK-I386-LABEL: define{{.*}} void @test__movsw
// CHECK-I386: tail call { ptr, ptr, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movsw\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i32 %Count)
// CHECK-I386: ret void
// CHECK-I386: }

// CHECK-X64-LABEL: define{{.*}} void @test__movsw
// CHECK-X64: call { ptr, ptr, i64 } asm sideeffect "rep movsw", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }
void test__stosd(unsigned long *Dest, unsigned long Data, size_t Count) {
  return __stosd(Dest, Data, Count);
}

// CHECK-I386-LABEL: define{{.*}} void @test__stosd
// CHECK-I386: call { ptr, i32 } asm sideeffect "rep stos$(l$|d$)", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, ptr %Dest, i32 %Count)
// CHECK-I386: ret void
// CHECK-I386: }

// CHECK-X64-LABEL: define{{.*}} void @test__stosd
// CHECK-X64: call { ptr, i64 } asm sideeffect "rep stos$(l$|d$)", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, ptr %Dest, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }
void test__movsd(unsigned long *Dest, unsigned long *Src, size_t Count) {
  return __movsd(Dest, Src, Count);
}

// CHECK-I386-LABEL: define{{.*}} void @test__movsd
// CHECK-I386: tail call { ptr, ptr, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movs$(l$|d$)\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i32 %Count)
// CHECK-I386: ret void
// CHECK-I386: }

// CHECK-X64-LABEL: define{{.*}} void @test__movsd
// CHECK-X64: call { ptr, ptr, i64 } asm sideeffect "rep movs$(l$|d$)", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }

#if defined(__x86_64__)
void test__stosq(unsigned __int64 *Dest, unsigned __int64 Data, size_t Count) {
  return __stosq(Dest, Data, Count);
}

// CHECK-X64-LABEL: define{{.*}} void @test__stosq
// CHECK-X64: call { ptr, i64 } asm sideeffect "rep stosq", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i64 %Data, ptr %Dest, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }
void test__movsq(unsigned __int64 *Dest, unsigned __int64 *Src, size_t Count) {
  return __movsq(Dest, Src, Count);
}

// CHECK-X64-LABEL: define{{.*}} void @test__movsq
// CHECK-X64: call { ptr, ptr, i64 } asm sideeffect "rep movsq", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }
#endif
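
// __ud2 traps via @llvm.trap, while __int2c raises interrupt 0x2c through a
// noreturn inline-asm call.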
void test__ud2(void) {
  __ud2();
}
// CHECK-INTEL-LABEL: define{{.*}} void @test__ud2()
// CHECK-INTEL: call void @llvm.trap()

void test__int2c(void) {
  __int2c();
}
// CHECK-INTEL-LABEL: define{{.*}} void @test__int2c()
// CHECK-INTEL: call void asm sideeffect "int $$0x2c", ""() #[[NORETURN:[0-9]+]]
#endif
void *test_ReturnAddress(void) {
  return _ReturnAddress();
}
// CHECK-LABEL: define{{.*}}ptr @test_ReturnAddress()
// CHECK: = tail call ptr @llvm.returnaddress(i32 0)
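
// _AddressOfReturnAddress maps straight onto @llvm.addressofreturnaddress
// and is only provided on x86, x64, and AArch64.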
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
void *test_AddressOfReturnAddress(void) {
  return _AddressOfReturnAddress();
}
// CHECK-INTEL-LABEL: define dso_local ptr @test_AddressOfReturnAddress()
// CHECK-INTEL: = tail call ptr @llvm.addressofreturnaddress.p0()
// CHECK-INTEL: ret ptr
#endif
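
// The 32-bit _BitScan tests pre-increment Index to verify that the intrinsics
// are expanded as real functions of their operands: a zero Mask yields 0
// without any store, otherwise the cttz/ctlz-derived bit index is stored
// through the advanced Index pointer and 1 is returned.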
unsigned char test_BitScanForward(unsigned long *Index, unsigned long Mask) {
  return _BitScanForward(++Index, Mask);
}
// CHECK: define{{.*}}i8 @test_BitScanForward(ptr {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK: [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i32 %Mask, 0
// CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK: [[END_LABEL]]:
// CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK: ret i8 [[RESULT]]
// CHECK: [[ISNOTZERO_LABEL]]:
// CHECK: [[IDXGEP:%[a-z0-9._]+]] = getelementptr inbounds nuw i8, ptr %Index, {{i64|i32}} 4
// CHECK: [[INDEX:%[0-9]+]] = tail call range(i32 0, 33) i32 @llvm.cttz.i32(i32 %Mask, i1 true)
// CHECK: store i32 [[INDEX]], ptr [[IDXGEP]], align 4
// CHECK: br label %[[END_LABEL]]
unsigned char test_BitScanReverse(unsigned long *Index, unsigned long Mask) {
  return _BitScanReverse(++Index, Mask);
}
// CHECK: define{{.*}}i8 @test_BitScanReverse(ptr {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK: [[ISNOTZERO:%[0-9]+]] = icmp eq i32 %Mask, 0
// CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK: [[END_LABEL]]:
// CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK: ret i8 [[RESULT]]
// CHECK: [[ISNOTZERO_LABEL]]:
// CHECK: [[IDXGEP:%[a-z0-9._]+]] = getelementptr inbounds nuw i8, ptr %Index, {{i64|i32}} 4
// CHECK: [[REVINDEX:%[0-9]+]] = tail call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 %Mask, i1 true)
// CHECK: [[INDEX:%[0-9]+]] = xor i32 [[REVINDEX]], 31
// CHECK: store i32 [[INDEX]], ptr [[IDXGEP]], align 4
// CHECK: br label %[[END_LABEL]]
#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
unsigned char test_BitScanForward64(unsigned long *Index, unsigned __int64 Mask) {
  return _BitScanForward64(Index, Mask);
}
// CHECK-ARM-X64: define{{.*}}i8 @test_BitScanForward64(ptr {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK-ARM-X64: [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i64 %Mask, 0
// CHECK-ARM-X64: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK-ARM-X64: [[END_LABEL]]:
// CHECK-ARM-X64: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK-ARM-X64: ret i8 [[RESULT]]
// CHECK-ARM-X64: [[ISNOTZERO_LABEL]]:
// CHECK-ARM-X64: [[INDEX:%[0-9]+]] = tail call range(i64 0, 65) i64 @llvm.cttz.i64(i64 %Mask, i1 true)
// CHECK-ARM-X64: [[TRUNC_INDEX:%[0-9]+]] = trunc nuw nsw i64 [[INDEX]] to i32
// CHECK-ARM-X64: store i32 [[TRUNC_INDEX]], ptr %Index, align 4
// CHECK-ARM-X64: br label %[[END_LABEL]]
unsigned char test_BitScanReverse64(unsigned long *Index, unsigned __int64 Mask) {
  return _BitScanReverse64(Index, Mask);
}
// CHECK-ARM-X64: define{{.*}}i8 @test_BitScanReverse64(ptr {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK-ARM-X64: [[ISNOTZERO:%[0-9]+]] = icmp eq i64 %Mask, 0
// CHECK-ARM-X64: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK-ARM-X64: [[END_LABEL]]:
// CHECK-ARM-X64: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK-ARM-X64: ret i8 [[RESULT]]
// CHECK-ARM-X64: [[ISNOTZERO_LABEL]]:
// CHECK-ARM-X64: [[REVINDEX:%[0-9]+]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %Mask, i1 true)
// CHECK-ARM-X64: [[TRUNC_REVINDEX:%[0-9]+]] = trunc nuw nsw i64 [[REVINDEX]] to i32
// CHECK-ARM-X64: [[INDEX:%[0-9]+]] = xor i32 [[TRUNC_REVINDEX]], 63
// CHECK-ARM-X64: store i32 [[INDEX]], ptr %Index, align 4
// CHECK-ARM-X64: br label %[[END_LABEL]]
#endif
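
// The pointer-sized interlocked operations round-trip through ptrtoint and
// inttoptr around the underlying atomic instruction, so the same checks work
// for 32-bit and 64-bit pointers via the [[iPTR]] capture.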
void *test_InterlockedExchangePointer(void * volatile *Target, void *Value) {
  return _InterlockedExchangePointer(Target, Value);
}

// CHECK: define{{.*}}ptr @test_InterlockedExchangePointer(ptr {{[a-z_ ]*}}%Target, ptr {{[a-z_ ]*}}%Value){{.*}}{
// CHECK: %[[VALUE:[0-9]+]] = ptrtoint ptr %Value to [[iPTR:i[0-9]+]]
// CHECK: %[[EXCHANGE:[0-9]+]] = atomicrmw xchg ptr %Target, [[iPTR]] %[[VALUE]] seq_cst, align {{4|8}}
// CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXCHANGE]] to ptr
// CHECK: ret ptr %[[RESULT]]
// CHECK: }
void *test_InterlockedCompareExchangePointer(void * volatile *Destination,
                                             void *Exchange, void *Comparand) {
  return _InterlockedCompareExchangePointer(Destination, Exchange, Comparand);
}

// CHECK: define{{.*}}ptr @test_InterlockedCompareExchangePointer(ptr {{[a-z_ ]*}}%Destination, ptr {{[a-z_ ]*}}%Exchange, ptr {{[a-z_ ]*}}%Comparand){{.*}}{
// CHECK: %[[EXCHANGE:[0-9]+]] = ptrtoint ptr %Exchange to [[iPTR]]
// CHECK: %[[COMPARAND:[0-9]+]] = ptrtoint ptr %Comparand to [[iPTR]]
// CHECK: %[[XCHG:[0-9]+]] = cmpxchg volatile ptr %[[DEST:.+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] seq_cst seq_cst, align {{4|8}}
// CHECK: %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0
// CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to ptr
// CHECK: ret ptr %[[RESULT:[0-9]+]]
// CHECK: }
void *test_InterlockedCompareExchangePointer_nf(void * volatile *Destination,
                                                void *Exchange, void *Comparand) {
  return _InterlockedCompareExchangePointer_nf(Destination, Exchange, Comparand);
}

// CHECK: define{{.*}}ptr @test_InterlockedCompareExchangePointer_nf(ptr {{[a-z_ ]*}}%Destination, ptr {{[a-z_ ]*}}%Exchange, ptr {{[a-z_ ]*}}%Comparand){{.*}}{
// CHECK: %[[EXCHANGE:[0-9]+]] = ptrtoint ptr %Exchange to [[iPTR]]
// CHECK: %[[COMPARAND:[0-9]+]] = ptrtoint ptr %Comparand to [[iPTR]]
// CHECK: %[[XCHG:[0-9]+]] = cmpxchg volatile ptr %[[DEST:.+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] monotonic monotonic, align {{4|8}}
// CHECK: %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0
// CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to ptr
// CHECK: ret ptr %[[RESULT:[0-9]+]]
// CHECK: }
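
// The unsuffixed Interlocked exchange/add/sub/or/xor/and operations below all
// lower to a single seq_cst atomicrmw of the matching width.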
char test_InterlockedExchange8(char volatile *value, char mask) {
  return _InterlockedExchange8(value, mask);
}
// CHECK: define{{.*}}i8 @test_InterlockedExchange8(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask seq_cst, align 1
// CHECK: ret i8 [[RESULT:%[0-9]+]]
// CHECK: }

short test_InterlockedExchange16(short volatile *value, short mask) {
  return _InterlockedExchange16(value, mask);
}
// CHECK: define{{.*}}i16 @test_InterlockedExchange16(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask seq_cst, align 2
// CHECK: ret i16 [[RESULT:%[0-9]+]]
// CHECK: }

long test_InterlockedExchange(long volatile *value, long mask) {
  return _InterlockedExchange(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedExchange(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask seq_cst, align 4
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }
char test_InterlockedExchangeAdd8(char volatile *value, char mask) {
  return _InterlockedExchangeAdd8(value, mask);
}
// CHECK: define{{.*}}i8 @test_InterlockedExchangeAdd8(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask seq_cst, align 1
// CHECK: ret i8 [[RESULT:%[0-9]+]]
// CHECK: }

short test_InterlockedExchangeAdd16(short volatile *value, short mask) {
  return _InterlockedExchangeAdd16(value, mask);
}
// CHECK: define{{.*}}i16 @test_InterlockedExchangeAdd16(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask seq_cst, align 2
// CHECK: ret i16 [[RESULT:%[0-9]+]]
// CHECK: }

long test_InterlockedExchangeAdd(long volatile *value, long mask) {
  return _InterlockedExchangeAdd(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedExchangeAdd(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask seq_cst, align 4
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }
char test_InterlockedExchangeSub8(char volatile *value, char mask) {
  return _InterlockedExchangeSub8(value, mask);
}
// CHECK: define{{.*}}i8 @test_InterlockedExchangeSub8(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i8 %mask seq_cst, align 1
// CHECK: ret i8 [[RESULT:%[0-9]+]]
// CHECK: }

short test_InterlockedExchangeSub16(short volatile *value, short mask) {
  return _InterlockedExchangeSub16(value, mask);
}
// CHECK: define{{.*}}i16 @test_InterlockedExchangeSub16(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i16 %mask seq_cst, align 2
// CHECK: ret i16 [[RESULT:%[0-9]+]]
// CHECK: }

long test_InterlockedExchangeSub(long volatile *value, long mask) {
  return _InterlockedExchangeSub(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedExchangeSub(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i32 %mask seq_cst, align 4
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }
char test_InterlockedOr8(char volatile *value, char mask) {
  return _InterlockedOr8(value, mask);
}
// CHECK: define{{.*}}i8 @test_InterlockedOr8(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask seq_cst, align 1
// CHECK: ret i8 [[RESULT:%[0-9]+]]
// CHECK: }

short test_InterlockedOr16(short volatile *value, short mask) {
  return _InterlockedOr16(value, mask);
}
// CHECK: define{{.*}}i16 @test_InterlockedOr16(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask seq_cst, align 2
// CHECK: ret i16 [[RESULT:%[0-9]+]]
// CHECK: }

long test_InterlockedOr(long volatile *value, long mask) {
  return _InterlockedOr(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedOr(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask seq_cst, align 4
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }
char test_InterlockedXor8(char volatile *value, char mask) {
  return _InterlockedXor8(value, mask);
}
// CHECK: define{{.*}}i8 @test_InterlockedXor8(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask seq_cst, align 1
// CHECK: ret i8 [[RESULT:%[0-9]+]]
// CHECK: }

short test_InterlockedXor16(short volatile *value, short mask) {
  return _InterlockedXor16(value, mask);
}
// CHECK: define{{.*}}i16 @test_InterlockedXor16(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask seq_cst, align 2
// CHECK: ret i16 [[RESULT:%[0-9]+]]
// CHECK: }

long test_InterlockedXor(long volatile *value, long mask) {
  return _InterlockedXor(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedXor(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask seq_cst, align 4
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }
char test_InterlockedAnd8(char volatile *value, char mask) {
  return _InterlockedAnd8(value, mask);
}
// CHECK: define{{.*}}i8 @test_InterlockedAnd8(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask seq_cst, align 1
// CHECK: ret i8 [[RESULT:%[0-9]+]]
// CHECK: }

short test_InterlockedAnd16(short volatile *value, short mask) {
  return _InterlockedAnd16(value, mask);
}
// CHECK: define{{.*}}i16 @test_InterlockedAnd16(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask seq_cst, align 2
// CHECK: ret i16 [[RESULT:%[0-9]+]]
// CHECK: }

long test_InterlockedAnd(long volatile *value, long mask) {
  return _InterlockedAnd(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedAnd(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask seq_cst, align 4
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }
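
// _InterlockedCompareExchange* returns the previous value, i.e. element 0 of
// the { iN, i1 } pair produced by the volatile seq_cst cmpxchg.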
char test_InterlockedCompareExchange8(char volatile *Destination, char Exchange, char Comperand) {
  return _InterlockedCompareExchange8(Destination, Exchange, Comperand);
}
// CHECK: define{{.*}}i8 @test_InterlockedCompareExchange8(ptr{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i8 %Comperand, i8 %Exchange seq_cst seq_cst, align 1
// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
// CHECK: ret i8 [[RESULT]]
// CHECK: }
short test_InterlockedCompareExchange16(short volatile *Destination, short Exchange, short Comperand) {
  return _InterlockedCompareExchange16(Destination, Exchange, Comperand);
}
// CHECK: define{{.*}}i16 @test_InterlockedCompareExchange16(ptr{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i16 %Comperand, i16 %Exchange seq_cst seq_cst, align 2
// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
// CHECK: ret i16 [[RESULT]]
// CHECK: }
long test_InterlockedCompareExchange(long volatile *Destination, long Exchange, long Comperand) {
  return _InterlockedCompareExchange(Destination, Exchange, Comperand);
}
// CHECK: define{{.*}}i32 @test_InterlockedCompareExchange(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange seq_cst seq_cst, align 4
// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
// CHECK: ret i32 [[RESULT]]
// CHECK: }
__int64 test_InterlockedCompareExchange64(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
  return _InterlockedCompareExchange64(Destination, Exchange, Comperand);
}
// CHECK: define{{.*}}i64 @test_InterlockedCompareExchange64(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i64 %Comperand, i64 %Exchange seq_cst seq_cst, align 8
// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
// CHECK: ret i64 [[RESULT]]
// CHECK: }
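
// The 128-bit compare-exchange packs ExchangeHigh/ExchangeLow into one i128,
// writes the old value back through ComparandResult, and returns the success
// bit; on x86-64 this needs cmpxchg16b, hence -target-feature +cx16 in the
// RUN line above.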
#if defined(__x86_64__) || defined(__aarch64__)
unsigned char test_InterlockedCompareExchange128(__int64 volatile *Destination,
                                                 __int64 ExchangeHigh,
                                                 __int64 ExchangeLow,
                                                 __int64 *ComparandResult) {
  return _InterlockedCompareExchange128(++Destination, ++ExchangeHigh,
                                        ++ExchangeLow, ++ComparandResult);
}
// CHECK-64: define{{.*}}i8 @test_InterlockedCompareExchange128(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%ExchangeHigh, i64{{[a-z_ ]*}}%ExchangeLow, ptr{{[a-z_ ]*}}%ComparandResult){{.*}}{
// CHECK-64: %incdec.ptr = getelementptr inbounds nuw i8, ptr %Destination, i64 8
// CHECK-64: %inc = add nsw i64 %ExchangeHigh, 1
// CHECK-64: %inc1 = add nsw i64 %ExchangeLow, 1
// CHECK-64: %incdec.ptr2 = getelementptr inbounds nuw i8, ptr %ComparandResult, i64 8
// CHECK-64: [[EH:%[0-9]+]] = zext i64 %inc to i128
// CHECK-64: [[EL:%[0-9]+]] = zext i64 %inc1 to i128
// CHECK-64: [[EHS:%[0-9]+]] = shl nuw i128 [[EH]], 64
// CHECK-64: [[EXP:%[0-9]+]] = or disjoint i128 [[EHS]], [[EL]]
// CHECK-64: [[ORG:%[0-9]+]] = load i128, ptr %incdec.ptr2, align 8
// CHECK-64: [[RES:%[0-9]+]] = cmpxchg volatile ptr %incdec.ptr, i128 [[ORG]], i128 [[EXP]] seq_cst seq_cst, align 16
// CHECK-64: [[OLD:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 0
// CHECK-64: store i128 [[OLD]], ptr %incdec.ptr2, align 8
// CHECK-64: [[SUC1:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 1
// CHECK-64: [[SUC8:%[0-9]+]] = zext i1 [[SUC1]] to i8
// CHECK-64: ret i8 [[SUC8]]
// CHECK-64: }
#endif
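
// On AArch64 the _acq, _nf, and _rel suffixes select acquire, monotonic, and
// release (with monotonic failure ordering) forms of the 128-bit cmpxchg.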
#if defined(__aarch64__)
unsigned char test_InterlockedCompareExchange128_acq(__int64 volatile *Destination,
                                                     __int64 ExchangeHigh,
                                                     __int64 ExchangeLow,
                                                     __int64 *ComparandResult) {
  return _InterlockedCompareExchange128_acq(Destination, ExchangeHigh,
                                            ExchangeLow, ComparandResult);
}
unsigned char test_InterlockedCompareExchange128_nf(__int64 volatile *Destination,
                                                    __int64 ExchangeHigh,
                                                    __int64 ExchangeLow,
                                                    __int64 *ComparandResult) {
  return _InterlockedCompareExchange128_nf(Destination, ExchangeHigh,
                                           ExchangeLow, ComparandResult);
}
unsigned char test_InterlockedCompareExchange128_rel(__int64 volatile *Destination,
                                                     __int64 ExchangeHigh,
                                                     __int64 ExchangeLow,
                                                     __int64 *ComparandResult) {
  return _InterlockedCompareExchange128_rel(Destination, ExchangeHigh,
                                            ExchangeLow, ComparandResult);
}
// CHECK-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange128_acq({{.*}})
// CHECK-ARM64: cmpxchg volatile ptr %{{.*}}, i128 %{{.*}}, i128 %{{.*}} acquire acquire, align 16
// CHECK-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange128_nf({{.*}})
// CHECK-ARM64: cmpxchg volatile ptr %{{.*}}, i128 %{{.*}}, i128 %{{.*}} monotonic monotonic, align 16
// CHECK-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange128_rel({{.*}})
// CHECK-ARM64: cmpxchg volatile ptr %{{.*}}, i128 %{{.*}}, i128 %{{.*}} release monotonic, align 16
#endif
short test_InterlockedIncrement16(short volatile *Addend) {
  return _InterlockedIncrement16(++Addend);
}
// CHECK: define{{.*}}i16 @test_InterlockedIncrement16(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: %incdec.ptr = getelementptr inbounds nuw i8, ptr %Addend, {{i64|i32}} 2
// CHECK: [[TMP:%[0-9]+]] = atomicrmw add ptr %incdec.ptr, i16 1 seq_cst, align 2
// CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
// CHECK: ret i16 [[RESULT]]
// CHECK: }
long test_InterlockedIncrement(long volatile *Addend) {
  return _InterlockedIncrement(++Addend);
}
// CHECK: define{{.*}}i32 @test_InterlockedIncrement(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: %incdec.ptr = getelementptr inbounds nuw i8, ptr %Addend, {{i64|i32}} 4
// CHECK: [[TMP:%[0-9]+]] = atomicrmw add ptr %incdec.ptr, i32 1 seq_cst, align 4
// CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK: ret i32 [[RESULT]]
// CHECK: }
short test_InterlockedDecrement16(short volatile *Addend) {
  return _InterlockedDecrement16(Addend);
}
// CHECK: define{{.*}}i16 @test_InterlockedDecrement16(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i16 1 seq_cst, align 2
// CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
// CHECK: ret i16 [[RESULT]]
// CHECK: }
long test_InterlockedDecrement(long volatile *Addend) {
  return _InterlockedDecrement(Addend);
}
// CHECK: define{{.*}}i32 @test_InterlockedDecrement(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 seq_cst, align 4
// CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK: ret i32 [[RESULT]]
// CHECK: }
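
// The __iso_volatile_* helpers are plain volatile accesses with no implied
// atomic ordering: just a volatile load or store of the given width.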
char test_iso_volatile_load8(char volatile *p) { return __iso_volatile_load8(p); }
short test_iso_volatile_load16(short volatile *p) { return __iso_volatile_load16(p); }
int test_iso_volatile_load32(int volatile *p) { return __iso_volatile_load32(p); }
__int64 test_iso_volatile_load64(__int64 volatile *p) { return __iso_volatile_load64(p); }

// CHECK: define{{.*}}i8 @test_iso_volatile_load8(ptr{{[a-z_ ]*}}%p)
// CHECK: = load volatile i8, ptr %p
// CHECK: define{{.*}}i16 @test_iso_volatile_load16(ptr{{[a-z_ ]*}}%p)
// CHECK: = load volatile i16, ptr %p
// CHECK: define{{.*}}i32 @test_iso_volatile_load32(ptr{{[a-z_ ]*}}%p)
// CHECK: = load volatile i32, ptr %p
// CHECK: define{{.*}}i64 @test_iso_volatile_load64(ptr{{[a-z_ ]*}}%p)
// CHECK: = load volatile i64, ptr %p
void test_iso_volatile_store8(char volatile *p, char v) { __iso_volatile_store8(p, v); }
void test_iso_volatile_store16(short volatile *p, short v) { __iso_volatile_store16(p, v); }
void test_iso_volatile_store32(int volatile *p, int v) { __iso_volatile_store32(p, v); }
void test_iso_volatile_store64(__int64 volatile *p, __int64 v) { __iso_volatile_store64(p, v); }

// CHECK: define{{.*}}void @test_iso_volatile_store8(ptr{{[a-z_ ]*}}%p, i8 {{[a-z_ ]*}}%v)
// CHECK: store volatile i8 %v, ptr %p
// CHECK: define{{.*}}void @test_iso_volatile_store16(ptr{{[a-z_ ]*}}%p, i16 {{[a-z_ ]*}}%v)
// CHECK: store volatile i16 %v, ptr %p
// CHECK: define{{.*}}void @test_iso_volatile_store32(ptr{{[a-z_ ]*}}%p, i32 {{[a-z_ ]*}}%v)
// CHECK: store volatile i32 %v, ptr %p
// CHECK: define{{.*}}void @test_iso_volatile_store64(ptr{{[a-z_ ]*}}%p, i64 {{[a-z_ ]*}}%v)
// CHECK: store volatile i64 %v, ptr %p
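
// 64-bit interlocked operations; like their narrower counterparts these
// lower to seq_cst atomicrmw instructions.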
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
__int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchange64(value, mask);
}
// CHECK: define{{.*}}i64 @test_InterlockedExchange64(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask seq_cst, align 8
// CHECK: ret i64 [[RESULT:%[0-9]+]]
// CHECK: }
__int64 test_InterlockedExchangeAdd64(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeAdd64(value, mask);
}
// CHECK: define{{.*}}i64 @test_InterlockedExchangeAdd64(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask seq_cst, align 8
// CHECK: ret i64 [[RESULT:%[0-9]+]]
// CHECK: }
__int64 test_InterlockedExchangeSub64(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeSub64(value, mask);
}
// CHECK: define{{.*}}i64 @test_InterlockedExchangeSub64(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i64 %mask seq_cst, align 8
// CHECK: ret i64 [[RESULT:%[0-9]+]]
// CHECK: }
__int64 test_InterlockedOr64(__int64 volatile *value, __int64 mask) {
  return _InterlockedOr64(value, mask);
}
// CHECK: define{{.*}}i64 @test_InterlockedOr64(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask seq_cst, align 8
// CHECK: ret i64 [[RESULT:%[0-9]+]]
// CHECK: }
__int64 test_InterlockedXor64(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64(value, mask);
}
// CHECK: define{{.*}}i64 @test_InterlockedXor64(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask seq_cst, align 8
// CHECK: ret i64 [[RESULT:%[0-9]+]]
// CHECK: }
__int64 test_InterlockedAnd64(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64(value, mask);
}
// CHECK: define{{.*}}i64 @test_InterlockedAnd64(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask seq_cst, align 8
// CHECK: ret i64 [[RESULT:%[0-9]+]]
// CHECK: }
__int64 test_InterlockedIncrement64(__int64 volatile *Addend) {
  return _InterlockedIncrement64(Addend);
}
// CHECK: define{{.*}}i64 @test_InterlockedIncrement64(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i64 1 seq_cst, align 8
// CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
// CHECK: ret i64 [[RESULT]]
// CHECK: }
__int64 test_InterlockedDecrement64(__int64 volatile *Addend) {
  return _InterlockedDecrement64(Addend);
}
// CHECK: define{{.*}}i64 @test_InterlockedDecrement64(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i64 1 seq_cst, align 8
// CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
// CHECK: ret i64 [[RESULT]]
// CHECK: }
#endif
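
// The HLE variants emit the raw XACQUIRE (0xf2) and XRELEASE (0xf3) prefix
// bytes in front of a lock-prefixed xchg/cmpxchg via inline assembly.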
#if defined(__i386__) || defined(__x86_64__)
long test_InterlockedExchange_HLEAcquire(long volatile *Target, long Value) {
// CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLEAcquire(ptr{{[a-z_ ]*}}%Target, i32{{[a-z_ ]*}}%Value)
// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %Target, i32 %Value, ptr elementtype(i32) %Target)
  return _InterlockedExchange_HLEAcquire(Target, Value);
}
long test_InterlockedExchange_HLERelease(long volatile *Target, long Value) {
// CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLERelease(ptr{{[a-z_ ]*}}%Target, i32{{[a-z_ ]*}}%Value)
// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %Target, i32 %Value, ptr elementtype(i32) %Target)
  return _InterlockedExchange_HLERelease(Target, Value);
}
long test_InterlockedCompareExchange_HLEAcquire(long volatile *Destination,
                                                long Exchange, long Comparand) {
// CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLEAcquire(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand)
// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %Destination, i32 %Exchange, i32 %Comparand, ptr elementtype(i32) %Destination)
  return _InterlockedCompareExchange_HLEAcquire(Destination, Exchange, Comparand);
}
long test_InterlockedCompareExchange_HLERelease(long volatile *Destination,
                                                long Exchange, long Comparand) {
// CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLERelease(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand)
// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %Destination, i32 %Exchange, i32 %Comparand, ptr elementtype(i32) %Destination)
  return _InterlockedCompareExchange_HLERelease(Destination, Exchange, Comparand);
}
#endif
#if defined(__x86_64__)
__int64 test_InterlockedExchange64_HLEAcquire(__int64 volatile *Target, __int64 Value) {
// CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLEAcquire(ptr{{[a-z_ ]*}}%Target, i64{{[a-z_ ]*}}%Value)
// CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %Target, i64 %Value, ptr elementtype(i64) %Target)
  return _InterlockedExchange64_HLEAcquire(Target, Value);
}
__int64 test_InterlockedExchange64_HLERelease(__int64 volatile *Target, __int64 Value) {
// CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLERelease(ptr{{[a-z_ ]*}}%Target, i64{{[a-z_ ]*}}%Value)
// CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %Target, i64 %Value, ptr elementtype(i64) %Target)
  return _InterlockedExchange64_HLERelease(Target, Value);
}
__int64 test_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *Destination,
                                                     __int64 Exchange, __int64 Comparand) {
// CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLEAcquire(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand)
// CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %Destination, i64 %Exchange, i64 %Comparand, ptr elementtype(i64) %Destination)
  return _InterlockedCompareExchange64_HLEAcquire(Destination, Exchange, Comparand);
}
__int64 test_InterlockedCompareExchange64_HLERelease(__int64 volatile *Destination,
                                                     __int64 Exchange, __int64 Comparand) {
// CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLERelease(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand)
// CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %Destination, i64 %Exchange, i64 %Comparand, ptr elementtype(i64) %Destination)
  return _InterlockedCompareExchange64_HLERelease(Destination, Exchange, Comparand);
}
#endif
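
// On ARM and AArch64 the interlocked operations also come in _acq, _rel, and
// _nf ("no fence") flavors, which map to acquire, release, and monotonic
// orderings respectively.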
#if defined(__arm__) || defined(__aarch64__)
char test_InterlockedExchangeAdd8_acq(char volatile *value, char mask) {
  return _InterlockedExchangeAdd8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_acq(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask acquire, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
char test_InterlockedExchangeAdd8_rel(char volatile *value, char mask) {
  return _InterlockedExchangeAdd8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_rel(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask release, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
char test_InterlockedExchangeAdd8_nf(char volatile *value, char mask) {
  return _InterlockedExchangeAdd8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_nf(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask monotonic, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
short test_InterlockedExchangeAdd16_acq(short volatile *value, short mask) {
  return _InterlockedExchangeAdd16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_acq(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask acquire, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
short test_InterlockedExchangeAdd16_rel(short volatile *value, short mask) {
  return _InterlockedExchangeAdd16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_rel(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask release, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
short test_InterlockedExchangeAdd16_nf(short volatile *value, short mask) {
  return _InterlockedExchangeAdd16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_nf(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask monotonic, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
long test_InterlockedExchangeAdd_acq(long volatile *value, long mask) {
  return _InterlockedExchangeAdd_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask acquire, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
long test_InterlockedExchangeAdd_rel(long volatile *value, long mask) {
  return _InterlockedExchangeAdd_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask release, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
long test_InterlockedExchangeAdd_nf(long volatile *value, long mask) {
  return _InterlockedExchangeAdd_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask monotonic, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
__int64 test_InterlockedExchangeAdd64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeAdd64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_acq(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask acquire, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
__int64 test_InterlockedExchangeAdd64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeAdd64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_rel(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask release, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
__int64 test_InterlockedExchangeAdd64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeAdd64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_nf(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask monotonic, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

char test_InterlockedExchange8_acq(char volatile *value, char mask) {
  return _InterlockedExchange8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_acq(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask acquire, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
char test_InterlockedExchange8_rel(char volatile *value, char mask) {
  return _InterlockedExchange8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_rel(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask release, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
char test_InterlockedExchange8_nf(char volatile *value, char mask) {
  return _InterlockedExchange8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_nf(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask monotonic, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
short test_InterlockedExchange16_acq(short volatile *value, short mask) {
  return _InterlockedExchange16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_acq(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask acquire, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
short test_InterlockedExchange16_rel(short volatile *value, short mask) {
  return _InterlockedExchange16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_rel(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask release, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
short test_InterlockedExchange16_nf(short volatile *value, short mask) {
  return _InterlockedExchange16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_nf(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask monotonic, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
long test_InterlockedExchange_acq(long volatile *value, long mask) {
  return _InterlockedExchange_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask acquire, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
long test_InterlockedExchange_rel(long volatile *value, long mask) {
  return _InterlockedExchange_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask release, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
long test_InterlockedExchange_nf(long volatile *value, long mask) {
  return _InterlockedExchange_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask monotonic, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
__int64 test_InterlockedExchange64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchange64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_acq(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask acquire, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
__int64 test_InterlockedExchange64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchange64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_rel(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask release, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
__int64 test_InterlockedExchange64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchange64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_nf(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask monotonic, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

char test_InterlockedCompareExchange8_acq(char volatile *Destination, char Exchange, char Comperand) {
  return _InterlockedCompareExchange8_acq(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_acq(ptr{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i8 %Comperand, i8 %Exchange acquire acquire, align 1
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedCompareExchange8_rel(char volatile *Destination, char Exchange, char Comperand) {
  return _InterlockedCompareExchange8_rel(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_rel(ptr{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i8 %Comperand, i8 %Exchange release monotonic, align 1
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedCompareExchange8_nf(char volatile *Destination, char Exchange, char Comperand) {
  return _InterlockedCompareExchange8_nf(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_nf(ptr{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i8 %Comperand, i8 %Exchange monotonic monotonic, align 1
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedCompareExchange16_acq(short volatile *Destination, short Exchange, short Comperand) {
  return _InterlockedCompareExchange16_acq(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_acq(ptr{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i16 %Comperand, i16 %Exchange acquire acquire, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedCompareExchange16_rel(short volatile *Destination, short Exchange, short Comperand) {
  return _InterlockedCompareExchange16_rel(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_rel(ptr{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i16 %Comperand, i16 %Exchange release monotonic, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedCompareExchange16_nf(short volatile *Destination, short Exchange, short Comperand) {
  return _InterlockedCompareExchange16_nf(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_nf(ptr{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i16 %Comperand, i16 %Exchange monotonic monotonic, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedCompareExchange_acq(long volatile *Destination, long Exchange, long Comperand) {
  return _InterlockedCompareExchange_acq(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_acq(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange acquire acquire, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedCompareExchange_rel(long volatile *Destination, long Exchange, long Comperand) {
  return _InterlockedCompareExchange_rel(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_rel(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange release monotonic, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedCompareExchange_nf(long volatile *Destination, long Exchange, long Comperand) {
  return _InterlockedCompareExchange_nf(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_nf(ptr{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange monotonic monotonic, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedCompareExchange64_acq(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
  return _InterlockedCompareExchange64_acq(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_acq(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i64 %Comperand, i64 %Exchange acquire acquire, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedCompareExchange64_rel(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
  return _InterlockedCompareExchange64_rel(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_rel(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i64 %Comperand, i64 %Exchange release monotonic, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedCompareExchange64_nf(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
  return _InterlockedCompareExchange64_nf(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_nf(ptr{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i64 %Comperand, i64 %Exchange monotonic monotonic, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedOr8_acq(char volatile *value, char mask) {
  return _InterlockedOr8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_acq(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask acquire, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

char test_InterlockedOr8_rel(char volatile *value, char mask) {
  return _InterlockedOr8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_rel(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask release, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

char test_InterlockedOr8_nf(char volatile *value, char mask) {
  return _InterlockedOr8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_nf(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask monotonic, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

short test_InterlockedOr16_acq(short volatile *value, short mask) {
  return _InterlockedOr16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_acq(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask acquire, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

short test_InterlockedOr16_rel(short volatile *value, short mask) {
  return _InterlockedOr16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_rel(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask release, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

short test_InterlockedOr16_nf(short volatile *value, short mask) {
  return _InterlockedOr16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_nf(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask monotonic, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

long test_InterlockedOr_acq(long volatile *value, long mask) {
  return _InterlockedOr_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask acquire, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

long test_InterlockedOr_rel(long volatile *value, long mask) {
  return _InterlockedOr_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask release, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

long test_InterlockedOr_nf(long volatile *value, long mask) {
  return _InterlockedOr_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask monotonic, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
1021 __int64
test_InterlockedOr64_acq(__int64
volatile *value
, __int64 mask
) {
1022 return _InterlockedOr64_acq(value
, mask
);
1024 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_acq(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
1025 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask acquire, align 8
1026 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
1027 // CHECK-ARM-ARM64: }
1029 __int64
test_InterlockedOr64_rel(__int64
volatile *value
, __int64 mask
) {
1030 return _InterlockedOr64_rel(value
, mask
);
1032 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_rel(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
1033 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask release, align 8
1034 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
1035 // CHECK-ARM-ARM64: }
1037 __int64
test_InterlockedOr64_nf(__int64
volatile *value
, __int64 mask
) {
1038 return _InterlockedOr64_nf(value
, mask
);
1040 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_nf(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
1041 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask monotonic, align 8
1042 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
1043 // CHECK-ARM-ARM64: }
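
// _InterlockedXor* variants: identical lowering, with atomicrmw xor.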
char test_InterlockedXor8_acq(char volatile *value, char mask) {
  return _InterlockedXor8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_acq(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask acquire, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedXor8_rel(char volatile *value, char mask) {
  return _InterlockedXor8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_rel(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask release, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedXor8_nf(char volatile *value, char mask) {
  return _InterlockedXor8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_nf(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask monotonic, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedXor16_acq(short volatile *value, short mask) {
  return _InterlockedXor16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_acq(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask acquire, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedXor16_rel(short volatile *value, short mask) {
  return _InterlockedXor16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_rel(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask release, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedXor16_nf(short volatile *value, short mask) {
  return _InterlockedXor16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_nf(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask monotonic, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedXor_acq(long volatile *value, long mask) {
  return _InterlockedXor_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask acquire, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedXor_rel(long volatile *value, long mask) {
  return _InterlockedXor_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask release, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedXor_nf(long volatile *value, long mask) {
  return _InterlockedXor_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask monotonic, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedXor64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_acq(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask acquire, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedXor64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_rel(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask release, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedXor64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_nf(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask monotonic, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }
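
// _InterlockedAnd* variants: identical lowering, with atomicrmw and.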
char test_InterlockedAnd8_acq(char volatile *value, char mask) {
  return _InterlockedAnd8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_acq(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask acquire, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedAnd8_rel(char volatile *value, char mask) {
  return _InterlockedAnd8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_rel(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask release, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedAnd8_nf(char volatile *value, char mask) {
  return _InterlockedAnd8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_nf(ptr{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask monotonic, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedAnd16_acq(short volatile *value, short mask) {
  return _InterlockedAnd16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_acq(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask acquire, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedAnd16_rel(short volatile *value, short mask) {
  return _InterlockedAnd16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_rel(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask release, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedAnd16_nf(short volatile *value, short mask) {
  return _InterlockedAnd16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_nf(ptr{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask monotonic, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedAnd_acq(long volatile *value, long mask) {
  return _InterlockedAnd_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_acq(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask acquire, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedAnd_rel(long volatile *value, long mask) {
  return _InterlockedAnd_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_rel(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask release, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedAnd_nf(long volatile *value, long mask) {
  return _InterlockedAnd_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_nf(ptr{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask monotonic, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedAnd64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_acq(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask acquire, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedAnd64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_rel(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask release, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedAnd64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_nf(ptr{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask monotonic, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }
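
// _InterlockedIncrement* returns the incremented value, so the lowering
// is an atomicrmw add of 1 followed by a plain add of 1 to the old value
// the atomicrmw returned.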
short test_InterlockedIncrement16_acq(short volatile *Addend) {
  return _InterlockedIncrement16_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i16 1 acquire, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedIncrement16_rel(short volatile *Addend) {
  return _InterlockedIncrement16_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i16 1 release, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedIncrement16_nf(short volatile *Addend) {
  return _InterlockedIncrement16_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i16 1 monotonic, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedIncrement_acq(long volatile *Addend) {
  return _InterlockedIncrement_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i32 1 acquire, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedIncrement_rel(long volatile *Addend) {
  return _InterlockedIncrement_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i32 1 release, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedIncrement_nf(long volatile *Addend) {
  return _InterlockedIncrement_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i32 1 monotonic, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedIncrement64_acq(__int64 volatile *Addend) {
  return _InterlockedIncrement64_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i64 1 acquire, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedIncrement64_rel(__int64 volatile *Addend) {
  return _InterlockedIncrement64_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i64 1 release, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedIncrement64_nf(__int64 volatile *Addend) {
  return _InterlockedIncrement64_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i64 1 monotonic, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }
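
// _InterlockedDecrement* likewise returns the new value: an atomicrmw sub
// of 1 followed by an add of -1 to the value the atomicrmw returned.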
short test_InterlockedDecrement16_acq(short volatile *Addend) {
  return _InterlockedDecrement16_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i16 1 acquire, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedDecrement16_rel(short volatile *Addend) {
  return _InterlockedDecrement16_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i16 1 release, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedDecrement16_nf(short volatile *Addend) {
  return _InterlockedDecrement16_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i16 1 monotonic, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedDecrement_acq(long volatile *Addend) {
  return _InterlockedDecrement_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 acquire, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedDecrement_rel(long volatile *Addend) {
  return _InterlockedDecrement_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 release, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedDecrement_nf(long volatile *Addend) {
  return _InterlockedDecrement_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 monotonic, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedDecrement64_acq(__int64 volatile *Addend) {
  return _InterlockedDecrement64_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_acq(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i64 1 acquire, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedDecrement64_rel(__int64 volatile *Addend) {
  return _InterlockedDecrement64_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_rel(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i64 1 release, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedDecrement64_nf(__int64 volatile *Addend) {
  return _InterlockedDecrement64_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_nf(ptr{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i64 1 monotonic, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }
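
// __fastfail raises a fast-fail exception and does not return; each target
// uses its own trapping instruction (udf #251 on ARM, int 0x29 on x86,
// brk #0xF003 on ARM64).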
void test__fastfail(void) {
  __fastfail(42);
}
// CHECK-LABEL: define{{.*}} void @test__fastfail()
// CHECK-ARM: call void asm sideeffect "udf #251", "{r0}"(i32 42) #[[NORETURN:[0-9]+]]
// CHECK-INTEL: call void asm sideeffect "int $$0x29", "{cx}"(i32 42) #[[NORETURN:[0-9]+]]
// CHECK-ARM64: call void asm sideeffect "brk #0xF003", "{w0}"(i32 42) #[[NORETURN:[0-9]+]]

// Attributes come last.

// CHECK: attributes #[[NORETURN]] = { noreturn{{.*}} }