// clang/test/CodeGen/ms-intrinsics.c
// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
// RUN:         -triple i686--windows -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefixes CHECK,CHECK-I386,CHECK-INTEL
// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
// RUN:         -triple thumbv7--windows -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefixes CHECK,CHECK-ARM,CHECK-ARM-ARM64,CHECK-ARM-X64
// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
// RUN:         -triple x86_64--windows -Oz -emit-llvm -target-feature +cx16 %s -o - \
// RUN:         | FileCheck %s --check-prefixes CHECK,CHECK-X64,CHECK-ARM-X64,CHECK-INTEL,CHECK-64
// RUN: %clang_cc1 -no-opaque-pointers -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \
// RUN:         -triple aarch64-windows -Oz -emit-llvm %s -o - \
// RUN:         | FileCheck %s --check-prefixes CHECK-ARM-ARM64,CHECK-ARM-X64,CHECK-ARM64,CHECK-64
// intrin.h needs size_t, but -ffreestanding prevents us from getting it from
// stddef.h. Work around it with this typedef.
typedef __SIZE_TYPE__ size_t;

#include <intrin.h>
#if defined(__i386__) || defined(__x86_64__)
void test__stosb(unsigned char *Dest, unsigned char Data, size_t Count) {
  return __stosb(Dest, Data, Count);
}

// CHECK-I386: define{{.*}}void @test__stosb
// CHECK-I386: tail call void @llvm.memset.p0i8.i32(i8* align 1 %Dest, i8 %Data, i32 %Count, i1 true)
// CHECK-I386: ret void
// CHECK-I386: }

// CHECK-X64: define{{.*}}void @test__stosb
// CHECK-X64: tail call void @llvm.memset.p0i8.i64(i8* align 1 %Dest, i8 %Data, i64 %Count, i1 true)
// CHECK-X64: ret void
// CHECK-X64: }

void test__movsb(unsigned char *Dest, unsigned char *Src, size_t Count) {
  return __movsb(Dest, Src, Count);
}

// CHECK-I386-LABEL: define{{.*}} void @test__movsb
// CHECK-I386: tail call { i8*, i8*, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movsb\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %Dest, i8* %Src, i32 %Count)
// CHECK-I386: ret void
// CHECK-I386: }

// CHECK-X64-LABEL: define{{.*}} void @test__movsb
// CHECK-X64: call { i8*, i8*, i64 } asm sideeffect "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %Dest, i8* %Src, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }

void test__stosw(unsigned short *Dest, unsigned short Data, size_t Count) {
  return __stosw(Dest, Data, Count);
}

// CHECK-I386-LABEL: define{{.*}} void @test__stosw
// CHECK-I386: call { i16*, i32 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, i16* %Dest, i32 %Count)
// CHECK-I386: ret void
// CHECK-I386: }

// CHECK-X64-LABEL: define{{.*}} void @test__stosw
// CHECK-X64: call { i16*, i64 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, i16* %Dest, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }

void test__movsw(unsigned short *Dest, unsigned short *Src, size_t Count) {
  return __movsw(Dest, Src, Count);
}

// CHECK-I386-LABEL: define{{.*}} void @test__movsw
// CHECK-I386: tail call { i16*, i16*, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movsw\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i16* %Dest, i16* %Src, i32 %Count)
// CHECK-I386: ret void
// CHECK-I386: }

// CHECK-X64-LABEL: define{{.*}} void @test__movsw
// CHECK-X64: call { i16*, i16*, i64 } asm sideeffect "rep movsw", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i16* %Dest, i16* %Src, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }

void test__stosd(unsigned long *Dest, unsigned long Data, size_t Count) {
  return __stosd(Dest, Data, Count);
}

// CHECK-I386-LABEL: define{{.*}} void @test__stosd
// CHECK-I386: call { i32*, i32 } asm sideeffect "rep stos$(l$|d$)", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, i32* %Dest, i32 %Count)
// CHECK-I386: ret void
// CHECK-I386: }

// CHECK-X64-LABEL: define{{.*}} void @test__stosd
// CHECK-X64: call { i32*, i64 } asm sideeffect "rep stos$(l$|d$)", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, i32* %Dest, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }

void test__movsd(unsigned long *Dest, unsigned long *Src, size_t Count) {
  return __movsd(Dest, Src, Count);
}

// CHECK-I386-LABEL: define{{.*}} void @test__movsd
// CHECK-I386: tail call { i32*, i32*, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movs$(l$|d$)\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Dest, i32* %Src, i32 %Count)
// CHECK-I386: ret void
// CHECK-I386: }

// CHECK-X64-LABEL: define{{.*}} void @test__movsd
// CHECK-X64: call { i32*, i32*, i64 } asm sideeffect "rep movs$(l$|d$)", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Dest, i32* %Src, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }

#ifdef __x86_64__
void test__stosq(unsigned __int64 *Dest, unsigned __int64 Data, size_t Count) {
  return __stosq(Dest, Data, Count);
}

// CHECK-X64-LABEL: define{{.*}} void @test__stosq
// CHECK-X64: call { i64*, i64 } asm sideeffect "rep stosq", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i64 %Data, i64* %Dest, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }

void test__movsq(unsigned __int64 *Dest, unsigned __int64 *Src, size_t Count) {
  return __movsq(Dest, Src, Count);
}

// CHECK-X64-LABEL: define{{.*}} void @test__movsq
// CHECK-X64: call { i64*, i64*, i64 } asm sideeffect "rep movsq", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Dest, i64* %Src, i64 %Count)
// CHECK-X64: ret void
// CHECK-X64: }
#endif
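
// Illustrative only (not part of the checked output): __stosb stores Data
// into Count consecutive bytes, which is why clang can lower it to
// llvm.memset above. A minimal reference sketch; the helper name is
// hypothetical and the function is unused, so it emits no IR at -Oz:
static void stosb_reference(unsigned char *Dest, unsigned char Data,
                            size_t Count) {
  for (size_t i = 0; i < Count; ++i)
    Dest[i] = Data; // same observable effect as __stosb(Dest, Data, Count)
}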
void test__ud2(void) {
  __ud2();
}

// CHECK-INTEL-LABEL: define{{.*}} void @test__ud2()
// CHECK-INTEL: call void @llvm.trap()

void test__int2c(void) {
  __int2c();
}

// CHECK-INTEL-LABEL: define{{.*}} void @test__int2c()
// CHECK-INTEL: call void asm sideeffect "int $$0x2c", ""() #[[NORETURN:[0-9]+]]

#endif

void *test_ReturnAddress(void) {
  return _ReturnAddress();
}

// CHECK-LABEL: define{{.*}}i8* @test_ReturnAddress()
// CHECK: = tail call i8* @llvm.returnaddress(i32 0)
// CHECK: ret i8*

#if defined(__i386__) || defined(__x86_64__) || defined (__aarch64__)
void *test_AddressOfReturnAddress(void) {
  return _AddressOfReturnAddress();
}

// CHECK-INTEL-LABEL: define dso_local i8* @test_AddressOfReturnAddress()
// CHECK-INTEL: = tail call i8* @llvm.addressofreturnaddress.p0i8()
// CHECK-INTEL: ret i8*
#endif
unsigned char test_BitScanForward(unsigned long *Index, unsigned long Mask) {
  return _BitScanForward(++Index, Mask);
}

// CHECK: define{{.*}}i8 @test_BitScanForward(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK: [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i32 %Mask, 0
// CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK: [[END_LABEL]]:
// CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK: ret i8 [[RESULT]]
// CHECK: [[ISNOTZERO_LABEL]]:
// CHECK: [[IDXGEP:%[a-z0-9._]+]] = getelementptr inbounds i32, i32* %Index, {{i64|i32}} 1
// CHECK: [[INDEX:%[0-9]+]] = tail call i32 @llvm.cttz.i32(i32 %Mask, i1 true)
// CHECK: store i32 [[INDEX]], i32* [[IDXGEP]], align 4
// CHECK: br label %[[END_LABEL]]

unsigned char test_BitScanReverse(unsigned long *Index, unsigned long Mask) {
  return _BitScanReverse(++Index, Mask);
}

// CHECK: define{{.*}}i8 @test_BitScanReverse(i32* {{[a-z_ ]*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK: [[ISNOTZERO:%[0-9]+]] = icmp eq i32 %Mask, 0
// CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK: [[END_LABEL]]:
// CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK: ret i8 [[RESULT]]
// CHECK: [[ISNOTZERO_LABEL]]:
// CHECK: [[IDXGEP:%[a-z0-9._]+]] = getelementptr inbounds i32, i32* %Index, {{i64|i32}} 1
// CHECK: [[REVINDEX:%[0-9]+]] = tail call i32 @llvm.ctlz.i32(i32 %Mask, i1 true)
// CHECK: [[INDEX:%[0-9]+]] = xor i32 [[REVINDEX]], 31
// CHECK: store i32 [[INDEX]], i32* [[IDXGEP]], align 4
// CHECK: br label %[[END_LABEL]]
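
// Illustrative only: _BitScanForward returns 0 (and leaves *Index alone)
// when Mask is zero, which is what the branch-and-phi IR above encodes;
// otherwise it stores the index of the lowest set bit and returns 1. A
// hedged usage sketch; the helper name is hypothetical:
static int lowest_set_bit_or_minus1(unsigned long Mask) {
  unsigned long Index;
  return _BitScanForward(&Index, Mask) ? (int)Index : -1;
}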

#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
unsigned char test_BitScanForward64(unsigned long *Index, unsigned __int64 Mask) {
  return _BitScanForward64(Index, Mask);
}

// CHECK-ARM-X64: define{{.*}}i8 @test_BitScanForward64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK-ARM-X64: [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i64 %Mask, 0
// CHECK-ARM-X64: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK-ARM-X64: [[END_LABEL]]:
// CHECK-ARM-X64: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK-ARM-X64: ret i8 [[RESULT]]
// CHECK-ARM-X64: [[ISNOTZERO_LABEL]]:
// CHECK-ARM-X64: [[INDEX:%[0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %Mask, i1 true)
// CHECK-ARM-X64: [[TRUNC_INDEX:%[0-9]+]] = trunc i64 [[INDEX]] to i32
// CHECK-ARM-X64: store i32 [[TRUNC_INDEX]], i32* %Index, align 4
// CHECK-ARM-X64: br label %[[END_LABEL]]

unsigned char test_BitScanReverse64(unsigned long *Index, unsigned __int64 Mask) {
  return _BitScanReverse64(Index, Mask);
}

// CHECK-ARM-X64: define{{.*}}i8 @test_BitScanReverse64(i32* {{[a-z_ ]*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{
// CHECK-ARM-X64: [[ISNOTZERO:%[0-9]+]] = icmp eq i64 %Mask, 0
// CHECK-ARM-X64: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]]
// CHECK-ARM-X64: [[END_LABEL]]:
// CHECK-ARM-X64: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ]
// CHECK-ARM-X64: ret i8 [[RESULT]]
// CHECK-ARM-X64: [[ISNOTZERO_LABEL]]:
// CHECK-ARM-X64: [[REVINDEX:%[0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %Mask, i1 true)
// CHECK-ARM-X64: [[TRUNC_REVINDEX:%[0-9]+]] = trunc i64 [[REVINDEX]] to i32
// CHECK-ARM-X64: [[INDEX:%[0-9]+]] = xor i32 [[TRUNC_REVINDEX]], 63
// CHECK-ARM-X64: store i32 [[INDEX]], i32* %Index, align 4
// CHECK-ARM-X64: br label %[[END_LABEL]]
#endif

void *test_InterlockedExchangePointer(void * volatile *Target, void *Value) {
  return _InterlockedExchangePointer(Target, Value);
}

// CHECK: define{{.*}}i8* @test_InterlockedExchangePointer(i8** {{[a-z_ ]*}}%Target, i8* {{[a-z_ ]*}}%Value){{.*}}{
// CHECK: %[[TARGET:[0-9]+]] = bitcast i8** %Target to [[iPTR:i[0-9]+]]*
// CHECK: %[[VALUE:[0-9]+]] = ptrtoint i8* %Value to [[iPTR]]
// CHECK: %[[EXCHANGE:[0-9]+]] = atomicrmw xchg [[iPTR]]* %[[TARGET]], [[iPTR]] %[[VALUE]] seq_cst, align {{4|8}}
// CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXCHANGE]] to i8*
// CHECK: ret i8* %[[RESULT]]
// CHECK: }

void *test_InterlockedCompareExchangePointer(void * volatile *Destination,
                                             void *Exchange, void *Comparand) {
  return _InterlockedCompareExchangePointer(Destination, Exchange, Comparand);
}

// CHECK: define{{.*}}i8* @test_InterlockedCompareExchangePointer(i8** {{[a-z_ ]*}}%Destination, i8* {{[a-z_ ]*}}%Exchange, i8* {{[a-z_ ]*}}%Comparand){{.*}}{
// CHECK: %[[DEST:[0-9]+]] = bitcast i8** %Destination to [[iPTR]]*
// CHECK: %[[EXCHANGE:[0-9]+]] = ptrtoint i8* %Exchange to [[iPTR]]
// CHECK: %[[COMPARAND:[0-9]+]] = ptrtoint i8* %Comparand to [[iPTR]]
// CHECK: %[[XCHG:[0-9]+]] = cmpxchg volatile [[iPTR]]* %[[DEST:[0-9]+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] seq_cst seq_cst, align {{4|8}}
// CHECK: %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0
// CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to i8*
// CHECK: ret i8* %[[RESULT:[0-9]+]]
// CHECK: }

void *test_InterlockedCompareExchangePointer_nf(void * volatile *Destination,
                                                void *Exchange, void *Comparand) {
  return _InterlockedCompareExchangePointer_nf(Destination, Exchange, Comparand);
}

// CHECK: define{{.*}}i8* @test_InterlockedCompareExchangePointer_nf(i8** {{[a-z_ ]*}}%Destination, i8* {{[a-z_ ]*}}%Exchange, i8* {{[a-z_ ]*}}%Comparand){{.*}}{
// CHECK: %[[DEST:[0-9]+]] = bitcast i8** %Destination to [[iPTR]]*
// CHECK: %[[EXCHANGE:[0-9]+]] = ptrtoint i8* %Exchange to [[iPTR]]
// CHECK: %[[COMPARAND:[0-9]+]] = ptrtoint i8* %Comparand to [[iPTR]]
// CHECK: %[[XCHG:[0-9]+]] = cmpxchg volatile [[iPTR]]* %[[DEST:[0-9]+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] monotonic monotonic, align {{4|8}}
// CHECK: %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0
// CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to i8*
// CHECK: ret i8* %[[RESULT:[0-9]+]]
// CHECK: }
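
// Illustrative only: the plain pointer CAS is seq_cst while the _nf
// ("no fence") variant is monotonic, as the two cmpxchg orderings above
// show. A typical use returns the previous value; the helper name is
// hypothetical:
static void *claim_slot_if_empty(void *volatile *Slot, void *New) {
  // Stores New only if *Slot was null; returns the value seen beforehand.
  return _InterlockedCompareExchangePointer(Slot, New, 0);
}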

char test_InterlockedExchange8(char volatile *value, char mask) {
  return _InterlockedExchange8(value, mask);
}

// CHECK: define{{.*}}i8 @test_InterlockedExchange8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask seq_cst, align 1
// CHECK: ret i8 [[RESULT:%[0-9]+]]
// CHECK: }

short test_InterlockedExchange16(short volatile *value, short mask) {
  return _InterlockedExchange16(value, mask);
}

// CHECK: define{{.*}}i16 @test_InterlockedExchange16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask seq_cst, align 2
// CHECK: ret i16 [[RESULT:%[0-9]+]]
// CHECK: }

long test_InterlockedExchange(long volatile *value, long mask) {
  return _InterlockedExchange(value, mask);
}

// CHECK: define{{.*}}i32 @test_InterlockedExchange(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask seq_cst, align 4
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }

char test_InterlockedExchangeAdd8(char volatile *value, char mask) {
  return _InterlockedExchangeAdd8(value, mask);
}

// CHECK: define{{.*}}i8 @test_InterlockedExchangeAdd8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask seq_cst, align 1
// CHECK: ret i8 [[RESULT:%[0-9]+]]
// CHECK: }

short test_InterlockedExchangeAdd16(short volatile *value, short mask) {
  return _InterlockedExchangeAdd16(value, mask);
}

// CHECK: define{{.*}}i16 @test_InterlockedExchangeAdd16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask seq_cst, align 2
// CHECK: ret i16 [[RESULT:%[0-9]+]]
// CHECK: }

long test_InterlockedExchangeAdd(long volatile *value, long mask) {
  return _InterlockedExchangeAdd(value, mask);
}

// CHECK: define{{.*}}i32 @test_InterlockedExchangeAdd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask seq_cst, align 4
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }

char test_InterlockedExchangeSub8(char volatile *value, char mask) {
  return _InterlockedExchangeSub8(value, mask);
}

// CHECK: define{{.*}}i8 @test_InterlockedExchangeSub8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i8* %value, i8 %mask seq_cst, align 1
// CHECK: ret i8 [[RESULT:%[0-9]+]]
// CHECK: }

short test_InterlockedExchangeSub16(short volatile *value, short mask) {
  return _InterlockedExchangeSub16(value, mask);
}

// CHECK: define{{.*}}i16 @test_InterlockedExchangeSub16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i16* %value, i16 %mask seq_cst, align 2
// CHECK: ret i16 [[RESULT:%[0-9]+]]
// CHECK: }

long test_InterlockedExchangeSub(long volatile *value, long mask) {
  return _InterlockedExchangeSub(value, mask);
}

// CHECK: define{{.*}}i32 @test_InterlockedExchangeSub(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i32* %value, i32 %mask seq_cst, align 4
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }

char test_InterlockedOr8(char volatile *value, char mask) {
  return _InterlockedOr8(value, mask);
}

// CHECK: define{{.*}}i8 @test_InterlockedOr8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask seq_cst, align 1
// CHECK: ret i8 [[RESULT:%[0-9]+]]
// CHECK: }

short test_InterlockedOr16(short volatile *value, short mask) {
  return _InterlockedOr16(value, mask);
}

// CHECK: define{{.*}}i16 @test_InterlockedOr16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask seq_cst, align 2
// CHECK: ret i16 [[RESULT:%[0-9]+]]
// CHECK: }

long test_InterlockedOr(long volatile *value, long mask) {
  return _InterlockedOr(value, mask);
}

// CHECK: define{{.*}}i32 @test_InterlockedOr(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask seq_cst, align 4
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }

char test_InterlockedXor8(char volatile *value, char mask) {
  return _InterlockedXor8(value, mask);
}

// CHECK: define{{.*}}i8 @test_InterlockedXor8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask seq_cst, align 1
// CHECK: ret i8 [[RESULT:%[0-9]+]]
// CHECK: }

short test_InterlockedXor16(short volatile *value, short mask) {
  return _InterlockedXor16(value, mask);
}

// CHECK: define{{.*}}i16 @test_InterlockedXor16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask seq_cst, align 2
// CHECK: ret i16 [[RESULT:%[0-9]+]]
// CHECK: }

long test_InterlockedXor(long volatile *value, long mask) {
  return _InterlockedXor(value, mask);
}

// CHECK: define{{.*}}i32 @test_InterlockedXor(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask seq_cst, align 4
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }

char test_InterlockedAnd8(char volatile *value, char mask) {
  return _InterlockedAnd8(value, mask);
}

// CHECK: define{{.*}}i8 @test_InterlockedAnd8(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask seq_cst, align 1
// CHECK: ret i8 [[RESULT:%[0-9]+]]
// CHECK: }

short test_InterlockedAnd16(short volatile *value, short mask) {
  return _InterlockedAnd16(value, mask);
}

// CHECK: define{{.*}}i16 @test_InterlockedAnd16(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask seq_cst, align 2
// CHECK: ret i16 [[RESULT:%[0-9]+]]
// CHECK: }

long test_InterlockedAnd(long volatile *value, long mask) {
  return _InterlockedAnd(value, mask);
}

// CHECK: define{{.*}}i32 @test_InterlockedAnd(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask seq_cst, align 4
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }

char test_InterlockedCompareExchange8(char volatile *Destination, char Exchange, char Comperand) {
  return _InterlockedCompareExchange8(Destination, Exchange, Comperand);
}

// CHECK: define{{.*}}i8 @test_InterlockedCompareExchange8(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange seq_cst seq_cst, align 1
// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
// CHECK: ret i8 [[RESULT]]
// CHECK: }

short test_InterlockedCompareExchange16(short volatile *Destination, short Exchange, short Comperand) {
  return _InterlockedCompareExchange16(Destination, Exchange, Comperand);
}

// CHECK: define{{.*}}i16 @test_InterlockedCompareExchange16(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange seq_cst seq_cst, align 2
// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
// CHECK: ret i16 [[RESULT]]
// CHECK: }

long test_InterlockedCompareExchange(long volatile *Destination, long Exchange, long Comperand) {
  return _InterlockedCompareExchange(Destination, Exchange, Comperand);
}

// CHECK: define{{.*}}i32 @test_InterlockedCompareExchange(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange seq_cst seq_cst, align 4
// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
// CHECK: ret i32 [[RESULT]]
// CHECK: }

__int64 test_InterlockedCompareExchange64(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
  return _InterlockedCompareExchange64(Destination, Exchange, Comperand);
}

// CHECK: define{{.*}}i64 @test_InterlockedCompareExchange64(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange seq_cst seq_cst, align 8
// CHECK: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
// CHECK: ret i64 [[RESULT]]
// CHECK: }
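
// Illustrative only: every _InterlockedCompareExchange* variant returns the
// value previously held in *Destination, so the exchange succeeded exactly
// when that value equals Comperand. Hypothetical helper:
static int cas64_succeeded(__int64 volatile *Dest, __int64 New, __int64 Old) {
  return _InterlockedCompareExchange64(Dest, New, Old) == Old;
}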

#if defined(__x86_64__) || defined(__aarch64__)
unsigned char test_InterlockedCompareExchange128(
    __int64 volatile *Destination, __int64 ExchangeHigh,
    __int64 ExchangeLow, __int64 *ComparandResult) {
  return _InterlockedCompareExchange128(++Destination, ++ExchangeHigh,
                                        ++ExchangeLow, ++ComparandResult);
}

// CHECK-64: define{{.*}}i8 @test_InterlockedCompareExchange128(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%ExchangeHigh, i64{{[a-z_ ]*}}%ExchangeLow, i64*{{[a-z_ ]*}}%ComparandResult){{.*}}{
// CHECK-64: %incdec.ptr = getelementptr inbounds i64, i64* %Destination, i64 1
// CHECK-64: %inc = add nsw i64 %ExchangeHigh, 1
// CHECK-64: %inc1 = add nsw i64 %ExchangeLow, 1
// CHECK-64: %incdec.ptr2 = getelementptr inbounds i64, i64* %ComparandResult, i64 1
// CHECK-64: [[DST:%[0-9]+]] = bitcast i64* %incdec.ptr to i128*
// CHECK-64: [[CNR:%[0-9]+]] = bitcast i64* %incdec.ptr2 to i128*
// CHECK-64: [[EH:%[0-9]+]] = zext i64 %inc to i128
// CHECK-64: [[EL:%[0-9]+]] = zext i64 %inc1 to i128
// CHECK-64: [[EHS:%[0-9]+]] = shl nuw i128 [[EH]], 64
// CHECK-64: [[EXP:%[0-9]+]] = or i128 [[EHS]], [[EL]]
// CHECK-64: [[ORG:%[0-9]+]] = load i128, i128* [[CNR]], align 16
// CHECK-64: [[RES:%[0-9]+]] = cmpxchg volatile i128* [[DST]], i128 [[ORG]], i128 [[EXP]] seq_cst seq_cst, align 16
// CHECK-64: [[OLD:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 0
// CHECK-64: store i128 [[OLD]], i128* [[CNR]], align 16
// CHECK-64: [[SUC1:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 1
// CHECK-64: [[SUC8:%[0-9]+]] = zext i1 [[SUC1]] to i8
// CHECK-64: ret i8 [[SUC8]]
// CHECK-64: }
#endif
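
// Illustrative only: on 64-bit targets the two 64-bit halves are packed
// into a single i128 (ExchangeHigh in the upper 64 bits), and
// *ComparandResult serves as both the expected value and the out-parameter
// that receives the old value. A sketch of the packing, assuming __int128
// is available; the helper name is hypothetical:
#if defined(__x86_64__) || defined(__aarch64__)
static unsigned __int128 pack_exchange128(__int64 High, __int64 Low) {
  // Matches the zext/shl/or sequence in the IR above.
  return ((unsigned __int128)(unsigned long long)High << 64) |
         (unsigned long long)Low;
}
#endif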

#if defined(__aarch64__)
unsigned char test_InterlockedCompareExchange128_acq(
    __int64 volatile *Destination, __int64 ExchangeHigh,
    __int64 ExchangeLow, __int64 *ComparandResult) {
  return _InterlockedCompareExchange128_acq(Destination, ExchangeHigh,
                                            ExchangeLow, ComparandResult);
}
unsigned char test_InterlockedCompareExchange128_nf(
    __int64 volatile *Destination, __int64 ExchangeHigh,
    __int64 ExchangeLow, __int64 *ComparandResult) {
  return _InterlockedCompareExchange128_nf(Destination, ExchangeHigh,
                                           ExchangeLow, ComparandResult);
}
unsigned char test_InterlockedCompareExchange128_rel(
    __int64 volatile *Destination, __int64 ExchangeHigh,
    __int64 ExchangeLow, __int64 *ComparandResult) {
  return _InterlockedCompareExchange128_rel(Destination, ExchangeHigh,
                                            ExchangeLow, ComparandResult);
}
// CHECK-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange128_acq({{.*}})
// CHECK-ARM64: cmpxchg volatile i128* %{{.*}}, i128 %{{.*}}, i128 %{{.*}} acquire acquire, align 16
// CHECK-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange128_nf({{.*}})
// CHECK-ARM64: cmpxchg volatile i128* %{{.*}}, i128 %{{.*}}, i128 %{{.*}} monotonic monotonic, align 16
// CHECK-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange128_rel({{.*}})
// CHECK-ARM64: cmpxchg volatile i128* %{{.*}}, i128 %{{.*}}, i128 %{{.*}} release monotonic, align 16
#endif

short test_InterlockedIncrement16(short volatile *Addend) {
  return _InterlockedIncrement16(++Addend);
}

// CHECK: define{{.*}}i16 @test_InterlockedIncrement16(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: %incdec.ptr = getelementptr inbounds i16, i16* %Addend, {{i64|i32}} 1
// CHECK: [[TMP:%[0-9]+]] = atomicrmw add i16* %incdec.ptr, i16 1 seq_cst, align 2
// CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
// CHECK: ret i16 [[RESULT]]
// CHECK: }

long test_InterlockedIncrement(long volatile *Addend) {
  return _InterlockedIncrement(++Addend);
}

// CHECK: define{{.*}}i32 @test_InterlockedIncrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: %incdec.ptr = getelementptr inbounds i32, i32* %Addend, {{i64|i32}} 1
// CHECK: [[TMP:%[0-9]+]] = atomicrmw add i32* %incdec.ptr, i32 1 seq_cst, align 4
// CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK: ret i32 [[RESULT]]
// CHECK: }

short test_InterlockedDecrement16(short volatile *Addend) {
  return _InterlockedDecrement16(Addend);
}

// CHECK: define{{.*}}i16 @test_InterlockedDecrement16(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 seq_cst, align 2
// CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
// CHECK: ret i16 [[RESULT]]
// CHECK: }

long test_InterlockedDecrement(long volatile *Addend) {
  return _InterlockedDecrement(Addend);
}

// CHECK: define{{.*}}i32 @test_InterlockedDecrement(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 seq_cst, align 4
// CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK: ret i32 [[RESULT]]
// CHECK: }

char test_iso_volatile_load8(char volatile *p) { return __iso_volatile_load8(p); }
short test_iso_volatile_load16(short volatile *p) { return __iso_volatile_load16(p); }
int test_iso_volatile_load32(int volatile *p) { return __iso_volatile_load32(p); }
__int64 test_iso_volatile_load64(__int64 volatile *p) { return __iso_volatile_load64(p); }

// CHECK: define{{.*}}i8 @test_iso_volatile_load8(i8*{{[a-z_ ]*}}%p)
// CHECK: = load volatile i8, i8* %p
// CHECK: define{{.*}}i16 @test_iso_volatile_load16(i16*{{[a-z_ ]*}}%p)
// CHECK: = load volatile i16, i16* %p
// CHECK: define{{.*}}i32 @test_iso_volatile_load32(i32*{{[a-z_ ]*}}%p)
// CHECK: = load volatile i32, i32* %p
// CHECK: define{{.*}}i64 @test_iso_volatile_load64(i64*{{[a-z_ ]*}}%p)
// CHECK: = load volatile i64, i64* %p

void test_iso_volatile_store8(char volatile *p, char v) { __iso_volatile_store8(p, v); }
void test_iso_volatile_store16(short volatile *p, short v) { __iso_volatile_store16(p, v); }
void test_iso_volatile_store32(int volatile *p, int v) { __iso_volatile_store32(p, v); }
void test_iso_volatile_store64(__int64 volatile *p, __int64 v) { __iso_volatile_store64(p, v); }

// CHECK: define{{.*}}void @test_iso_volatile_store8(i8*{{[a-z_ ]*}}%p, i8 {{[a-z_ ]*}}%v)
// CHECK: store volatile i8 %v, i8* %p
// CHECK: define{{.*}}void @test_iso_volatile_store16(i16*{{[a-z_ ]*}}%p, i16 {{[a-z_ ]*}}%v)
// CHECK: store volatile i16 %v, i16* %p
// CHECK: define{{.*}}void @test_iso_volatile_store32(i32*{{[a-z_ ]*}}%p, i32 {{[a-z_ ]*}}%v)
// CHECK: store volatile i32 %v, i32* %p
// CHECK: define{{.*}}void @test_iso_volatile_store64(i64*{{[a-z_ ]*}}%p, i64 {{[a-z_ ]*}}%v)
// CHECK: store volatile i64 %v, i64* %p
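
// Illustrative only: the __iso_volatile_* builtins perform plain volatile
// accesses with no implied memory barriers (in contrast to MSVC's
// /volatile:ms interpretation of ordinary volatile), which is why they
// lower to bare `load volatile` / `store volatile`. Under clang's default
// volatile semantics the 32-bit load is equivalent to this hypothetical
// helper:
static int iso_load32_equivalent(int volatile *p) { return *p; }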

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
__int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchange64(value, mask);
}

// CHECK: define{{.*}}i64 @test_InterlockedExchange64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask seq_cst, align 8
// CHECK: ret i64 [[RESULT:%[0-9]+]]
// CHECK: }

__int64 test_InterlockedExchangeAdd64(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeAdd64(value, mask);
}

// CHECK: define{{.*}}i64 @test_InterlockedExchangeAdd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask seq_cst, align 8
// CHECK: ret i64 [[RESULT:%[0-9]+]]
// CHECK: }

__int64 test_InterlockedExchangeSub64(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeSub64(value, mask);
}

// CHECK: define{{.*}}i64 @test_InterlockedExchangeSub64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i64* %value, i64 %mask seq_cst, align 8
// CHECK: ret i64 [[RESULT:%[0-9]+]]
// CHECK: }

__int64 test_InterlockedOr64(__int64 volatile *value, __int64 mask) {
  return _InterlockedOr64(value, mask);
}

// CHECK: define{{.*}}i64 @test_InterlockedOr64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask seq_cst, align 8
// CHECK: ret i64 [[RESULT:%[0-9]+]]
// CHECK: }

__int64 test_InterlockedXor64(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64(value, mask);
}

// CHECK: define{{.*}}i64 @test_InterlockedXor64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask seq_cst, align 8
// CHECK: ret i64 [[RESULT:%[0-9]+]]
// CHECK: }

__int64 test_InterlockedAnd64(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64(value, mask);
}

// CHECK: define{{.*}}i64 @test_InterlockedAnd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask seq_cst, align 8
// CHECK: ret i64 [[RESULT:%[0-9]+]]
// CHECK: }

__int64 test_InterlockedIncrement64(__int64 volatile *Addend) {
  return _InterlockedIncrement64(Addend);
}

// CHECK: define{{.*}}i64 @test_InterlockedIncrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 seq_cst, align 8
// CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
// CHECK: ret i64 [[RESULT]]
// CHECK: }

__int64 test_InterlockedDecrement64(__int64 volatile *Addend) {
  return _InterlockedDecrement64(Addend);
}

// CHECK: define{{.*}}i64 @test_InterlockedDecrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 seq_cst, align 8
// CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
// CHECK: ret i64 [[RESULT]]
// CHECK: }

#endif

#if defined(__i386__) || defined(__x86_64__)
long test_InterlockedExchange_HLEAcquire(long volatile *Target, long Value) {
// CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLEAcquire(i32*{{[a-z_ ]*}}%Target, i32{{[a-z_ ]*}}%Value)
// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %Target, i32 %Value, i32* elementtype(i32) %Target)
  return _InterlockedExchange_HLEAcquire(Target, Value);
}
long test_InterlockedExchange_HLERelease(long volatile *Target, long Value) {
// CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLERelease(i32*{{[a-z_ ]*}}%Target, i32{{[a-z_ ]*}}%Value)
// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %Target, i32 %Value, i32* elementtype(i32) %Target)
  return _InterlockedExchange_HLERelease(Target, Value);
}
long test_InterlockedCompareExchange_HLEAcquire(long volatile *Destination,
                                                long Exchange, long Comparand) {
// CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLEAcquire(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand)
// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %Destination, i32 %Exchange, i32 %Comparand, i32* elementtype(i32) %Destination)
  return _InterlockedCompareExchange_HLEAcquire(Destination, Exchange, Comparand);
}
long test_InterlockedCompareExchange_HLERelease(long volatile *Destination,
                                                long Exchange, long Comparand) {
// CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLERelease(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand)
// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* elementtype(i32) %Destination, i32 %Exchange, i32 %Comparand, i32* elementtype(i32) %Destination)
  return _InterlockedCompareExchange_HLERelease(Destination, Exchange, Comparand);
}
#endif
#if defined(__x86_64__)
__int64 test_InterlockedExchange64_HLEAcquire(__int64 volatile *Target, __int64 Value) {
// CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLEAcquire(i64*{{[a-z_ ]*}}%Target, i64{{[a-z_ ]*}}%Value)
// CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %Target, i64 %Value, i64* elementtype(i64) %Target)
  return _InterlockedExchange64_HLEAcquire(Target, Value);
}
__int64 test_InterlockedExchange64_HLERelease(__int64 volatile *Target, __int64 Value) {
// CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLERelease(i64*{{[a-z_ ]*}}%Target, i64{{[a-z_ ]*}}%Value)
// CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %Target, i64 %Value, i64* elementtype(i64) %Target)
  return _InterlockedExchange64_HLERelease(Target, Value);
}
__int64 test_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *Destination,
                                                     __int64 Exchange, __int64 Comparand) {
// CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLEAcquire(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand)
// CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %Destination, i64 %Exchange, i64 %Comparand, i64* elementtype(i64) %Destination)
  return _InterlockedCompareExchange64_HLEAcquire(Destination, Exchange, Comparand);
}
__int64 test_InterlockedCompareExchange64_HLERelease(__int64 volatile *Destination,
                                                     __int64 Exchange, __int64 Comparand) {
// CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLERelease(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand)
// CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* elementtype(i64) %Destination, i64 %Exchange, i64 %Comparand, i64* elementtype(i64) %Destination)
  return _InterlockedCompareExchange64_HLERelease(Destination, Exchange, Comparand);
}
#endif
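
// Illustrative only: the emitted ".byte 0xf2" / ".byte 0xf3" are the
// XACQUIRE / XRELEASE instruction prefixes; paired with LOCK on the
// following xchg/cmpxchg they request hardware lock elision (HLE) and
// decode as harmless REP prefixes on CPUs without TSX.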

#if defined(__arm__) || defined(__aarch64__)
char test_InterlockedExchangeAdd8_acq(char volatile *value, char mask) {
  return _InterlockedExchangeAdd8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask acquire, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
char test_InterlockedExchangeAdd8_rel(char volatile *value, char mask) {
  return _InterlockedExchangeAdd8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask release, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
char test_InterlockedExchangeAdd8_nf(char volatile *value, char mask) {
  return _InterlockedExchangeAdd8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i8* %value, i8 %mask monotonic, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
short test_InterlockedExchangeAdd16_acq(short volatile *value, short mask) {
  return _InterlockedExchangeAdd16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask acquire, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
short test_InterlockedExchangeAdd16_rel(short volatile *value, short mask) {
  return _InterlockedExchangeAdd16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask release, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
short test_InterlockedExchangeAdd16_nf(short volatile *value, short mask) {
  return _InterlockedExchangeAdd16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i16* %value, i16 %mask monotonic, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
long test_InterlockedExchangeAdd_acq(long volatile *value, long mask) {
  return _InterlockedExchangeAdd_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask acquire, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
long test_InterlockedExchangeAdd_rel(long volatile *value, long mask) {
  return _InterlockedExchangeAdd_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask release, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
long test_InterlockedExchangeAdd_nf(long volatile *value, long mask) {
  return _InterlockedExchangeAdd_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i32* %value, i32 %mask monotonic, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
__int64 test_InterlockedExchangeAdd64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeAdd64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask acquire, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
__int64 test_InterlockedExchangeAdd64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeAdd64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask release, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
__int64 test_InterlockedExchangeAdd64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeAdd64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask monotonic, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
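
// Illustrative only: on ARM and ARM64 the _acq/_rel/_nf suffixes select the
// acquire/release/monotonic atomic orderings seen above, while the
// unsuffixed intrinsics stay seq_cst. The weakest form, via a hypothetical
// helper:
static long relaxed_fetch_add(long volatile *p, long v) {
  return _InterlockedExchangeAdd_nf(p, v);
}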

char test_InterlockedExchange8_acq(char volatile *value, char mask) {
  return _InterlockedExchange8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask acquire, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
char test_InterlockedExchange8_rel(char volatile *value, char mask) {
  return _InterlockedExchange8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask release, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
char test_InterlockedExchange8_nf(char volatile *value, char mask) {
  return _InterlockedExchange8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i8* %value, i8 %mask monotonic, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
short test_InterlockedExchange16_acq(short volatile *value, short mask) {
  return _InterlockedExchange16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask acquire, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
short test_InterlockedExchange16_rel(short volatile *value, short mask) {
  return _InterlockedExchange16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask release, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
short test_InterlockedExchange16_nf(short volatile *value, short mask) {
  return _InterlockedExchange16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i16* %value, i16 %mask monotonic, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
long test_InterlockedExchange_acq(long volatile *value, long mask) {
  return _InterlockedExchange_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask acquire, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
long test_InterlockedExchange_rel(long volatile *value, long mask) {
  return _InterlockedExchange_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask release, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
long test_InterlockedExchange_nf(long volatile *value, long mask) {
  return _InterlockedExchange_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i32* %value, i32 %mask monotonic, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
__int64 test_InterlockedExchange64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchange64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask acquire, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
__int64 test_InterlockedExchange64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchange64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask release, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
__int64 test_InterlockedExchange64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchange64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask monotonic, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }

char test_InterlockedCompareExchange8_acq(char volatile *Destination, char Exchange, char Comperand) {
  return _InterlockedCompareExchange8_acq(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_acq(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange acquire acquire, align 1
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedCompareExchange8_rel(char volatile *Destination, char Exchange, char Comperand) {
  return _InterlockedCompareExchange8_rel(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_rel(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange release monotonic, align 1
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedCompareExchange8_nf(char volatile *Destination, char Exchange, char Comperand) {
  return _InterlockedCompareExchange8_nf(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_nf(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange monotonic monotonic, align 1
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedCompareExchange16_acq(short volatile *Destination, short Exchange, short Comperand) {
  return _InterlockedCompareExchange16_acq(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_acq(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange acquire acquire, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedCompareExchange16_rel(short volatile *Destination, short Exchange, short Comperand) {
  return _InterlockedCompareExchange16_rel(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_rel(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange release monotonic, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedCompareExchange16_nf(short volatile *Destination, short Exchange, short Comperand) {
  return _InterlockedCompareExchange16_nf(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_nf(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange monotonic monotonic, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedCompareExchange_acq(long volatile *Destination, long Exchange, long Comperand) {
  return _InterlockedCompareExchange_acq(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_acq(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange acquire acquire, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedCompareExchange_rel(long volatile *Destination, long Exchange, long Comperand) {
  return _InterlockedCompareExchange_rel(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_rel(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange release monotonic, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedCompareExchange_nf(long volatile *Destination, long Exchange, long Comperand) {
  return _InterlockedCompareExchange_nf(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_nf(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange monotonic monotonic, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedCompareExchange64_acq(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
  return _InterlockedCompareExchange64_acq(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_acq(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange acquire acquire, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedCompareExchange64_rel(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
  return _InterlockedCompareExchange64_rel(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_rel(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange release monotonic, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedCompareExchange64_nf(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
  return _InterlockedCompareExchange64_nf(Destination, Exchange, Comperand);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_nf(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange monotonic monotonic, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }
char test_InterlockedOr8_acq(char volatile *value, char mask) {
  return _InterlockedOr8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask acquire, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedOr8_rel(char volatile *value, char mask) {
  return _InterlockedOr8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask release, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedOr8_nf(char volatile *value, char mask) {
  return _InterlockedOr8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i8* %value, i8 %mask monotonic, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedOr16_acq(short volatile *value, short mask) {
  return _InterlockedOr16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask acquire, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedOr16_rel(short volatile *value, short mask) {
  return _InterlockedOr16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask release, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedOr16_nf(short volatile *value, short mask) {
  return _InterlockedOr16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i16* %value, i16 %mask monotonic, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedOr_acq(long volatile *value, long mask) {
  return _InterlockedOr_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask acquire, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedOr_rel(long volatile *value, long mask) {
  return _InterlockedOr_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask release, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedOr_nf(long volatile *value, long mask) {
  return _InterlockedOr_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i32* %value, i32 %mask monotonic, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedOr64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedOr64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask acquire, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedOr64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedOr64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask release, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedOr64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedOr64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask monotonic, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }
char test_InterlockedXor8_acq(char volatile *value, char mask) {
  return _InterlockedXor8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask acquire, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedXor8_rel(char volatile *value, char mask) {
  return _InterlockedXor8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask release, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedXor8_nf(char volatile *value, char mask) {
  return _InterlockedXor8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask monotonic, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedXor16_acq(short volatile *value, short mask) {
  return _InterlockedXor16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask acquire, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedXor16_rel(short volatile *value, short mask) {
  return _InterlockedXor16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask release, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedXor16_nf(short volatile *value, short mask) {
  return _InterlockedXor16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask monotonic, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedXor_acq(long volatile *value, long mask) {
  return _InterlockedXor_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask acquire, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedXor_rel(long volatile *value, long mask) {
  return _InterlockedXor_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask release, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedXor_nf(long volatile *value, long mask) {
  return _InterlockedXor_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask monotonic, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedXor64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask acquire, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedXor64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask release, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedXor64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask monotonic, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }
char test_InterlockedAnd8_acq(char volatile *value, char mask) {
  return _InterlockedAnd8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask acquire, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedAnd8_rel(char volatile *value, char mask) {
  return _InterlockedAnd8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask release, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

char test_InterlockedAnd8_nf(char volatile *value, char mask) {
  return _InterlockedAnd8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i8* %value, i8 %mask monotonic, align 1
// CHECK-ARM-ARM64: ret i8 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedAnd16_acq(short volatile *value, short mask) {
  return _InterlockedAnd16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask acquire, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedAnd16_rel(short volatile *value, short mask) {
  return _InterlockedAnd16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask release, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedAnd16_nf(short volatile *value, short mask) {
  return _InterlockedAnd16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i16* %value, i16 %mask monotonic, align 2
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedAnd_acq(long volatile *value, long mask) {
  return _InterlockedAnd_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask acquire, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedAnd_rel(long volatile *value, long mask) {
  return _InterlockedAnd_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask release, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedAnd_nf(long volatile *value, long mask) {
  return _InterlockedAnd_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i32* %value, i32 %mask monotonic, align 4
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedAnd64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask acquire, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedAnd64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask release, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedAnd64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask monotonic, align 8
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }
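
// _InterlockedIncrement returns the new (incremented) value, while atomicrmw
// add returns the old one, so the IR re-adds 1 to produce the result.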
short test_InterlockedIncrement16_acq(short volatile *Addend) {
  return _InterlockedIncrement16_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_acq(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 acquire, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedIncrement16_rel(short volatile *Addend) {
  return _InterlockedIncrement16_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_rel(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 release, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedIncrement16_nf(short volatile *Addend) {
  return _InterlockedIncrement16_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_nf(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i16* %Addend, i16 1 monotonic, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedIncrement_acq(long volatile *Addend) {
  return _InterlockedIncrement_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_acq(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 acquire, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedIncrement_rel(long volatile *Addend) {
  return _InterlockedIncrement_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_rel(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 release, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedIncrement_nf(long volatile *Addend) {
  return _InterlockedIncrement_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_nf(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i32* %Addend, i32 1 monotonic, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedIncrement64_acq(__int64 volatile *Addend) {
  return _InterlockedIncrement64_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_acq(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 acquire, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedIncrement64_rel(__int64 volatile *Addend) {
  return _InterlockedIncrement64_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_rel(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 release, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedIncrement64_nf(__int64 volatile *Addend) {
  return _InterlockedIncrement64_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_nf(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 monotonic, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }
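
// Likewise, _InterlockedDecrement returns the new value: atomicrmw sub yields
// the old value, and the IR adds -1 to it.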
short test_InterlockedDecrement16_acq(short volatile *Addend) {
  return _InterlockedDecrement16_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_acq(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 acquire, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedDecrement16_rel(short volatile *Addend) {
  return _InterlockedDecrement16_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_rel(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 release, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

short test_InterlockedDecrement16_nf(short volatile *Addend) {
  return _InterlockedDecrement16_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_nf(i16*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i16* %Addend, i16 1 monotonic, align 2
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1
// CHECK-ARM-ARM64: ret i16 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedDecrement_acq(long volatile *Addend) {
  return _InterlockedDecrement_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_acq(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 acquire, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedDecrement_rel(long volatile *Addend) {
  return _InterlockedDecrement_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_rel(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 release, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

long test_InterlockedDecrement_nf(long volatile *Addend) {
  return _InterlockedDecrement_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_nf(i32*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i32* %Addend, i32 1 monotonic, align 4
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK-ARM-ARM64: ret i32 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedDecrement64_acq(__int64 volatile *Addend) {
  return _InterlockedDecrement64_acq(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_acq(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 acquire, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedDecrement64_rel(__int64 volatile *Addend) {
  return _InterlockedDecrement64_rel(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_rel(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 release, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }

__int64 test_InterlockedDecrement64_nf(__int64 volatile *Addend) {
  return _InterlockedDecrement64_nf(Addend);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_nf(i64*{{[a-z_ ]*}}%Addend){{.*}}{
// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 monotonic, align 8
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
// CHECK-ARM-ARM64: ret i64 [[RESULT]]
// CHECK-ARM-ARM64: }
#endif
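
// __fastfail raises a fast-fail exception through a target-specific trap:
// "int 0x29" on x86, "udf #251" on ARM, and "brk #0xF003" on AArch64, with
// the fail code pinned to a fixed register, as the checks below show.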
void test__fastfail(void) {
  __fastfail(42);
}
// CHECK-LABEL: define{{.*}} void @test__fastfail()
// CHECK-ARM: call void asm sideeffect "udf #251", "{r0}"(i32 42) #[[NORETURN:[0-9]+]]
// CHECK-INTEL: call void asm sideeffect "int $$0x29", "{cx}"(i32 42) #[[NORETURN]]
// CHECK-ARM64: call void asm sideeffect "brk #0xF003", "{w0}"(i32 42) #[[NORETURN:[0-9]+]]

// Attributes come last.

// CHECK: attributes #[[NORETURN]] = { noreturn{{.*}} }