// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 -triple x86_64-linux-gnu -target-cpu core2 %s -emit-llvm -o - | FileCheck --check-prefixes=X64 %s
// RUN: %clang_cc1 -triple i686-linux-gnu -target-cpu core2 %s -emit-llvm -o - | FileCheck --check-prefixes=X86 %s
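
// Atomic operations on 'long double' (x86_fp80). With -target-cpu core2, the
// x86-64 run expects lowering to native 16-byte atomic loads/stores and i128
// cmpxchg loops, while the i686 run expects calls to the libatomic helpers
// (__atomic_load, __atomic_store, __atomic_compare_exchange) on 12-byte
// objects.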

// X64-LABEL: define dso_local x86_fp80 @testinc(
// X64-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0:[0-9]+]] {
// X64-NEXT: [[ENTRY:.*]]:
// X64-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 8
// X64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i128, ptr [[TMP0]] seq_cst, align 16
// X64-NEXT: store i128 [[ATOMIC_LOAD]], ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: [[TMP1:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: br label %[[ATOMIC_OP:.*]]
// X64: [[ATOMIC_OP]]:
// X64-NEXT: [[TMP2:%.*]] = phi x86_fp80 [ [[TMP1]], %[[ENTRY]] ], [ [[TMP8:%.*]], %[[ATOMIC_OP]] ]
// X64-NEXT: [[INC:%.*]] = fadd x86_fp80 [[TMP2]], 0xK3FFF8000000000000000
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP1]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[TMP2]], ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: [[TMP3:%.*]] = load i128, ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP2]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[INC]], ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP4:%.*]] = load i128, ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP5:%.*]] = cmpxchg ptr [[TMP0]], i128 [[TMP3]], i128 [[TMP4]] seq_cst seq_cst, align 16
// X64-NEXT: [[TMP6:%.*]] = extractvalue { i128, i1 } [[TMP5]], 0
// X64-NEXT: [[TMP7:%.*]] = extractvalue { i128, i1 } [[TMP5]], 1
// X64-NEXT: store i128 [[TMP6]], ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: [[TMP8]] = load x86_fp80, ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: br i1 [[TMP7]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X64: [[ATOMIC_CONT]]:
// X64-NEXT: ret x86_fp80 [[INC]]
//
// X86-LABEL: define dso_local x86_fp80 @testinc(
// X86-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0:[0-9]+]] {
// X86-NEXT: [[ENTRY:.*]]:
// X86-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 4
// X86-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 4
// X86-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 4
// X86-NEXT: call void @__atomic_load(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP]], i32 noundef 5)
// X86-NEXT: [[TMP1:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 4
// X86-NEXT: br label %[[ATOMIC_OP:.*]]
// X86: [[ATOMIC_OP]]:
// X86-NEXT: [[TMP2:%.*]] = phi x86_fp80 [ [[TMP1]], %[[ENTRY]] ], [ [[TMP3:%.*]], %[[ATOMIC_OP]] ]
// X86-NEXT: [[INC:%.*]] = fadd x86_fp80 [[TMP2]], 0xK3FFF8000000000000000
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP1]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[TMP2]], ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP2]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[INC]], ptr [[ATOMIC_TEMP2]], align 4
// X86-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP1]], ptr noundef [[ATOMIC_TEMP2]], i32 noundef 5, i32 noundef 5)
// X86-NEXT: [[TMP3]] = load x86_fp80, ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: br i1 [[CALL]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X86: [[ATOMIC_CONT]]:
// X86-NEXT: ret x86_fp80 [[INC]]
//
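// testinc: atomic pre-increment. Both runs are expected to build a
// compare-exchange retry loop around an 'fadd ... 1.0' and return the
// incremented value.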
long double testinc(_Atomic long double *addr) {

  return ++*addr;
}

// X64-LABEL: define dso_local x86_fp80 @testdec(
// X64-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X64-NEXT: [[ENTRY:.*]]:
// X64-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 8
// X64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i128, ptr [[TMP0]] seq_cst, align 16
// X64-NEXT: store i128 [[ATOMIC_LOAD]], ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: [[TMP1:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: br label %[[ATOMIC_OP:.*]]
// X64: [[ATOMIC_OP]]:
// X64-NEXT: [[TMP2:%.*]] = phi x86_fp80 [ [[TMP1]], %[[ENTRY]] ], [ [[TMP8:%.*]], %[[ATOMIC_OP]] ]
// X64-NEXT: [[DEC:%.*]] = fadd x86_fp80 [[TMP2]], 0xKBFFF8000000000000000
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP1]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[TMP2]], ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: [[TMP3:%.*]] = load i128, ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP2]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[DEC]], ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP4:%.*]] = load i128, ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP5:%.*]] = cmpxchg ptr [[TMP0]], i128 [[TMP3]], i128 [[TMP4]] seq_cst seq_cst, align 16
// X64-NEXT: [[TMP6:%.*]] = extractvalue { i128, i1 } [[TMP5]], 0
// X64-NEXT: [[TMP7:%.*]] = extractvalue { i128, i1 } [[TMP5]], 1
// X64-NEXT: store i128 [[TMP6]], ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: [[TMP8]] = load x86_fp80, ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: br i1 [[TMP7]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X64: [[ATOMIC_CONT]]:
// X64-NEXT: ret x86_fp80 [[TMP1]]
//
// X86-LABEL: define dso_local x86_fp80 @testdec(
// X86-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X86-NEXT: [[ENTRY:.*]]:
// X86-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 4
// X86-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 4
// X86-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 4
// X86-NEXT: call void @__atomic_load(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP]], i32 noundef 5)
// X86-NEXT: [[TMP1:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 4
// X86-NEXT: br label %[[ATOMIC_OP:.*]]
// X86: [[ATOMIC_OP]]:
// X86-NEXT: [[TMP2:%.*]] = phi x86_fp80 [ [[TMP1]], %[[ENTRY]] ], [ [[TMP3:%.*]], %[[ATOMIC_OP]] ]
// X86-NEXT: [[DEC:%.*]] = fadd x86_fp80 [[TMP2]], 0xKBFFF8000000000000000
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP1]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[TMP2]], ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP2]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[DEC]], ptr [[ATOMIC_TEMP2]], align 4
// X86-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP1]], ptr noundef [[ATOMIC_TEMP2]], i32 noundef 5, i32 noundef 5)
// X86-NEXT: [[TMP3]] = load x86_fp80, ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: br i1 [[CALL]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X86: [[ATOMIC_CONT]]:
// X86-NEXT: ret x86_fp80 [[TMP1]]
//
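// testdec: atomic post-decrement. Same compare-exchange loop shape, but the
// original (pre-decrement) value is returned.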
long double testdec(_Atomic long double *addr) {

  return (*addr)--;
}

// X64-LABEL: define dso_local x86_fp80 @testcompassign(
// X64-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X64-NEXT: [[ENTRY:.*]]:
// X64-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 8
// X64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP5:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i128, ptr [[TMP0]] seq_cst, align 16
// X64-NEXT: store i128 [[ATOMIC_LOAD]], ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: [[TMP1:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: br label %[[ATOMIC_OP:.*]]
// X64: [[ATOMIC_OP]]:
// X64-NEXT: [[TMP2:%.*]] = phi x86_fp80 [ [[TMP1]], %[[ENTRY]] ], [ [[TMP8:%.*]], %[[ATOMIC_OP]] ]
// X64-NEXT: [[SUB:%.*]] = fsub x86_fp80 [[TMP2]], 0xK4003C800000000000000
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP1]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[TMP2]], ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: [[TMP3:%.*]] = load i128, ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP2]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[SUB]], ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP4:%.*]] = load i128, ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP5:%.*]] = cmpxchg ptr [[TMP0]], i128 [[TMP3]], i128 [[TMP4]] seq_cst seq_cst, align 16
// X64-NEXT: [[TMP6:%.*]] = extractvalue { i128, i1 } [[TMP5]], 0
// X64-NEXT: [[TMP7:%.*]] = extractvalue { i128, i1 } [[TMP5]], 1
// X64-NEXT: store i128 [[TMP6]], ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: [[TMP8]] = load x86_fp80, ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: br i1 [[TMP7]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X64: [[ATOMIC_CONT]]:
// X64-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[ATOMIC_LOAD4:%.*]] = load atomic i128, ptr [[TMP9]] seq_cst, align 16
// X64-NEXT: store i128 [[ATOMIC_LOAD4]], ptr [[ATOMIC_TEMP5]], align 16
// X64-NEXT: [[TMP10:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP5]], align 16
// X64-NEXT: ret x86_fp80 [[TMP10]]
//
// X86-LABEL: define dso_local x86_fp80 @testcompassign(
// X86-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X86-NEXT: [[ENTRY:.*]]:
// X86-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 4
// X86-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 4
// X86-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 4
// X86-NEXT: call void @__atomic_load(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP]], i32 noundef 5)
// X86-NEXT: [[TMP1:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 4
// X86-NEXT: br label %[[ATOMIC_OP:.*]]
// X86: [[ATOMIC_OP]]:
// X86-NEXT: [[TMP2:%.*]] = phi x86_fp80 [ [[TMP1]], %[[ENTRY]] ], [ [[TMP3:%.*]], %[[ATOMIC_OP]] ]
// X86-NEXT: [[SUB:%.*]] = fsub x86_fp80 [[TMP2]], 0xK4003C800000000000000
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP1]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[TMP2]], ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP2]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[SUB]], ptr [[ATOMIC_TEMP2]], align 4
// X86-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP1]], ptr noundef [[ATOMIC_TEMP2]], i32 noundef 5, i32 noundef 5)
// X86-NEXT: [[TMP3]] = load x86_fp80, ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: br i1 [[CALL]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X86: [[ATOMIC_CONT]]:
// X86-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 4
// X86-NEXT: call void @__atomic_load(i32 noundef 12, ptr noundef [[TMP4]], ptr noundef [[ATOMIC_TEMP3]], i32 noundef 5)
// X86-NEXT: [[TMP5:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP3]], align 4
// X86-NEXT: ret x86_fp80 [[TMP5]]
//
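// testcompassign: atomic compound assignment (-= 25), followed by a separate
// atomic load of the updated value for the return.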
long double testcompassign(_Atomic long double *addr) {
  *addr -= 25;
  return *addr;
}

// X64-LABEL: define dso_local x86_fp80 @testassign(
// X64-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X64-NEXT: [[ENTRY:.*:]]
// X64-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 8
// X64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 8
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 0xK4005E600000000000000, ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: [[TMP1:%.*]] = load i128, ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: store atomic i128 [[TMP1]], ptr [[TMP0]] seq_cst, align 16
// X64-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i128, ptr [[TMP2]] seq_cst, align 16
// X64-NEXT: store i128 [[ATOMIC_LOAD]], ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: [[TMP3:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: ret x86_fp80 [[TMP3]]
//
// X86-LABEL: define dso_local x86_fp80 @testassign(
// X86-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X86-NEXT: [[ENTRY:.*:]]
// X86-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 4
// X86-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 4
// X86-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 4
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 0xK4005E600000000000000, ptr [[ATOMIC_TEMP]], align 4
// X86-NEXT: call void @__atomic_store(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP]], i32 noundef 5)
// X86-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 4
// X86-NEXT: call void @__atomic_load(i32 noundef 12, ptr noundef [[TMP1]], ptr noundef [[ATOMIC_TEMP1]], i32 noundef 5)
// X86-NEXT: [[TMP2:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: ret x86_fp80 [[TMP2]]
//
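// testassign: a plain atomic store of the constant 115.0 followed by an
// atomic load; no compare-exchange loop is involved.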
long double testassign(_Atomic long double *addr) {
  *addr = 115;

  return *addr;
}

// X64-LABEL: define dso_local x86_fp80 @test_volatile_inc(
// X64-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X64-NEXT: [[ENTRY:.*]]:
// X64-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 8
// X64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic volatile i128, ptr [[TMP0]] seq_cst, align 16
// X64-NEXT: store i128 [[ATOMIC_LOAD]], ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: [[TMP1:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: br label %[[ATOMIC_OP:.*]]
// X64: [[ATOMIC_OP]]:
// X64-NEXT: [[TMP2:%.*]] = phi x86_fp80 [ [[TMP1]], %[[ENTRY]] ], [ [[TMP8:%.*]], %[[ATOMIC_OP]] ]
// X64-NEXT: [[INC:%.*]] = fadd x86_fp80 [[TMP2]], 0xK3FFF8000000000000000
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP1]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[TMP2]], ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: [[TMP3:%.*]] = load i128, ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP2]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[INC]], ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP4:%.*]] = load i128, ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP5:%.*]] = cmpxchg volatile ptr [[TMP0]], i128 [[TMP3]], i128 [[TMP4]] seq_cst seq_cst, align 16
// X64-NEXT: [[TMP6:%.*]] = extractvalue { i128, i1 } [[TMP5]], 0
// X64-NEXT: [[TMP7:%.*]] = extractvalue { i128, i1 } [[TMP5]], 1
// X64-NEXT: store i128 [[TMP6]], ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: [[TMP8]] = load x86_fp80, ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: br i1 [[TMP7]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X64: [[ATOMIC_CONT]]:
// X64-NEXT: ret x86_fp80 [[INC]]
//
// X86-LABEL: define dso_local x86_fp80 @test_volatile_inc(
// X86-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X86-NEXT: [[ENTRY:.*]]:
// X86-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 4
// X86-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 4
// X86-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 4
// X86-NEXT: call void @__atomic_load(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP]], i32 noundef 5)
// X86-NEXT: [[TMP1:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 4
// X86-NEXT: br label %[[ATOMIC_OP:.*]]
// X86: [[ATOMIC_OP]]:
// X86-NEXT: [[TMP2:%.*]] = phi x86_fp80 [ [[TMP1]], %[[ENTRY]] ], [ [[TMP3:%.*]], %[[ATOMIC_OP]] ]
// X86-NEXT: [[INC:%.*]] = fadd x86_fp80 [[TMP2]], 0xK3FFF8000000000000000
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP1]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[TMP2]], ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP2]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[INC]], ptr [[ATOMIC_TEMP2]], align 4
// X86-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP1]], ptr noundef [[ATOMIC_TEMP2]], i32 noundef 5, i32 noundef 5)
// X86-NEXT: [[TMP3]] = load x86_fp80, ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: br i1 [[CALL]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X86: [[ATOMIC_CONT]]:
// X86-NEXT: ret x86_fp80 [[INC]]
//
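// The test_volatile_* functions repeat the operations above through a
// 'volatile _Atomic' pointer. On x86-64 the atomic loads, stores, and cmpxchg
// are expected to carry the 'volatile' marker; the i686 libcall lowering is
// unchanged.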
long double test_volatile_inc(volatile _Atomic long double *addr) {
  return ++*addr;
}

// X64-LABEL: define dso_local x86_fp80 @test_volatile_dec(
// X64-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X64-NEXT: [[ENTRY:.*]]:
// X64-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 8
// X64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic volatile i128, ptr [[TMP0]] seq_cst, align 16
// X64-NEXT: store i128 [[ATOMIC_LOAD]], ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: [[TMP1:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: br label %[[ATOMIC_OP:.*]]
// X64: [[ATOMIC_OP]]:
// X64-NEXT: [[TMP2:%.*]] = phi x86_fp80 [ [[TMP1]], %[[ENTRY]] ], [ [[TMP8:%.*]], %[[ATOMIC_OP]] ]
// X64-NEXT: [[DEC:%.*]] = fadd x86_fp80 [[TMP2]], 0xKBFFF8000000000000000
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP1]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[TMP2]], ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: [[TMP3:%.*]] = load i128, ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP2]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[DEC]], ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP4:%.*]] = load i128, ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP5:%.*]] = cmpxchg volatile ptr [[TMP0]], i128 [[TMP3]], i128 [[TMP4]] seq_cst seq_cst, align 16
// X64-NEXT: [[TMP6:%.*]] = extractvalue { i128, i1 } [[TMP5]], 0
// X64-NEXT: [[TMP7:%.*]] = extractvalue { i128, i1 } [[TMP5]], 1
// X64-NEXT: store i128 [[TMP6]], ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: [[TMP8]] = load x86_fp80, ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: br i1 [[TMP7]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X64: [[ATOMIC_CONT]]:
// X64-NEXT: ret x86_fp80 [[TMP1]]
//
// X86-LABEL: define dso_local x86_fp80 @test_volatile_dec(
// X86-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X86-NEXT: [[ENTRY:.*]]:
// X86-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 4
// X86-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 4
// X86-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 4
// X86-NEXT: call void @__atomic_load(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP]], i32 noundef 5)
// X86-NEXT: [[TMP1:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 4
// X86-NEXT: br label %[[ATOMIC_OP:.*]]
// X86: [[ATOMIC_OP]]:
// X86-NEXT: [[TMP2:%.*]] = phi x86_fp80 [ [[TMP1]], %[[ENTRY]] ], [ [[TMP3:%.*]], %[[ATOMIC_OP]] ]
// X86-NEXT: [[DEC:%.*]] = fadd x86_fp80 [[TMP2]], 0xKBFFF8000000000000000
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP1]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[TMP2]], ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP2]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[DEC]], ptr [[ATOMIC_TEMP2]], align 4
// X86-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP1]], ptr noundef [[ATOMIC_TEMP2]], i32 noundef 5, i32 noundef 5)
// X86-NEXT: [[TMP3]] = load x86_fp80, ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: br i1 [[CALL]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X86: [[ATOMIC_CONT]]:
// X86-NEXT: ret x86_fp80 [[TMP1]]
//
long double test_volatile_dec(volatile _Atomic long double *addr) {
  return (*addr)--;
}

// X64-LABEL: define dso_local x86_fp80 @test_volatile_compassign(
// X64-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X64-NEXT: [[ENTRY:.*]]:
// X64-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 8
// X64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP5:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic volatile i128, ptr [[TMP0]] seq_cst, align 16
// X64-NEXT: store i128 [[ATOMIC_LOAD]], ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: [[TMP1:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: br label %[[ATOMIC_OP:.*]]
// X64: [[ATOMIC_OP]]:
// X64-NEXT: [[TMP2:%.*]] = phi x86_fp80 [ [[TMP1]], %[[ENTRY]] ], [ [[TMP8:%.*]], %[[ATOMIC_OP]] ]
// X64-NEXT: [[SUB:%.*]] = fsub x86_fp80 [[TMP2]], 0xK4003C800000000000000
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP1]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[TMP2]], ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: [[TMP3:%.*]] = load i128, ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP2]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[SUB]], ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP4:%.*]] = load i128, ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP5:%.*]] = cmpxchg volatile ptr [[TMP0]], i128 [[TMP3]], i128 [[TMP4]] seq_cst seq_cst, align 16
// X64-NEXT: [[TMP6:%.*]] = extractvalue { i128, i1 } [[TMP5]], 0
// X64-NEXT: [[TMP7:%.*]] = extractvalue { i128, i1 } [[TMP5]], 1
// X64-NEXT: store i128 [[TMP6]], ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: [[TMP8]] = load x86_fp80, ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: br i1 [[TMP7]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X64: [[ATOMIC_CONT]]:
// X64-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[ATOMIC_LOAD4:%.*]] = load atomic volatile i128, ptr [[TMP9]] seq_cst, align 16
// X64-NEXT: store i128 [[ATOMIC_LOAD4]], ptr [[ATOMIC_TEMP5]], align 16
// X64-NEXT: [[TMP10:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP5]], align 16
// X64-NEXT: ret x86_fp80 [[TMP10]]
//
// X86-LABEL: define dso_local x86_fp80 @test_volatile_compassign(
// X86-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X86-NEXT: [[ENTRY:.*]]:
// X86-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 4
// X86-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 4
// X86-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 4
// X86-NEXT: call void @__atomic_load(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP]], i32 noundef 5)
// X86-NEXT: [[TMP1:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 4
// X86-NEXT: br label %[[ATOMIC_OP:.*]]
// X86: [[ATOMIC_OP]]:
// X86-NEXT: [[TMP2:%.*]] = phi x86_fp80 [ [[TMP1]], %[[ENTRY]] ], [ [[TMP3:%.*]], %[[ATOMIC_OP]] ]
// X86-NEXT: [[SUB:%.*]] = fsub x86_fp80 [[TMP2]], 0xK4003C800000000000000
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP1]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[TMP2]], ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP2]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[SUB]], ptr [[ATOMIC_TEMP2]], align 4
// X86-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP1]], ptr noundef [[ATOMIC_TEMP2]], i32 noundef 5, i32 noundef 5)
// X86-NEXT: [[TMP3]] = load x86_fp80, ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: br i1 [[CALL]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X86: [[ATOMIC_CONT]]:
// X86-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 4
// X86-NEXT: call void @__atomic_load(i32 noundef 12, ptr noundef [[TMP4]], ptr noundef [[ATOMIC_TEMP3]], i32 noundef 5)
// X86-NEXT: [[TMP5:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP3]], align 4
// X86-NEXT: ret x86_fp80 [[TMP5]]
//
long double test_volatile_compassign(volatile _Atomic long double *addr) {
  *addr -= 25;
  return *addr;
}

// X64-LABEL: define dso_local x86_fp80 @test_volatile_assign(
// X64-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X64-NEXT: [[ENTRY:.*:]]
// X64-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 8
// X64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 8
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 0xK4005E600000000000000, ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: [[TMP1:%.*]] = load i128, ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: store atomic volatile i128 [[TMP1]], ptr [[TMP0]] seq_cst, align 16
// X64-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 8
// X64-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic volatile i128, ptr [[TMP2]] seq_cst, align 16
// X64-NEXT: store i128 [[ATOMIC_LOAD]], ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: [[TMP3:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: ret x86_fp80 [[TMP3]]
//
// X86-LABEL: define dso_local x86_fp80 @test_volatile_assign(
// X86-SAME: ptr noundef [[ADDR:%.*]]) #[[ATTR0]] {
// X86-NEXT: [[ENTRY:.*:]]
// X86-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 4
// X86-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR]], align 4
// X86-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 4
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 0xK4005E600000000000000, ptr [[ATOMIC_TEMP]], align 4
// X86-NEXT: call void @__atomic_store(i32 noundef 12, ptr noundef [[TMP0]], ptr noundef [[ATOMIC_TEMP]], i32 noundef 5)
// X86-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ADDR_ADDR]], align 4
// X86-NEXT: call void @__atomic_load(i32 noundef 12, ptr noundef [[TMP1]], ptr noundef [[ATOMIC_TEMP1]], i32 noundef 5)
// X86-NEXT: [[TMP2:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: ret x86_fp80 [[TMP2]]
//
long double test_volatile_assign(volatile _Atomic long double *addr) {
  *addr = 115;

  return *addr;
}

// X64-LABEL: define dso_local i32 @pr107054(
// X64-SAME: ) #[[ATTR0]] {
// X64-NEXT: [[ENTRY:.*]]:
// X64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca x86_fp80, align 16
// X64-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i128, ptr @pr107054.n seq_cst, align 16
// X64-NEXT: store i128 [[ATOMIC_LOAD]], ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 16
// X64-NEXT: br label %[[ATOMIC_OP:.*]]
// X64: [[ATOMIC_OP]]:
// X64-NEXT: [[TMP1:%.*]] = phi x86_fp80 [ [[TMP0]], %[[ENTRY]] ], [ [[TMP7:%.*]], %[[ATOMIC_OP]] ]
// X64-NEXT: [[INC:%.*]] = fadd x86_fp80 [[TMP1]], 0xK3FFF8000000000000000
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP1]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[TMP1]], ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: [[TMP2:%.*]] = load i128, ptr [[ATOMIC_TEMP1]], align 16
// X64-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[ATOMIC_TEMP2]], i8 0, i64 16, i1 false)
// X64-NEXT: store x86_fp80 [[INC]], ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP3:%.*]] = load i128, ptr [[ATOMIC_TEMP2]], align 16
// X64-NEXT: [[TMP4:%.*]] = cmpxchg ptr @pr107054.n, i128 [[TMP2]], i128 [[TMP3]] seq_cst seq_cst, align 16
// X64-NEXT: [[TMP5:%.*]] = extractvalue { i128, i1 } [[TMP4]], 0
// X64-NEXT: [[TMP6:%.*]] = extractvalue { i128, i1 } [[TMP4]], 1
// X64-NEXT: store i128 [[TMP5]], ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: [[TMP7]] = load x86_fp80, ptr [[ATOMIC_TEMP3]], align 16
// X64-NEXT: br i1 [[TMP6]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X64: [[ATOMIC_CONT]]:
// X64-NEXT: [[CMP:%.*]] = fcmp oeq x86_fp80 [[INC]], 0xK3FFF8000000000000000
// X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
// X64-NEXT: ret i32 [[CONV]]
//
// X86-LABEL: define dso_local i32 @pr107054(
// X86-SAME: ) #[[ATTR0]] {
// X86-NEXT: [[ENTRY:.*]]:
// X86-NEXT: [[ATOMIC_TEMP:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP1:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: [[ATOMIC_TEMP2:%.*]] = alloca x86_fp80, align 4
// X86-NEXT: call void @__atomic_load(i32 noundef 12, ptr noundef @pr107054.n, ptr noundef [[ATOMIC_TEMP]], i32 noundef 5)
// X86-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[ATOMIC_TEMP]], align 4
// X86-NEXT: br label %[[ATOMIC_OP:.*]]
// X86: [[ATOMIC_OP]]:
// X86-NEXT: [[TMP1:%.*]] = phi x86_fp80 [ [[TMP0]], %[[ENTRY]] ], [ [[TMP2:%.*]], %[[ATOMIC_OP]] ]
// X86-NEXT: [[INC:%.*]] = fadd x86_fp80 [[TMP1]], 0xK3FFF8000000000000000
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP1]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[TMP1]], ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: call void @llvm.memset.p0.i64(ptr align 4 [[ATOMIC_TEMP2]], i8 0, i64 12, i1 false)
// X86-NEXT: store x86_fp80 [[INC]], ptr [[ATOMIC_TEMP2]], align 4
// X86-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 12, ptr noundef @pr107054.n, ptr noundef [[ATOMIC_TEMP1]], ptr noundef [[ATOMIC_TEMP2]], i32 noundef 5, i32 noundef 5)
// X86-NEXT: [[TMP2]] = load x86_fp80, ptr [[ATOMIC_TEMP1]], align 4
// X86-NEXT: br i1 [[CALL]], label %[[ATOMIC_CONT:.*]], label %[[ATOMIC_OP]]
// X86: [[ATOMIC_CONT]]:
// X86-NEXT: [[CMP:%.*]] = fcmp oeq x86_fp80 [[INC]], 0xK3FFF8000000000000000
// X86-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
// X86-NEXT: ret i32 [[CONV]]
//
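// pr107054: regression test, named after the upstream issue number, for
// pre-incrementing a function-local static _Atomic long double and using the
// result in a comparison.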
int pr107054()
{
  static _Atomic long double n;
  return (++n) == 1;
}