; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; Test 32-bit addition in which the second operand is variable.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare i32 @foo()
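
; Where the overflow result is returned as an i1, it is materialized from the
; condition code: IPM copies the CC into a GPR and RISBG extracts the carry
; bit.  Where it only feeds a branch (f2, f3), the CC is consumed directly by
; the conditional jump.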

; Check ALR.
define zeroext i1 @f1(i32 %dummy, i32 %a, i32 %b, ptr %res) {
; CHECK-LABEL: f1:
; CHECK: # %bb.0:
; CHECK-NEXT: alr %r3, %r4
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: st %r3, 0(%r5)
; CHECK-NEXT: br %r14
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check using the overflow result for a branch.
define void @f2(i32 %dummy, i32 %a, i32 %b, ptr %res) {
; CHECK-LABEL: f2:
; CHECK: # %bb.0:
; CHECK-NEXT: alr %r3, %r4
; CHECK-NEXT: st %r3, 0(%r5)
; CHECK-NEXT: jgnle foo@PLT
; CHECK-NEXT: .LBB1_1: # %exit
; CHECK-NEXT: br %r14
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  br i1 %obit, label %call, label %exit

call:
  tail call i32 @foo()
  br label %exit

exit:
  ret void
}

; ... and the same with the inverted direction.
define void @f3(i32 %dummy, i32 %a, i32 %b, ptr %res) {
; CHECK-LABEL: f3:
; CHECK: # %bb.0:
; CHECK-NEXT: alr %r3, %r4
; CHECK-NEXT: st %r3, 0(%r5)
; CHECK-NEXT: jgle foo@PLT
; CHECK-NEXT: .LBB2_1: # %exit
; CHECK-NEXT: br %r14
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  br i1 %obit, label %exit, label %call

call:
  tail call i32 @foo()
  br label %exit

exit:
  ret void
}
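
; AL takes a 12-bit unsigned displacement, so word offsets 0 and 4092 below
; are the two ends of its directly addressable range.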

; Check the low end of the AL range.
define zeroext i1 @f4(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f4:
; CHECK: # %bb.0:
; CHECK-NEXT: al %r3, 0(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: st %r3, 0(%r5)
; CHECK-NEXT: br %r14
  %b = load i32, ptr %src
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check the high end of the aligned AL range.
define zeroext i1 @f5(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f5:
; CHECK: # %bb.0:
; CHECK-NEXT: al %r3, 4092(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: st %r3, 0(%r5)
; CHECK-NEXT: br %r14
  %ptr = getelementptr i32, ptr %src, i64 1023
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}
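
; ALY takes a 20-bit signed displacement, covering byte offsets -524288 to
; 524287; offsets outside that range need the address to be computed
; separately.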

; Check the next word up, which should use ALY instead of AL.
define zeroext i1 @f6(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f6:
; CHECK: # %bb.0:
; CHECK-NEXT: aly %r3, 4096(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: st %r3, 0(%r5)
; CHECK-NEXT: br %r14
  %ptr = getelementptr i32, ptr %src, i64 1024
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check the high end of the aligned ALY range.
define zeroext i1 @f7(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f7:
; CHECK: # %bb.0:
; CHECK-NEXT: aly %r3, 524284(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: st %r3, 0(%r5)
; CHECK-NEXT: br %r14
  %ptr = getelementptr i32, ptr %src, i64 131071
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define zeroext i1 @f8(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f8:
; CHECK: # %bb.0:
; CHECK-NEXT: agfi %r4, 524288
; CHECK-NEXT: al %r3, 0(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: st %r3, 0(%r5)
; CHECK-NEXT: br %r14
  %ptr = getelementptr i32, ptr %src, i64 131072
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check the high end of the negative aligned ALY range.
define zeroext i1 @f9(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f9:
; CHECK: # %bb.0:
; CHECK-NEXT: aly %r3, -4(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: st %r3, 0(%r5)
; CHECK-NEXT: br %r14
  %ptr = getelementptr i32, ptr %src, i64 -1
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check the low end of the ALY range.
define zeroext i1 @f10(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f10:
; CHECK: # %bb.0:
; CHECK-NEXT: aly %r3, -524288(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: st %r3, 0(%r5)
; CHECK-NEXT: br %r14
  %ptr = getelementptr i32, ptr %src, i64 -131072
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check the next word down, which needs separate address logic.
; Other sequences besides this one would be OK.
define zeroext i1 @f11(i32 %dummy, i32 %a, ptr %src, ptr %res) {
; CHECK-LABEL: f11:
; CHECK: # %bb.0:
; CHECK-NEXT: agfi %r4, -524292
; CHECK-NEXT: al %r3, 0(%r4)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: st %r3, 0(%r5)
; CHECK-NEXT: br %r14
  %ptr = getelementptr i32, ptr %src, i64 -131073
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}
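
; Both AL and ALY accept an index register in addition to the base, so a
; (base + index + displacement) address can be folded into the addition.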

; Check that AL allows an index.
define zeroext i1 @f12(i64 %src, i64 %index, i32 %a, ptr %res) {
; CHECK-LABEL: f12:
; CHECK: # %bb.0:
; CHECK-NEXT: al %r4, 4092(%r3,%r2)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: st %r4, 0(%r5)
; CHECK-NEXT: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 4092
  %ptr = inttoptr i64 %add2 to ptr
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Check that ALY allows an index.
define zeroext i1 @f13(i64 %src, i64 %index, i32 %a, ptr %res) {
; CHECK-LABEL: f13:
; CHECK: # %bb.0:
; CHECK-NEXT: aly %r4, 4096(%r3,%r2)
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 35
; CHECK-NEXT: st %r4, 0(%r5)
; CHECK-NEXT: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 4096
  %ptr = inttoptr i64 %add2 to ptr
  %b = load i32, ptr %ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}
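
; The values loaded before the call below must live across it, so most end up
; in call-saved registers and the last two are spilled; AL can then add the
; spilled words directly from the stack slots.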

; Check that additions of spilled values can use AL rather than ALR.
define zeroext i1 @f14(ptr %ptr0) {
; CHECK-LABEL: f14:
; CHECK: # %bb.0:
; CHECK-NEXT: stmg %r6, %r15, 48(%r15)
; CHECK-NEXT: .cfi_offset %r6, -112
; CHECK-NEXT: .cfi_offset %r7, -104
; CHECK-NEXT: .cfi_offset %r8, -96
; CHECK-NEXT: .cfi_offset %r9, -88
; CHECK-NEXT: .cfi_offset %r10, -80
; CHECK-NEXT: .cfi_offset %r11, -72
; CHECK-NEXT: .cfi_offset %r12, -64
; CHECK-NEXT: .cfi_offset %r13, -56
; CHECK-NEXT: .cfi_offset %r14, -48
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -168
; CHECK-NEXT: .cfi_def_cfa_offset 328
; CHECK-NEXT: l %r6, 0(%r2)
; CHECK-NEXT: l %r13, 8(%r2)
; CHECK-NEXT: l %r12, 16(%r2)
; CHECK-NEXT: l %r7, 24(%r2)
; CHECK-NEXT: l %r8, 32(%r2)
; CHECK-NEXT: l %r9, 40(%r2)
; CHECK-NEXT: l %r10, 48(%r2)
; CHECK-NEXT: l %r11, 56(%r2)
; CHECK-NEXT: mvc 160(4,%r15), 64(%r2) # 4-byte Folded Spill
; CHECK-NEXT: mvc 164(4,%r15), 72(%r2) # 4-byte Folded Spill
; CHECK-NEXT: brasl %r14, foo@PLT
; CHECK-NEXT: alr %r2, %r6
; CHECK-NEXT: ipm %r0
; CHECK-NEXT: risbg %r0, %r0, 63, 191, 35
; CHECK-NEXT: alr %r2, %r13
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: alr %r2, %r12
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: alr %r2, %r7
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: alr %r2, %r8
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: alr %r2, %r9
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: alr %r2, %r10
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: alr %r2, %r11
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: al %r2, 160(%r15) # 4-byte Folded Reload
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: al %r2, 164(%r15) # 4-byte Folded Reload
; CHECK-NEXT: ipm %r1
; CHECK-NEXT: rosbg %r0, %r1, 63, 63, 35
; CHECK-NEXT: risbg %r2, %r0, 63, 191, 0
; CHECK-NEXT: lmg %r6, %r15, 216(%r15)
; CHECK-NEXT: br %r14
  %ptr1 = getelementptr i32, ptr %ptr0, i64 2
  %ptr2 = getelementptr i32, ptr %ptr0, i64 4
  %ptr3 = getelementptr i32, ptr %ptr0, i64 6
  %ptr4 = getelementptr i32, ptr %ptr0, i64 8
  %ptr5 = getelementptr i32, ptr %ptr0, i64 10
  %ptr6 = getelementptr i32, ptr %ptr0, i64 12
  %ptr7 = getelementptr i32, ptr %ptr0, i64 14
  %ptr8 = getelementptr i32, ptr %ptr0, i64 16
  %ptr9 = getelementptr i32, ptr %ptr0, i64 18

  %val0 = load i32, ptr %ptr0
  %val1 = load i32, ptr %ptr1
  %val2 = load i32, ptr %ptr2
  %val3 = load i32, ptr %ptr3
  %val4 = load i32, ptr %ptr4
  %val5 = load i32, ptr %ptr5
  %val6 = load i32, ptr %ptr6
  %val7 = load i32, ptr %ptr7
  %val8 = load i32, ptr %ptr8
  %val9 = load i32, ptr %ptr9

  %ret = call i32 @foo()
  %t0 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %ret, i32 %val0)
  %add0 = extractvalue {i32, i1} %t0, 0
  %obit0 = extractvalue {i32, i1} %t0, 1
  %t1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add0, i32 %val1)
  %add1 = extractvalue {i32, i1} %t1, 0
  %obit1 = extractvalue {i32, i1} %t1, 1
  %res1 = or i1 %obit0, %obit1
  %t2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add1, i32 %val2)
  %add2 = extractvalue {i32, i1} %t2, 0
  %obit2 = extractvalue {i32, i1} %t2, 1
  %res2 = or i1 %res1, %obit2
  %t3 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add2, i32 %val3)
  %add3 = extractvalue {i32, i1} %t3, 0
  %obit3 = extractvalue {i32, i1} %t3, 1
  %res3 = or i1 %res2, %obit3
  %t4 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add3, i32 %val4)
  %add4 = extractvalue {i32, i1} %t4, 0
  %obit4 = extractvalue {i32, i1} %t4, 1
  %res4 = or i1 %res3, %obit4
  %t5 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add4, i32 %val5)
  %add5 = extractvalue {i32, i1} %t5, 0
  %obit5 = extractvalue {i32, i1} %t5, 1
  %res5 = or i1 %res4, %obit5
  %t6 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add5, i32 %val6)
  %add6 = extractvalue {i32, i1} %t6, 0
  %obit6 = extractvalue {i32, i1} %t6, 1
  %res6 = or i1 %res5, %obit6
  %t7 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add6, i32 %val7)
  %add7 = extractvalue {i32, i1} %t7, 0
  %obit7 = extractvalue {i32, i1} %t7, 1
  %res7 = or i1 %res6, %obit7
  %t8 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add7, i32 %val8)
  %add8 = extractvalue {i32, i1} %t8, 0
  %obit8 = extractvalue {i32, i1} %t8, 1
  %res8 = or i1 %res7, %obit8
  %t9 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %add8, i32 %val9)
  %add9 = extractvalue {i32, i1} %t9, 0
  %obit9 = extractvalue {i32, i1} %t9, 1
  %res9 = or i1 %res8, %obit9
  ret i1 %res9
}

declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone