; Test sequences that can use RISBG with a zeroed first operand.
; The tests here assume that RISBLG is available.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
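
; A quick key to the RISBG/RISBLG operands in the CHECK lines below:
; "risbg %r1, %r2, i3, i4, i5" rotates %r2 left by i5 bits, then inserts
; bits i3..i4 of the rotated value into %r1; adding 128 to i4 requests
; that all other bits of %r1 be zeroed, which turns the instruction into
; a combined rotate-and-mask.  RISBLG does the same 64-bit rotate but
; only inserts into the low word.  z/Architecture numbers bits from the
; MSB, so IR bit n of an i64 is instruction bit 63-n (31-n for RISBLG
; on an i32), and a logical right shift by n shows up as a left rotate
; by 64-n.  For example, in f1 the lshr by 10 plus "and ... 1" keeps
; just IR bit 0, i.e. position 31, giving operands 31, 159 (31 + 128)
; and rotate 54 (64 - 10).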

; Test an extraction of bit 0 from a right-shifted value.
define i32 @f1(i32 %foo) {
; CHECK-LABEL: f1:
; CHECK: risblg %r2, %r2, 31, 159, 54
; CHECK: br %r14
  %shr = lshr i32 %foo, 10
  %and = and i32 %shr, 1
  ret i32 %and
}

; ...and again with i64.
define i64 @f2(i64 %foo) {
; CHECK-LABEL: f2:
; CHECK: risbg %r2, %r2, 63, 191, 54
; CHECK: br %r14
  %shr = lshr i64 %foo, 10
  %and = and i64 %shr, 1
  ret i64 %and
}

; Test an extraction of other bits from a right-shifted value.
define i32 @f3(i32 %foo) {
; CHECK-LABEL: f3:
; CHECK: risblg %r2, %r2, 28, 157, 42
; CHECK: br %r14
  %shr = lshr i32 %foo, 22
  %and = and i32 %shr, 12
  ret i32 %and
}

; ...and again with i64.
define i64 @f4(i64 %foo) {
; CHECK-LABEL: f4:
; CHECK: risbg %r2, %r2, 60, 189, 42
; CHECK: br %r14
  %shr = lshr i64 %foo, 22
  %and = and i64 %shr, 12
  ret i64 %and
}

; Test an extraction of most bits from a right-shifted value.
; The range should be reduced to exclude the zeroed high bits.
define i32 @f5(i32 %foo) {
; CHECK-LABEL: f5:
; CHECK: risblg %r2, %r2, 2, 156, 62
; CHECK: br %r14
  %shr = lshr i32 %foo, 2
  %and = and i32 %shr, -8
  ret i32 %and
}

; ...and again with i64.
define i64 @f6(i64 %foo) {
; CHECK-LABEL: f6:
; CHECK: risbg %r2, %r2, 2, 188, 62
; CHECK: br %r14
  %shr = lshr i64 %foo, 2
  %and = and i64 %shr, -8
  ret i64 %and
}
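
; The range reduction in f5: after the shift, IR bits 30 and 31 of %shr
; are already zero, and the AND clears IR bits 0..2, so only IR bits
; 3..29 survive, i.e. instruction bits 2..28, hence the operands 2 and
; 156 (28 + 128) with rotate 62 (64 - 2).  f6 is the same calculation
; in 64 bits: instruction bits 2..60, so the end position becomes 188.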

; Try the next value up (mask ....1111001). This needs a separate shift
; and mask.
define i32 @f7(i32 %foo) {
; CHECK-LABEL: f7:
; CHECK: srl %r2, 2
; CHECK: nill %r2, 65529
; CHECK: br %r14
  %shr = lshr i32 %foo, 2
  %and = and i32 %shr, -7
  ret i32 %and
}

; ...and again with i64.
define i64 @f8(i64 %foo) {
; CHECK-LABEL: f8:
; CHECK: srlg %r2, %r2, 2
; CHECK: nill %r2, 65529
; CHECK: br %r14
  %shr = lshr i64 %foo, 2
  %and = and i64 %shr, -7
  ret i64 %and
}
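
; f7/f8 fall back to a shift and mask because the surviving bits are not
; a single range: -7 is ...11111001, so the result has two separate runs
; of zeros (the hole at IR bits 1..2, plus the bits cleared at the top
; by the shift), and RISBG can only select one contiguous, possibly
; wrapped, run of bits.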

; Test an extraction of bits from a left-shifted value. The range should
; be reduced to exclude the zeroed low bits.
define i32 @f9(i32 %foo) {
; CHECK-LABEL: f9:
; CHECK: risblg %r2, %r2, 24, 157, 2
; CHECK: br %r14
  %shr = shl i32 %foo, 2
  %and = and i32 %shr, 255
  ret i32 %and
}

; ...and again with i64.
define i64 @f10(i64 %foo) {
; CHECK-LABEL: f10:
; CHECK: risbg %r2, %r2, 56, 189, 2
; CHECK: br %r14
  %shr = shl i64 %foo, 2
  %and = and i64 %shr, 255
  ret i64 %and
}

; Try a wrap-around mask (mask ....111100001111). This needs a separate shift
; and mask.
define i32 @f11(i32 %foo) {
; CHECK-LABEL: f11:
; CHECK: sll %r2, 2
; CHECK: nill %r2, 65295
; CHECK: br %r14
  %shr = shl i32 %foo, 2
  %and = and i32 %shr, -241
  ret i32 %and
}

; ...and again with i64.
define i64 @f12(i64 %foo) {
; CHECK-LABEL: f12:
; CHECK: sllg %r2, %r2, 2
; CHECK: nill %r2, 65295
; CHECK: br %r14
  %shr = shl i64 %foo, 2
  %and = and i64 %shr, -241
  ret i64 %and
}
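
; The same contiguity argument applies to f11/f12: the shift zeroes IR
; bits 0..1 and the AND with -241 (...111100001111) zeroes IR bits 4..7,
; leaving two separate holes, which no single RISBG bit range can
; describe even with wrap-around.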

; Test an extraction from a rotated value, no mask wraparound.
; This is equivalent to the lshr case, because the bits from the
; shl are not used.
define i32 @f13(i32 %foo) {
; CHECK-LABEL: f13:
; CHECK: risblg %r2, %r2, 24, 156, 46
; CHECK: br %r14
  %parta = shl i32 %foo, 14
  %partb = lshr i32 %foo, 18
  %rotl = or i32 %parta, %partb
  %and = and i32 %rotl, 248
  ret i32 %and
}

; ...and again with i64.
define i64 @f14(i64 %foo) {
; CHECK-LABEL: f14:
; CHECK: risbg %r2, %r2, 56, 188, 14
; CHECK: br %r14
  %parta = shl i64 %foo, 14
  %partb = lshr i64 %foo, 50
  %rotl = or i64 %parta, %partb
  %and = and i64 %rotl, 248
  ret i64 %and
}
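
; Why f13 reduces to the lshr case: the mask 248 keeps IR bits 3..7, and
; in the 32-bit rotate those bits all come from the "%foo >> 18" half,
; which supplies IR bits 0..13, so the shl half is dead and the whole
; thing is just a shifted-bit extraction with rotate 46 (64 - 18).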

; Try a case in which only the bits from the shl are used.
define i32 @f15(i32 %foo) {
; CHECK-LABEL: f15:
; CHECK: risblg %r2, %r2, 15, 145, 14
; CHECK: br %r14
  %parta = shl i32 %foo, 14
  %partb = lshr i32 %foo, 18
  %rotl = or i32 %parta, %partb
  %and = and i32 %rotl, 114688
  ret i32 %and
}

; ...and again with i64.
define i64 @f16(i64 %foo) {
; CHECK-LABEL: f16:
; CHECK: risbg %r2, %r2, 47, 177, 14
; CHECK: br %r14
  %parta = shl i64 %foo, 14
  %partb = lshr i64 %foo, 50
  %rotl = or i64 %parta, %partb
  %and = and i64 %rotl, 114688
  ret i64 %and
}

; Test a 32-bit rotate in which both parts of the OR are needed.
; This needs a separate shift and mask.
define i32 @f17(i32 %foo) {
; CHECK-LABEL: f17:
; CHECK: rll %r2, %r2, 4
; CHECK: nilf %r2, 126
; CHECK: br %r14
  %parta = shl i32 %foo, 4
  %partb = lshr i32 %foo, 28
  %rotl = or i32 %parta, %partb
  %and = and i32 %rotl, 126
  ret i32 %and
}

; ...and for i64, where RISBG should do the rotate too.
define i64 @f18(i64 %foo) {
; CHECK-LABEL: f18:
; CHECK: risbg %r2, %r2, 57, 190, 4
; CHECK: br %r14
  %parta = shl i64 %foo, 4
  %partb = lshr i64 %foo, 60
  %rotl = or i64 %parta, %partb
  %and = and i64 %rotl, 126
  ret i64 %and
}
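
; The difference between f17 and f18 is that RISBG rotates the full
; 64-bit register: a genuine 32-bit rotate (f17) wraps the top bits of
; the low word back into bit 0, which a 64-bit rotate cannot reproduce,
; so it needs rll + nilf.  The i64 version is exactly a 64-bit rotate
; followed by a mask: rotate 4 with bits 57..62 selected (mask 126).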

; Test an arithmetic shift right in which some of the sign bits are kept.
; This needs a separate shift and mask.
define i32 @f19(i32 %foo) {
; CHECK-LABEL: f19:
; CHECK: sra %r2, 28
; CHECK: nilf %r2, 30
; CHECK: br %r14
  %shr = ashr i32 %foo, 28
  %and = and i32 %shr, 30
  ret i32 %and
}

; ...and again with i64. In this case RISBG is the best way of doing the AND.
define i64 @f20(i64 %foo) {
; CHECK-LABEL: f20:
; CHECK: srag [[REG:%r[0-5]]], %r2, 60
; CHECK: risbg %r2, [[REG]], 59, 190, 0
; CHECK: br %r14
  %shr = ashr i64 %foo, 60
  %and = and i64 %shr, 30
  ret i64 %and
}
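
; In f20 the srag must stay, since the copied sign bits are part of the
; shift result, but the AND with 30 keeps IR bits 1..4, i.e. instruction
; bits 59..62, so a rotate-free RISBG (rotate amount 0) is a one-
; instruction way to apply the mask to the shifted value.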

; Now try an arithmetic right shift in which the sign bits aren't needed.
; Introduce a second use of %shr so that the ashr doesn't decompose to
; an lshr.
define i32 @f21(i32 %foo, i32 *%dest) {
; CHECK-LABEL: f21:
; CHECK: risblg %r2, %r2, 28, 158, 36
; CHECK: br %r14
  %shr = ashr i32 %foo, 28
  store i32 %shr, i32 *%dest
  %and = and i32 %shr, 14
  ret i32 %and
}

; ...and again with i64.
define i64 @f22(i64 %foo, i64 *%dest) {
; CHECK-LABEL: f22:
; CHECK: risbg %r2, %r2, 60, 190, 4
; CHECK: br %r14
  %shr = ashr i64 %foo, 60
  store i64 %shr, i64 *%dest
  %and = and i64 %shr, 14
  ret i64 %and
}
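
; With the sign copies masked away, the ashr in f21/f22 behaves like an
; lshr as far as the AND is concerned, so the shift and mask still fold
; into a single RISBG; the store just keeps the ashr itself alive.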

; Check that we use RISBG for shifted values even if the AND is a
; natural zero extension.
define i64 @f23(i64 %foo) {
; CHECK-LABEL: f23:
; CHECK: risbg %r2, %r2, 56, 191, 62
; CHECK: br %r14
  %shr = lshr i64 %foo, 2
  %and = and i64 %shr, 255
  ret i64 %and
}
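
; The AND with 255 on its own would be a natural byte zero extension
; (llgcr), but folding the shift in keeps it to one instruction: bits
; 56..63 (IR bits 0..7), range end 191 (63 + 128), rotate 62 (right 2).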

; Test a case where the AND comes before a rotate. This needs a separate
; mask and rotate.
define i32 @f24(i32 %foo) {
; CHECK-LABEL: f24:
; CHECK: nilf %r2, 254
; CHECK: rll %r2, %r2, 29
; CHECK: br %r14
  %and = and i32 %foo, 254
  %parta = lshr i32 %and, 3
  %partb = shl i32 %and, 29
  %rotl = or i32 %parta, %partb
  ret i32 %rotl
}

; ...and again with i64, where a single RISBG is enough.
define i64 @f25(i64 %foo) {
; CHECK-LABEL: f25:
; CHECK: risbg %r2, %r2, 57, 187, 3
; CHECK: br %r14
  %and = and i64 %foo, 14
  %parta = shl i64 %and, 3
  %partb = lshr i64 %and, 61
  %rotl = or i64 %parta, %partb
  ret i64 %rotl
}
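
; f25 works as one RISBG because AND-then-rotate can be rewritten as
; rotate-then-AND with the mask rotated too: (%foo & 14) rotated left
; by 3 keeps IR bits 4..6, i.e. instruction bits 57..59, giving the
; 57, 187 (59 + 128), 3 operands above.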

; Test a wrap-around case in which the AND comes before a rotate.
; This again needs a separate mask and rotate.
define i32 @f26(i32 %foo) {
; CHECK-LABEL: f26:
; CHECK: rll %r2, %r2, 5
; CHECK: br %r14
  %and = and i32 %foo, -49
  %parta = shl i32 %and, 5
  %partb = lshr i32 %and, 27
  %rotl = or i32 %parta, %partb
  ret i32 %rotl
}

; ...and again with i64, where a single RISBG is OK.
define i64 @f27(i64 %foo) {
; CHECK-LABEL: f27:
; CHECK: risbg %r2, %r2, 55, 180, 5
; CHECK: br %r14
  %and = and i64 %foo, -49
  %parta = shl i64 %and, 5
  %partb = lshr i64 %and, 59
  %rotl = or i64 %parta, %partb
  ret i64 %rotl
}
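
; f27 also shows RISBG's wrap-around form: rotating the -49 mask left
; by 5 leaves zeros at IR bits 9..10 (instruction bits 53..54), so the
; selected range runs from bit 55 around to bit 52, and i3 > i4 in
; "55, 180, 5" encodes exactly that wrapped range.  The i32 version
; (f26) still needs a separate rll because the rotate is 32-bit.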

; Test a case where the AND comes before a shift left.
define i32 @f28(i32 %foo) {
; CHECK-LABEL: f28:
; CHECK: risblg %r2, %r2, 0, 141, 17
; CHECK: br %r14
  %and = and i32 %foo, 32766
  %shl = shl i32 %and, 17
  ret i32 %shl
}

; ...and again with i64.
define i64 @f29(i64 %foo) {
; CHECK-LABEL: f29:
; CHECK: risbg %r2, %r2, 0, 141, 49
; CHECK: br %r14
  %and = and i64 %foo, 32766
  %shl = shl i64 %and, 49
  ret i64 %shl
}

; Test the next shift up from f28, in which the mask should get shortened.
define i32 @f30(i32 %foo) {
; CHECK-LABEL: f30:
; CHECK: risblg %r2, %r2, 0, 140, 18
; CHECK: br %r14
  %and = and i32 %foo, 32766
  %shl = shl i32 %and, 18
  ret i32 %shl
}

; ...and again with i64.
define i64 @f31(i64 %foo) {
; CHECK-LABEL: f31:
; CHECK: risbg %r2, %r2, 0, 140, 50
; CHECK: br %r14
  %and = and i64 %foo, 32766
  %shl = shl i64 %and, 50
  ret i64 %shl
}
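
; The mask shortening in f30/f31: the AND keeps IR bits 1..14, and a
; shift of 17 (f28) maps them to the topmost 14 bit positions, giving
; range end 13 + 128 = 141.  Shifting by 18 instead pushes the top mask
; bit off the top of the value, so the range shrinks by one bit and the
; end position becomes 140.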

; Test a wrap-around case in which the shift left comes after the AND.
; We can't use RISBG for the shift in that case.
define i32 @f32(i32 %foo) {
; CHECK-LABEL: f32:
; CHECK: sll %r2
; CHECK: br %r14
  %and = and i32 %foo, -7
  %shl = shl i32 %and, 10
  ret i32 %shl
}

; ...and again with i64.
define i64 @f33(i64 %foo) {
; CHECK-LABEL: f33:
; CHECK: sllg %r2
; CHECK: br %r14
  %and = and i64 %foo, -7
  %shl = shl i64 %and, 10
  ret i64 %shl
}

; Test a case where the AND comes before a shift right.
define i32 @f34(i32 %foo) {
; CHECK-LABEL: f34:
; CHECK: risblg %r2, %r2, 25, 159, 55
; CHECK: br %r14
  %and = and i32 %foo, 65535
  %shl = lshr i32 %and, 9
  ret i32 %shl
}

; ...and again with i64.
define i64 @f35(i64 %foo) {
; CHECK-LABEL: f35:
; CHECK: risbg %r2, %r2, 57, 191, 55
; CHECK: br %r14
  %and = and i64 %foo, 65535
  %shl = lshr i64 %and, 9
  ret i64 %shl
}
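
; AND-before-shift folds the same way in the other direction: in f34
; the mask keeps IR bits 0..15, the lshr by 9 drops the bottom nine,
; and the survivors (IR bits 0..6 of the result) give positions 25..31,
; end 159 (31 + 128), with rotate 55 = 64 - 9.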

; Test a wrap-around case where the AND comes before a shift right.
; We can't use RISBG for the shift in that case.
define i32 @f36(i32 %foo) {
; CHECK-LABEL: f36:
; CHECK: srl %r2
; CHECK: br %r14
  %and = and i32 %foo, -25
  %shl = lshr i32 %and, 1
  ret i32 %shl
}

; ...and again with i64.
define i64 @f37(i64 %foo) {
; CHECK-LABEL: f37:
; CHECK: srlg %r2
; CHECK: br %r14
  %and = and i64 %foo, -25
  %shl = lshr i64 %and, 1
  ret i64 %shl
}

; Test a combination involving a large ASHR and a shift left. We can't
; use RISBG there.
define i64 @f38(i64 %foo) {
; CHECK-LABEL: f38:
; CHECK: srag {{%r[0-5]}}
; CHECK: sllg {{%r[0-5]}}
; CHECK: br %r14
  %ashr = ashr i64 %foo, 32
  %shl = shl i64 %ashr, 5
  ret i64 %shl
}

; Try a similar thing in which no shifted sign bits are kept.
define i64 @f39(i64 %foo, i64 *%dest) {
; CHECK-LABEL: f39:
; CHECK: srag [[REG:%r[01345]]], %r2, 35
; CHECK: risbg %r2, %r2, 33, 189, 31
; CHECK: br %r14
  %ashr = ashr i64 %foo, 35
  store i64 %ashr, i64 *%dest
  %shl = shl i64 %ashr, 2
  %and = and i64 %shl, 2147483647
  ret i64 %and
}

; ...and again with the next highest shift value, where one sign bit is kept.
define i64 @f40(i64 %foo, i64 *%dest) {
; CHECK-LABEL: f40:
; CHECK: srag [[REG:%r[01345]]], %r2, 36
; CHECK: risbg %r2, [[REG]], 33, 189, 2
; CHECK: br %r14
  %ashr = ashr i64 %foo, 36
  store i64 %ashr, i64 *%dest
  %shl = shl i64 %ashr, 2
  %and = and i64 %shl, 2147483647
  ret i64 %and
}
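
; The boundary between f39 and f40: with an ashr of 35, the shl and the
; 0x7fffffff mask discard every shifted-in sign copy, so the result is
; just a masked right shift by 33 of %foo and the RISBG can run off %r2
; directly (the srag only feeds the store).  With an ashr of 36 one
; sign copy survives the mask, so the RISBG must consume the srag
; result instead, as the change of source operand in the checks shows.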

; Check a case where the result is zero-extended.
define i64 @f41(i32 %a) {
; CHECK-LABEL: f41:
; CHECK: risbg %r2, %r2, 36, 191, 62
; CHECK: br %r14
  %shl = shl i32 %a, 2
  %shr = lshr i32 %shl, 4
  %ext = zext i32 %shr to i64
  ret i64 %ext
}
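
; In f41 the shl 2 / lshr 4 pair amounts to "shift right by 2 and clear
; the top two bits", and the zext widens the 28 significant bits to
; i64, so they land in instruction bits 36..63: rotate 62 (right 2)
; with a range that runs to the end of the register (191 = 63 + 128).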

; In this case the sign extension is converted to a pair of 32-bit shifts,
; which is then extended to 64 bits. We previously used the wrong bit size
; when testing whether the shifted-in bits of the shift right were significant.
define i64 @f42(i1 %x) {
; CHECK-LABEL: f42:
; CHECK: lcr %r0, %r2
; CHECK: llgcr %r2, %r0
; CHECK: br %r14
  %ext = sext i1 %x to i8
  %ext2 = zext i8 %ext to i64
  ret i64 %ext2
}

; Check that we get the case where a 64-bit shift is used by a 32-bit and.
; Note that this cannot use RISBLG, but should use RISBG.
define signext i32 @f43(i64 %x) {
; CHECK-LABEL: f43:
; CHECK: risbg [[REG:%r[0-5]]], %r2, 32, 189, 52
; CHECK: lgfr %r2, [[REG]]
  %shr3 = lshr i64 %x, 12
  %shr3.tr = trunc i64 %shr3 to i32
  %conv = and i32 %shr3.tr, -4
  ret i32 %conv
}

; Check that we don't get the case where the 32-bit and mask is not contiguous
define signext i32 @f44(i64 %x) {
; CHECK-LABEL: f44:
; CHECK: srlg [[REG:%r[0-5]]], %r2, 12
  %shr4 = lshr i64 %x, 12
  %conv = trunc i64 %shr4 to i32
  %and = and i32 %conv, 10
  ret i32 %and
}
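
; f43 vs f44 is the contiguity test on the 32-bit mask: -4 keeps IR
; bits 2..31 of the truncated value, one contiguous run, so the shift,
; truncate and AND become a single RISBG (32, 189, 52, i.e. rotate
; right 12) with lgfr supplying the sign-extended return value.  The
; mask 10 in f44 has two separate bits set, so only the plain srlg is
; expected.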