; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm-eabi -mattr=+v6t2 | FileCheck %s --check-prefixes=CHECK,SCALAR
; RUN: llc < %s -mtriple=arm-eabi -mattr=+v6t2 -mattr=+neon | FileCheck %s --check-prefixes=CHECK,NEON

declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i16 @llvm.fshl.i16(i16, i16, i16)
declare i32 @llvm.fshl.i32(i32, i32, i32)
declare i64 @llvm.fshl.i64(i64, i64, i64)
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

declare i8 @llvm.fshr.i8(i8, i8, i8)
declare i16 @llvm.fshr.i16(i16, i16, i16)
declare i32 @llvm.fshr.i32(i32, i32, i32)
declare i64 @llvm.fshr.i64(i64, i64, i64)
declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
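
; For reference, the LangRef semantics under test (bw = bit width of the type):
;   fshl(x, y, z): the top bw bits of concat(x, y) << (z % bw)
;   fshr(x, y, z): the bottom bw bits of concat(x, y) >> (z % bw)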

; General case - all operands can be variables.

define i16 @fshl_i16(i16 %x, i16 %y, i16 %z) {
; CHECK-LABEL: fshl_i16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    pkhbt r0, r1, r0, lsl #16
; CHECK-NEXT:    and r1, r2, #15
; CHECK-NEXT:    lsl r0, r0, r1
; CHECK-NEXT:    lsr r0, r0, #16
; CHECK-NEXT:    bx lr
  %f = call i16 @llvm.fshl.i16(i16 %x, i16 %y, i16 %z)
  ret i16 %f
}
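
; The i16 case is widened: pkhbt packs x:y into one 32-bit register, the
; amount is taken modulo 16, and the result is read back out of the top
; halfword.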

define i32 @fshl_i32(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: fshl_i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r3, #31
; CHECK-NEXT:    lsr r1, r1, #1
; CHECK-NEXT:    bic r3, r3, r2
; CHECK-NEXT:    and r2, r2, #31
; CHECK-NEXT:    lsl r0, r0, r2
; CHECK-NEXT:    orr r0, r0, r1, lsr r3
; CHECK-NEXT:    bx lr
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %z)
  ret i32 %f
}
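
; The variable-shift expansion pre-shifts y right by 1 so both shift amounts
; (z & 31 and ~z & 31) stay in range, avoiding an undefined y >> 32 when
; z & 31 == 0:
;   fshl(x, y, z) = (x << (z & 31)) | ((y >> 1) >> (~z & 31))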

; Verify that weird types are minimally supported.
declare i37 @llvm.fshl.i37(i37, i37, i37)
define i37 @fshl_i37(i37 %x, i37 %y, i37 %z) {
; SCALAR-LABEL: fshl_i37:
; SCALAR:       @ %bb.0:
; SCALAR-NEXT:    .save {r4, r5, r6, r7, r8, r9, r11, lr}
; SCALAR-NEXT:    push {r4, r5, r6, r7, r8, r9, r11, lr}
; SCALAR-NEXT:    mov r8, r0
; SCALAR-NEXT:    ldr r0, [sp, #36]
; SCALAR-NEXT:    mov r4, r1
; SCALAR-NEXT:    mov r6, r3
; SCALAR-NEXT:    and r1, r0, #31
; SCALAR-NEXT:    ldr r0, [sp, #32]
; SCALAR-NEXT:    mov r9, r2
; SCALAR-NEXT:    mov r2, #37
; SCALAR-NEXT:    mov r3, #0
; SCALAR-NEXT:    bl __aeabi_uldivmod
; SCALAR-NEXT:    lsl r1, r6, #27
; SCALAR-NEXT:    ands r0, r2, #32
; SCALAR-NEXT:    orr r1, r1, r9, lsr #5
; SCALAR-NEXT:    mov r3, r8
; SCALAR-NEXT:    and r6, r2, #31
; SCALAR-NEXT:    mov r7, #31
; SCALAR-NEXT:    movne r3, r1
; SCALAR-NEXT:    cmp r0, #0
; SCALAR-NEXT:    lslne r1, r9, #27
; SCALAR-NEXT:    bic r2, r7, r2
; SCALAR-NEXT:    movne r4, r8
; SCALAR-NEXT:    lsl r5, r3, r6
; SCALAR-NEXT:    lsr r0, r1, #1
; SCALAR-NEXT:    lsl r1, r4, r6
; SCALAR-NEXT:    lsr r3, r3, #1
; SCALAR-NEXT:    orr r0, r5, r0, lsr r2
; SCALAR-NEXT:    orr r1, r1, r3, lsr r2
; SCALAR-NEXT:    pop {r4, r5, r6, r7, r8, r9, r11, pc}
;
; NEON-LABEL: fshl_i37:
; NEON:       @ %bb.0:
; NEON-NEXT:    .save {r4, r5, r6, r7, r11, lr}
; NEON-NEXT:    push {r4, r5, r6, r7, r11, lr}
; NEON-NEXT:    mov r4, r1
; NEON-NEXT:    ldr r1, [sp, #28]
; NEON-NEXT:    mov r6, r0
; NEON-NEXT:    ldr r0, [sp, #24]
; NEON-NEXT:    and r1, r1, #31
; NEON-NEXT:    mov r5, r3
; NEON-NEXT:    mov r7, r2
; NEON-NEXT:    mov r2, #37
; NEON-NEXT:    mov r3, #0
; NEON-NEXT:    bl __aeabi_uldivmod
; NEON-NEXT:    mov r0, #31
; NEON-NEXT:    bic r1, r0, r2
; NEON-NEXT:    lsl r0, r5, #27
; NEON-NEXT:    ands r12, r2, #32
; NEON-NEXT:    orr r0, r0, r7, lsr #5
; NEON-NEXT:    mov r5, r6
; NEON-NEXT:    and r2, r2, #31
; NEON-NEXT:    movne r5, r0
; NEON-NEXT:    lslne r0, r7, #27
; NEON-NEXT:    cmp r12, #0
; NEON-NEXT:    lsl r3, r5, r2
; NEON-NEXT:    lsr r0, r0, #1
; NEON-NEXT:    movne r4, r6
; NEON-NEXT:    orr r0, r3, r0, lsr r1
; NEON-NEXT:    lsr r3, r5, #1
; NEON-NEXT:    lsl r2, r4, r2
; NEON-NEXT:    orr r1, r2, r3, lsr r1
; NEON-NEXT:    pop {r4, r5, r6, r7, r11, pc}
  %f = call i37 @llvm.fshl.i37(i37 %x, i37 %y, i37 %z)
  ret i37 %f
}
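
; i37 is not a power of two, so the shift amount needs a true urem by 37;
; that shows up above as the __aeabi_uldivmod libcall on the i64-legalized
; amount.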

; extract(concat(0b1110000, 0b1111111) << 2) = 0b1000011
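; (concat gives the 14-bit value 0b11100001111111; shifted left by 2 and
; truncated to 14 bits that is 0b10000111111100, whose top 7 bits are
; 0b1000011 = 67.)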

declare i7 @llvm.fshl.i7(i7, i7, i7)
define i7 @fshl_i7_const_fold() {
; CHECK-LABEL: fshl_i7_const_fold:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r0, #67
; CHECK-NEXT:    bx lr
  %f = call i7 @llvm.fshl.i7(i7 112, i7 127, i7 2)
  ret i7 %f
}

define i8 @fshl_i8_const_fold_overshift_1() {
; CHECK-LABEL: fshl_i8_const_fold_overshift_1:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r0, #128
; CHECK-NEXT:    bx lr
  %f = call i8 @llvm.fshl.i8(i8 255, i8 0, i8 15)
  ret i8 %f
}
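
; 15 % 8 == 7, so this folds like fshl(255, 0, 7):
; ((255 << 7) & 255) | (0 >> 1) = 128.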

define i8 @fshl_i8_const_fold_overshift_2() {
; CHECK-LABEL: fshl_i8_const_fold_overshift_2:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r0, #120
; CHECK-NEXT:    bx lr
  %f = call i8 @llvm.fshl.i8(i8 15, i8 15, i8 11)
  ret i8 %f
}
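
; 11 % 8 == 3: ((15 << 3) & 255) | (15 >> 5) = 120 | 0 = 120.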

define i8 @fshl_i8_const_fold_overshift_3() {
; CHECK-LABEL: fshl_i8_const_fold_overshift_3:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r0, #0
; CHECK-NEXT:    bx lr
  %f = call i8 @llvm.fshl.i8(i8 0, i8 225, i8 8)
  ret i8 %f
}
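
; 8 % 8 == 0, so this is a no-op that returns x = 0.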

; With constant shift amount, this is 'extr'.

define i32 @fshl_i32_const_shift(i32 %x, i32 %y) {
; CHECK-LABEL: fshl_i32_const_shift:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    lsl r0, r0, #9
; CHECK-NEXT:    orr r0, r0, r1, lsr #23
; CHECK-NEXT:    bx lr
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 9)
  ret i32 %f
}
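
; (AArch64 would use a single extr here; plain ARM has no extract
; instruction, hence the lsl + orr-with-shifted-operand pair above.)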

; Check modulo math on shift amount.

define i32 @fshl_i32_const_overshift(i32 %x, i32 %y) {
; CHECK-LABEL: fshl_i32_const_overshift:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    lsl r0, r0, #9
; CHECK-NEXT:    orr r0, r0, r1, lsr #23
; CHECK-NEXT:    bx lr
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 41)
  ret i32 %f
}
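
; 41 % 32 == 9, so the code matches fshl_i32_const_shift.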

; 64-bit should also work.

define i64 @fshl_i64_const_overshift(i64 %x, i64 %y) {
; CHECK-LABEL: fshl_i64_const_overshift:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    lsl r1, r3, #9
; CHECK-NEXT:    orr r2, r1, r2, lsr #23
; CHECK-NEXT:    lsl r0, r0, #9
; CHECK-NEXT:    orr r1, r0, r3, lsr #23
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    bx lr
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %y, i64 105)
  ret i64 %f
}
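
; 105 % 64 == 41; with an amount of 32 or more the word halves cross over,
; and each output word is an orr of a 9-bit lsl and a 23-bit lsr
; (41 - 32 = 9, 64 - 41 = 23).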

; This should work without any node-specific logic.

define i8 @fshl_i8_const_fold() {
; CHECK-LABEL: fshl_i8_const_fold:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r0, #128
; CHECK-NEXT:    bx lr
  %f = call i8 @llvm.fshl.i8(i8 255, i8 0, i8 7)
  ret i8 %f
}

; Repeat everything for funnel shift right.

; General case - all operands can be variables.

define i16 @fshr_i16(i16 %x, i16 %y, i16 %z) {
; CHECK-LABEL: fshr_i16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    pkhbt r0, r1, r0, lsl #16
; CHECK-NEXT:    and r1, r2, #15
; CHECK-NEXT:    lsr r0, r0, r1
; CHECK-NEXT:    bx lr
  %f = call i16 @llvm.fshr.i16(i16 %x, i16 %y, i16 %z)
  ret i16 %f
}
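
; Same widening trick as fshl_i16, but shifting right; the result already
; sits in the low halfword, so no trailing lsr #16 is needed.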

define i32 @fshr_i32(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: fshr_i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r3, #31
; CHECK-NEXT:    lsl r0, r0, #1
; CHECK-NEXT:    bic r3, r3, r2
; CHECK-NEXT:    and r2, r2, #31
; CHECK-NEXT:    lsl r0, r0, r3
; CHECK-NEXT:    orr r0, r0, r1, lsr r2
; CHECK-NEXT:    bx lr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %z)
  ret i32 %f
}
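
; The mirror image of the fshl_i32 expansion:
;   fshr(x, y, z) = ((x << 1) << (~z & 31)) | (y >> (z & 31))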

; Verify that weird types are minimally supported.
declare i37 @llvm.fshr.i37(i37, i37, i37)
define i37 @fshr_i37(i37 %x, i37 %y, i37 %z) {
; SCALAR-LABEL: fshr_i37:
; SCALAR:       @ %bb.0:
; SCALAR-NEXT:    .save {r4, r5, r6, r7, r8, lr}
; SCALAR-NEXT:    push {r4, r5, r6, r7, r8, lr}
; SCALAR-NEXT:    mov r8, r0
; SCALAR-NEXT:    ldr r0, [sp, #28]
; SCALAR-NEXT:    mov r4, r1
; SCALAR-NEXT:    mov r5, r3
; SCALAR-NEXT:    and r1, r0, #31
; SCALAR-NEXT:    ldr r0, [sp, #24]
; SCALAR-NEXT:    mov r7, r2
; SCALAR-NEXT:    mov r2, #37
; SCALAR-NEXT:    mov r3, #0
; SCALAR-NEXT:    bl __aeabi_uldivmod
; SCALAR-NEXT:    lsl r3, r5, #27
; SCALAR-NEXT:    add r0, r2, #27
; SCALAR-NEXT:    orr r3, r3, r7, lsr #5
; SCALAR-NEXT:    ands r2, r0, #32
; SCALAR-NEXT:    mov r5, r8
; SCALAR-NEXT:    mov r1, #31
; SCALAR-NEXT:    moveq r5, r3
; SCALAR-NEXT:    lsleq r3, r7, #27
; SCALAR-NEXT:    cmp r2, #0
; SCALAR-NEXT:    bic r1, r1, r0
; SCALAR-NEXT:    moveq r4, r8
; SCALAR-NEXT:    lsl r6, r5, #1
; SCALAR-NEXT:    and r7, r0, #31
; SCALAR-NEXT:    lsl r2, r4, #1
; SCALAR-NEXT:    lsl r6, r6, r1
; SCALAR-NEXT:    lsl r1, r2, r1
; SCALAR-NEXT:    orr r0, r6, r3, lsr r7
; SCALAR-NEXT:    orr r1, r1, r5, lsr r7
; SCALAR-NEXT:    pop {r4, r5, r6, r7, r8, pc}
;
; NEON-LABEL: fshr_i37:
; NEON:       @ %bb.0:
; NEON-NEXT:    .save {r4, r5, r6, r7, r8, lr}
; NEON-NEXT:    push {r4, r5, r6, r7, r8, lr}
; NEON-NEXT:    mov r4, r1
; NEON-NEXT:    ldr r1, [sp, #28]
; NEON-NEXT:    mov r8, r0
; NEON-NEXT:    ldr r0, [sp, #24]
; NEON-NEXT:    and r1, r1, #31
; NEON-NEXT:    mov r5, r3
; NEON-NEXT:    mov r7, r2
; NEON-NEXT:    mov r2, #37
; NEON-NEXT:    mov r3, #0
; NEON-NEXT:    bl __aeabi_uldivmod
; NEON-NEXT:    lsl r3, r5, #27
; NEON-NEXT:    add r0, r2, #27
; NEON-NEXT:    orr r3, r3, r7, lsr #5
; NEON-NEXT:    ands r2, r0, #32
; NEON-NEXT:    mov r5, r8
; NEON-NEXT:    mov r1, #31
; NEON-NEXT:    moveq r5, r3
; NEON-NEXT:    lsleq r3, r7, #27
; NEON-NEXT:    cmp r2, #0
; NEON-NEXT:    bic r1, r1, r0
; NEON-NEXT:    moveq r4, r8
; NEON-NEXT:    lsl r6, r5, #1
; NEON-NEXT:    and r7, r0, #31
; NEON-NEXT:    lsl r2, r4, #1
; NEON-NEXT:    lsl r6, r6, r1
; NEON-NEXT:    lsl r1, r2, r1
; NEON-NEXT:    orr r0, r6, r3, lsr r7
; NEON-NEXT:    orr r1, r1, r5, lsr r7
; NEON-NEXT:    pop {r4, r5, r6, r7, r8, pc}
  %f = call i37 @llvm.fshr.i37(i37 %x, i37 %y, i37 %z)
  ret i37 %f
}
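
; As in fshl_i37, __aeabi_uldivmod computes z urem 37; the extra
; 'add r0, r2, #27' then biases the amount by 64 - 37 = 27 so the right
; shift operates on the value sitting in the low 37 bits of the widened i64.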

; extract(concat(0b1110000, 0b1111111) >> 2) = 0b0011111
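; (the same 14-bit concat shifted right by 2 is 0b00111000011111, whose low
; 7 bits are 0b0011111 = 31.)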

declare i7 @llvm.fshr.i7(i7, i7, i7)
define i7 @fshr_i7_const_fold() {
; CHECK-LABEL: fshr_i7_const_fold:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r0, #31
; CHECK-NEXT:    bx lr
  %f = call i7 @llvm.fshr.i7(i7 112, i7 127, i7 2)
  ret i7 %f
}

define i8 @fshr_i8_const_fold_overshift_1() {
; CHECK-LABEL: fshr_i8_const_fold_overshift_1:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r0, #254
; CHECK-NEXT:    bx lr
  %f = call i8 @llvm.fshr.i8(i8 255, i8 0, i8 15)
  ret i8 %f
}
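
; 15 % 8 == 7, so this folds like fshr(255, 0, 7):
; ((255 << 1) & 255) | (0 >> 7) = 254.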

define i8 @fshr_i8_const_fold_overshift_2() {
; CHECK-LABEL: fshr_i8_const_fold_overshift_2:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r0, #225
; CHECK-NEXT:    bx lr
  %f = call i8 @llvm.fshr.i8(i8 15, i8 15, i8 11)
  ret i8 %f
}
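
; 11 % 8 == 3: ((15 << 5) & 255) | (15 >> 3) = 224 | 1 = 225.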

define i8 @fshr_i8_const_fold_overshift_3() {
; CHECK-LABEL: fshr_i8_const_fold_overshift_3:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r0, #255
; CHECK-NEXT:    bx lr
  %f = call i8 @llvm.fshr.i8(i8 0, i8 255, i8 8)
  ret i8 %f
}
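
; 8 % 8 == 0, so this is a no-op that returns y = 255.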

; With constant shift amount, this is 'extr'.

define i32 @fshr_i32_const_shift(i32 %x, i32 %y) {
; CHECK-LABEL: fshr_i32_const_shift:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    lsl r0, r0, #23
; CHECK-NEXT:    orr r0, r0, r1, lsr #9
; CHECK-NEXT:    bx lr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 9)
  ret i32 %f
}

; Check modulo math on shift amount: 41 % 32 = 9.

define i32 @fshr_i32_const_overshift(i32 %x, i32 %y) {
; CHECK-LABEL: fshr_i32_const_overshift:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    lsl r0, r0, #23
; CHECK-NEXT:    orr r0, r0, r1, lsr #9
; CHECK-NEXT:    bx lr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 41)
  ret i32 %f
}

; 64-bit should also work: 105 % 64 = 41.

define i64 @fshr_i64_const_overshift(i64 %x, i64 %y) {
; CHECK-LABEL: fshr_i64_const_overshift:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    lsl r2, r0, #23
; CHECK-NEXT:    lsl r1, r1, #23
; CHECK-NEXT:    orr r2, r2, r3, lsr #9
; CHECK-NEXT:    orr r1, r1, r0, lsr #9
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    bx lr
  %f = call i64 @llvm.fshr.i64(i64 %x, i64 %y, i64 105)
  ret i64 %f
}

; This should work without any node-specific logic.

define i8 @fshr_i8_const_fold() {
; CHECK-LABEL: fshr_i8_const_fold:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r0, #254
; CHECK-NEXT:    bx lr
  %f = call i8 @llvm.fshr.i8(i8 255, i8 0, i8 7)
  ret i8 %f
}

define i32 @fshl_i32_shift_by_bitwidth(i32 %x, i32 %y) {
; CHECK-LABEL: fshl_i32_shift_by_bitwidth:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    bx lr
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 32)
  ret i32 %f
}
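
; Shifting by exactly the bit width means z % 32 == 0: fshl returns x
; (already in r0, so the body is empty) and fshr, below, returns y.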

define i32 @fshr_i32_shift_by_bitwidth(i32 %x, i32 %y) {
; CHECK-LABEL: fshr_i32_shift_by_bitwidth:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    bx lr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 32)
  ret i32 %f
}

define <4 x i32> @fshl_v4i32_shift_by_bitwidth(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: fshl_v4i32_shift_by_bitwidth:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    bx lr
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}

define <4 x i32> @fshr_v4i32_shift_by_bitwidth(<4 x i32> %x, <4 x i32> %y) {
; SCALAR-LABEL: fshr_v4i32_shift_by_bitwidth:
; SCALAR:       @ %bb.0:
; SCALAR-NEXT:    ldm sp, {r0, r1, r2, r3}
; SCALAR-NEXT:    bx lr
;
; NEON-LABEL: fshr_v4i32_shift_by_bitwidth:
; NEON:       @ %bb.0:
; NEON-NEXT:    mov r0, sp
; NEON-NEXT:    vld1.64 {d16, d17}, [r0]
; NEON-NEXT:    vmov r0, r1, d16
; NEON-NEXT:    vmov r2, r3, d17
; NEON-NEXT:    bx lr
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}
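
; The vector form likewise returns y, which was passed on the stack: SCALAR
; reloads it into r0-r3 with ldm, NEON round-trips through a q register.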