; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm-eabi -mattr=+v6t2 | FileCheck %s --check-prefixes=CHECK,SCALAR
; RUN: llc < %s -mtriple=arm-eabi -mattr=+v6t2 -mattr=+neon | FileCheck %s --check-prefixes=CHECK,NEON

declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i16 @llvm.fshl.i16(i16, i16, i16)
declare i32 @llvm.fshl.i32(i32, i32, i32)
declare i64 @llvm.fshl.i64(i64, i64, i64)
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

declare i8 @llvm.fshr.i8(i8, i8, i8)
declare i16 @llvm.fshr.i16(i16, i16, i16)
declare i32 @llvm.fshr.i32(i32, i32, i32)
declare i64 @llvm.fshr.i64(i64, i64, i64)
declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

; When first 2 operands match, it's a rotate.
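; (Per the LangRef: fshl(x, y, z) shifts the concatenation x:y left by z
; modulo the bitwidth and returns the high half, so fshl(x, x, z) is a
; rotate-left of x by z.)
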
define i8 @rotl_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotl_i8_const_shift:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    uxtb r1, r0
; CHECK-NEXT:    lsl r0, r0, #3
; CHECK-NEXT:    orr r0, r0, r1, lsr #5
; CHECK-NEXT:    bx lr
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 3)
  ret i8 %f
}

define i64 @rotl_i64_const_shift(i64 %x) {
; CHECK-LABEL: rotl_i64_const_shift:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    lsl r2, r0, #3
; CHECK-NEXT:    orr r2, r2, r1, lsr #29
; CHECK-NEXT:    lsl r1, r1, #3
; CHECK-NEXT:    orr r1, r1, r0, lsr #29
; CHECK-NEXT:    mov r0, r2
; CHECK-NEXT:    bx lr
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 3)
  ret i64 %f
}

; When first 2 operands match, it's a rotate (by variable amount).
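; The variable-amount rotate is expanded as
;   (x << (z & (bw-1))) | (x >> (-z & (bw-1)))
; which is why the i16 lowering below masks both the amount and its negation
; with 15.
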
define i16 @rotl_i16(i16 %x, i16 %z) {
; CHECK-LABEL: rotl_i16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    and r2, r1, #15
; CHECK-NEXT:    rsb r1, r1, #0
; CHECK-NEXT:    and r1, r1, #15
; CHECK-NEXT:    lsl r2, r0, r2
; CHECK-NEXT:    uxth r0, r0
; CHECK-NEXT:    orr r0, r2, r0, lsr r1
; CHECK-NEXT:    bx lr
  %f = call i16 @llvm.fshl.i16(i16 %x, i16 %x, i16 %z)
  ret i16 %f
}

define i32 @rotl_i32(i32 %x, i32 %z) {
; CHECK-LABEL: rotl_i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    rsb r1, r1, #0
; CHECK-NEXT:    ror r0, r0, r1
; CHECK-NEXT:    bx lr
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %z)
  ret i32 %f
}

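; ARM has a rotate-right instruction (ror) but no rotate-left, so rotl_i32
; above becomes a rotate-right by the negated amount (the rsb); rotation is
; inherently modulo 32, so no masking of the amount is needed.
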
define i64 @rotl_i64(i64 %x, i64 %z) {
; CHECK-LABEL: rotl_i64:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    ands r3, r2, #32
; CHECK-NEXT:    and r12, r2, #31
; CHECK-NEXT:    mov r3, r0
; CHECK-NEXT:    mov r4, #31
; CHECK-NEXT:    movne r3, r1
; CHECK-NEXT:    movne r1, r0
; CHECK-NEXT:    bic r2, r4, r2
; CHECK-NEXT:    lsl lr, r3, r12
; CHECK-NEXT:    lsr r0, r1, #1
; CHECK-NEXT:    lsl r1, r1, r12
; CHECK-NEXT:    lsr r3, r3, #1
; CHECK-NEXT:    orr r0, lr, r0, lsr r2
; CHECK-NEXT:    orr r1, r1, r3, lsr r2
; CHECK-NEXT:    pop {r4, pc}
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 %z)
  ret i64 %f
}

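; The i64 rotate is expanded over the register pair: the 'ands ... #32' is
; used only for its flags, the conditional moves swap the halves when the
; amount crosses the 32-bit boundary, and each half is then funnel-shifted
; with the other.
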
; Vector rotate.

define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) {
; SCALAR-LABEL: rotl_v4i32:
; SCALAR:       @ %bb.0:
; SCALAR-NEXT:    ldr r12, [sp]
; SCALAR-NEXT:    rsb r12, r12, #0
; SCALAR-NEXT:    ror r0, r0, r12
; SCALAR-NEXT:    ldr r12, [sp, #4]
; SCALAR-NEXT:    rsb r12, r12, #0
; SCALAR-NEXT:    ror r1, r1, r12
; SCALAR-NEXT:    ldr r12, [sp, #8]
; SCALAR-NEXT:    rsb r12, r12, #0
; SCALAR-NEXT:    ror r2, r2, r12
; SCALAR-NEXT:    ldr r12, [sp, #12]
; SCALAR-NEXT:    rsb r12, r12, #0
; SCALAR-NEXT:    ror r3, r3, r12
; SCALAR-NEXT:    bx lr
;
; NEON-LABEL: rotl_v4i32:
; NEON:       @ %bb.0:
; NEON-NEXT:    mov r12, sp
; NEON-NEXT:    vld1.64 {d16, d17}, [r12]
; NEON-NEXT:    vmov.i32 q10, #0x1f
; NEON-NEXT:    vneg.s32 q9, q8
; NEON-NEXT:    vmov d23, r2, r3
; NEON-NEXT:    vand q9, q9, q10
; NEON-NEXT:    vand q8, q8, q10
; NEON-NEXT:    vmov d22, r0, r1
; NEON-NEXT:    vneg.s32 q9, q9
; NEON-NEXT:    vshl.u32 q8, q11, q8
; NEON-NEXT:    vshl.u32 q9, q11, q9
; NEON-NEXT:    vorr q8, q8, q9
; NEON-NEXT:    vmov r0, r1, d16
; NEON-NEXT:    vmov r2, r3, d17
; NEON-NEXT:    bx lr
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
  ret <4 x i32> %f
}

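; NEON has no vector rotate instruction; the lowering above uses vshl.u32
; twice, relying on a negative per-lane shift amount in vshl.u32 acting as a
; right shift.
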
; Vector rotate by constant splat amount.

define <4 x i32> @rotl_v4i32_rotl_const_shift(<4 x i32> %x) {
; SCALAR-LABEL: rotl_v4i32_rotl_const_shift:
; SCALAR:       @ %bb.0:
; SCALAR-NEXT:    ror r0, r0, #29
; SCALAR-NEXT:    ror r1, r1, #29
; SCALAR-NEXT:    ror r2, r2, #29
; SCALAR-NEXT:    ror r3, r3, #29
; SCALAR-NEXT:    bx lr
;
; NEON-LABEL: rotl_v4i32_rotl_const_shift:
; NEON:       @ %bb.0:
; NEON-NEXT:    vmov d17, r2, r3
; NEON-NEXT:    vmov d16, r0, r1
; NEON-NEXT:    vshr.u32 q9, q8, #29
; NEON-NEXT:    vshl.i32 q8, q8, #3
; NEON-NEXT:    vorr q8, q8, q9
; NEON-NEXT:    vmov r0, r1, d16
; NEON-NEXT:    vmov r2, r3, d17
; NEON-NEXT:    bx lr
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
  ret <4 x i32> %f
}

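; With a constant splat amount no negation is needed at run time: the scalar
; code folds rotl-by-3 into ror #29, and NEON emits a plain vshl #3 /
; vshr #29 pair ORed together.
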
; Repeat everything for funnel shift right.

; When first 2 operands match, it's a rotate.
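; (fshr returns the low half of the shifted concatenation, so fshr(x, x, z)
; is a rotate-right of x by z.)
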
define i8 @rotr_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotr_i8_const_shift:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    uxtb r1, r0
; CHECK-NEXT:    lsr r1, r1, #3
; CHECK-NEXT:    orr r0, r1, r0, lsl #5
; CHECK-NEXT:    bx lr
  %f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 3)
  ret i8 %f
}

define i32 @rotr_i32_const_shift(i32 %x) {
; CHECK-LABEL: rotr_i32_const_shift:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ror r0, r0, #3
; CHECK-NEXT:    bx lr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 3)
  ret i32 %f
}

; When first 2 operands match, it's a rotate (by variable amount).

define i16 @rotr_i16(i16 %x, i16 %z) {
; CHECK-LABEL: rotr_i16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    and r2, r1, #15
; CHECK-NEXT:    rsb r1, r1, #0
; CHECK-NEXT:    and r1, r1, #15
; CHECK-NEXT:    uxth r3, r0
; CHECK-NEXT:    lsr r2, r3, r2
; CHECK-NEXT:    orr r0, r2, r0, lsl r1
; CHECK-NEXT:    bx lr
  %f = call i16 @llvm.fshr.i16(i16 %x, i16 %x, i16 %z)
  ret i16 %f
}

define i32 @rotr_i32(i32 %x, i32 %z) {
; CHECK-LABEL: rotr_i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ror r0, r0, r1
; CHECK-NEXT:    bx lr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %z)
  ret i32 %f
}

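; rotr maps directly onto ARM's ror; unlike rotl_i32, no rsb of the amount
; is needed.
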
define i64 @rotr_i64(i64 %x, i64 %z) {
; CHECK-LABEL: rotr_i64:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ands r3, r2, #32
; CHECK-NEXT:    mov r3, r1
; CHECK-NEXT:    moveq r3, r0
; CHECK-NEXT:    moveq r0, r1
; CHECK-NEXT:    mov r1, #31
; CHECK-NEXT:    lsl r12, r0, #1
; CHECK-NEXT:    bic r1, r1, r2
; CHECK-NEXT:    and r2, r2, #31
; CHECK-NEXT:    lsl r12, r12, r1
; CHECK-NEXT:    orr r12, r12, r3, lsr r2
; CHECK-NEXT:    lsl r3, r3, #1
; CHECK-NEXT:    lsl r1, r3, r1
; CHECK-NEXT:    orr r1, r1, r0, lsr r2
; CHECK-NEXT:    mov r0, r12
; CHECK-NEXT:    bx lr
  %f = call i64 @llvm.fshr.i64(i64 %x, i64 %x, i64 %z)
  ret i64 %f
}

; Vector rotate.

define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) {
; SCALAR-LABEL: rotr_v4i32:
; SCALAR:       @ %bb.0:
; SCALAR-NEXT:    ldr r12, [sp]
; SCALAR-NEXT:    ror r0, r0, r12
; SCALAR-NEXT:    ldr r12, [sp, #4]
; SCALAR-NEXT:    ror r1, r1, r12
; SCALAR-NEXT:    ldr r12, [sp, #8]
; SCALAR-NEXT:    ror r2, r2, r12
; SCALAR-NEXT:    ldr r12, [sp, #12]
; SCALAR-NEXT:    ror r3, r3, r12
; SCALAR-NEXT:    bx lr
;
; NEON-LABEL: rotr_v4i32:
; NEON:       @ %bb.0:
; NEON-NEXT:    mov r12, sp
; NEON-NEXT:    vld1.64 {d16, d17}, [r12]
; NEON-NEXT:    vmov.i32 q9, #0x1f
; NEON-NEXT:    vneg.s32 q10, q8
; NEON-NEXT:    vand q8, q8, q9
; NEON-NEXT:    vmov d23, r2, r3
; NEON-NEXT:    vand q9, q10, q9
; NEON-NEXT:    vneg.s32 q8, q8
; NEON-NEXT:    vmov d22, r0, r1
; NEON-NEXT:    vshl.u32 q9, q11, q9
; NEON-NEXT:    vshl.u32 q8, q11, q8
; NEON-NEXT:    vorr q8, q8, q9
; NEON-NEXT:    vmov r0, r1, d16
; NEON-NEXT:    vmov r2, r3, d17
; NEON-NEXT:    bx lr
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
  ret <4 x i32> %f
}

; Vector rotate by constant splat amount.

define <4 x i32> @rotr_v4i32_const_shift(<4 x i32> %x) {
; SCALAR-LABEL: rotr_v4i32_const_shift:
; SCALAR:       @ %bb.0:
; SCALAR-NEXT:    ror r0, r0, #3
; SCALAR-NEXT:    ror r1, r1, #3
; SCALAR-NEXT:    ror r2, r2, #3
; SCALAR-NEXT:    ror r3, r3, #3
; SCALAR-NEXT:    bx lr
;
; NEON-LABEL: rotr_v4i32_const_shift:
; NEON:       @ %bb.0:
; NEON-NEXT:    vmov d17, r2, r3
; NEON-NEXT:    vmov d16, r0, r1
; NEON-NEXT:    vshl.i32 q9, q8, #29
; NEON-NEXT:    vshr.u32 q8, q8, #3
; NEON-NEXT:    vorr q8, q8, q9
; NEON-NEXT:    vmov r0, r1, d16
; NEON-NEXT:    vmov r2, r3, d17
; NEON-NEXT:    bx lr
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
  ret <4 x i32> %f
}

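; Shift by the full bitwidth: funnel-shift amounts are taken modulo the
; bitwidth, so a rotate by 32 is the identity and each function below
; compiles to a plain return.
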
define i32 @rotl_i32_shift_by_bitwidth(i32 %x) {
; CHECK-LABEL: rotl_i32_shift_by_bitwidth:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    bx lr
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 32)
  ret i32 %f
}

define i32 @rotr_i32_shift_by_bitwidth(i32 %x) {
; CHECK-LABEL: rotr_i32_shift_by_bitwidth:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    bx lr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 32)
  ret i32 %f
}

define <4 x i32> @rotl_v4i32_shift_by_bitwidth(<4 x i32> %x) {
; CHECK-LABEL: rotl_v4i32_shift_by_bitwidth:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    bx lr
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}

define <4 x i32> @rotr_v4i32_shift_by_bitwidth(<4 x i32> %x) {
; CHECK-LABEL: rotr_v4i32_shift_by_bitwidth:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    bx lr
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}