; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=ppc32-- | FileCheck %s --check-prefixes=CHECK,CHECK32,CHECK32_32
; RUN: llc < %s -mtriple=ppc32-- -mcpu=ppc64 | FileCheck %s --check-prefixes=CHECK,CHECK32,CHECK32_64
; RUN: llc < %s -mtriple=powerpc64le-- | FileCheck %s --check-prefixes=CHECK,CHECK64

declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i16 @llvm.fshl.i16(i16, i16, i16)
declare i32 @llvm.fshl.i32(i32, i32, i32)
declare i64 @llvm.fshl.i64(i64, i64, i64)
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

declare i8 @llvm.fshr.i8(i8, i8, i8)
declare i16 @llvm.fshr.i16(i16, i16, i16)
declare i32 @llvm.fshr.i32(i32, i32, i32)
declare i64 @llvm.fshr.i64(i64, i64, i64)
declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

; When first 2 operands match, it's a rotate.
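; Illustrative note: per the LLVM LangRef, fshl(x, y, s) takes the top bits
; of ((x ++ y) << (s mod bitwidth)), so fshl(x, x, s) is rotl(x, s), i.e.
; (x << s) | (x >> (bitwidth - s)).
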
define i8 @rotl_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotl_i8_const_shift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rotlwi 4, 3, 27
; CHECK-NEXT:    rlwimi 4, 3, 3, 0, 28
; CHECK-NEXT:    mr 3, 4
; CHECK-NEXT:    blr
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 3)
  ret i8 %f
}

define i64 @rotl_i64_const_shift(i64 %x) {
; CHECK32-LABEL: rotl_i64_const_shift:
; CHECK32:       # %bb.0:
; CHECK32-NEXT:    rotlwi 5, 4, 3
; CHECK32-NEXT:    rotlwi 6, 3, 3
; CHECK32-NEXT:    rlwimi 5, 3, 3, 0, 28
; CHECK32-NEXT:    rlwimi 6, 4, 3, 0, 28
; CHECK32-NEXT:    mr 3, 5
; CHECK32-NEXT:    mr 4, 6
; CHECK32-NEXT:    blr
;
; CHECK64-LABEL: rotl_i64_const_shift:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    rotldi 3, 3, 3
; CHECK64-NEXT:    blr
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 3)
  ret i64 %f
}

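; Illustrative note: in the CHECK32 sequence above, the i64 rotate is split
; across the two 32-bit register halves; each rotlwi/rlwimi pair funnels bits
; from one half into the other, so both result registers mix both inputs.
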
; When first 2 operands match, it's a rotate (by variable amount).
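; Illustrative note: the rotate amount is implicitly taken modulo the bit
; width, so for i16 only the low 4 bits of %z matter (clrlwi ..., 28), and
; the complementary right-shift amount comes from the negated %z.
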
define i16 @rotl_i16(i16 %x, i16 %z) {
; CHECK-LABEL: rotl_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    clrlwi 6, 4, 28
; CHECK-NEXT:    neg 4, 4
; CHECK-NEXT:    clrlwi 5, 3, 16
; CHECK-NEXT:    clrlwi 4, 4, 28
; CHECK-NEXT:    slw 3, 3, 6
; CHECK-NEXT:    srw 4, 5, 4
; CHECK-NEXT:    or 3, 3, 4
; CHECK-NEXT:    blr
  %f = call i16 @llvm.fshl.i16(i16 %x, i16 %x, i16 %z)
  ret i16 %f
}

define i32 @rotl_i32(i32 %x, i32 %z) {
; CHECK-LABEL: rotl_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rotlw 3, 3, 4
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %z)
  ret i32 %f
}

define i64 @rotl_i64(i64 %x, i64 %z) {
; CHECK32_32-LABEL: rotl_i64:
; CHECK32_32:       # %bb.0:
; CHECK32_32-NEXT:    andi. 5, 6, 32
; CHECK32_32-NEXT:    clrlwi 5, 6, 27
; CHECK32_32-NEXT:    subfic 6, 5, 32
; CHECK32_32-NEXT:    bc 12, 2, .LBB4_2
; CHECK32_32-NEXT:  # %bb.1:
; CHECK32_32-NEXT:    ori 7, 3, 0
; CHECK32_32-NEXT:    ori 3, 4, 0
; CHECK32_32-NEXT:    b .LBB4_3
; CHECK32_32-NEXT:  .LBB4_2:
; CHECK32_32-NEXT:    addi 7, 4, 0
; CHECK32_32-NEXT:  .LBB4_3:
; CHECK32_32-NEXT:    srw 4, 7, 6
; CHECK32_32-NEXT:    slw 8, 3, 5
; CHECK32_32-NEXT:    srw 6, 3, 6
; CHECK32_32-NEXT:    slw 5, 7, 5
; CHECK32_32-NEXT:    or 3, 8, 4
; CHECK32_32-NEXT:    or 4, 5, 6
; CHECK32_32-NEXT:    blr
;
; CHECK32_64-LABEL: rotl_i64:
; CHECK32_64:       # %bb.0:
; CHECK32_64-NEXT:    andi. 5, 6, 32
; CHECK32_64-NEXT:    clrlwi 5, 6, 27
; CHECK32_64-NEXT:    bc 12, 2, .LBB4_2
; CHECK32_64-NEXT:  # %bb.1:
; CHECK32_64-NEXT:    ori 7, 3, 0
; CHECK32_64-NEXT:    ori 3, 4, 0
; CHECK32_64-NEXT:    b .LBB4_3
; CHECK32_64-NEXT:  .LBB4_2:
; CHECK32_64-NEXT:    addi 7, 4, 0
; CHECK32_64-NEXT:  .LBB4_3:
; CHECK32_64-NEXT:    subfic 6, 5, 32
; CHECK32_64-NEXT:    srw 4, 7, 6
; CHECK32_64-NEXT:    slw 8, 3, 5
; CHECK32_64-NEXT:    srw 6, 3, 6
; CHECK32_64-NEXT:    slw 5, 7, 5
; CHECK32_64-NEXT:    or 3, 8, 4
; CHECK32_64-NEXT:    or 4, 5, 6
; CHECK32_64-NEXT:    blr
;
; CHECK64-LABEL: rotl_i64:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    rotld 3, 3, 4
; CHECK64-NEXT:    blr
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 %z)
  ret i64 %f
}

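; Illustrative note: in the CHECK32 blocks above, andi. ..., 32 tests whether
; the register halves swap, clrlwi ..., 27 reduces the amount mod 32, and the
; four shifts plus two ors reassemble the rotated halves.
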
; Vector rotate.

define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) {
; CHECK32_32-LABEL: rotl_v4i32:
; CHECK32_32:       # %bb.0:
; CHECK32_32-NEXT:    rotlw 3, 3, 7
; CHECK32_32-NEXT:    rotlw 4, 4, 8
; CHECK32_32-NEXT:    rotlw 5, 5, 9
; CHECK32_32-NEXT:    rotlw 6, 6, 10
; CHECK32_32-NEXT:    blr
;
; CHECK32_64-LABEL: rotl_v4i32:
; CHECK32_64:       # %bb.0:
; CHECK32_64-NEXT:    vrlw 2, 2, 3
; CHECK32_64-NEXT:    blr
;
; CHECK64-LABEL: rotl_v4i32:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    vrlw 2, 2, 3
; CHECK64-NEXT:    blr
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
  ret <4 x i32> %f
}

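; Illustrative note: without Altivec, the v4i32 rotate above is scalarized
; into four rotlw (one per lane held in GPRs); with vector support a single
; vrlw rotates all four lanes.
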
; Vector rotate by constant splat amount.
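; Illustrative note: vspltisw materializes a lane splat of any 5-bit signed
; immediate (-16..15), so the splat of 3 below is a single instruction
; feeding vrlw.
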
define <4 x i32> @rotl_v4i32_const_shift(<4 x i32> %x) {
; CHECK32_32-LABEL: rotl_v4i32_const_shift:
; CHECK32_32:       # %bb.0:
; CHECK32_32-NEXT:    rotlwi 3, 3, 3
; CHECK32_32-NEXT:    rotlwi 4, 4, 3
; CHECK32_32-NEXT:    rotlwi 5, 5, 3
; CHECK32_32-NEXT:    rotlwi 6, 6, 3
; CHECK32_32-NEXT:    blr
;
; CHECK32_64-LABEL: rotl_v4i32_const_shift:
; CHECK32_64:       # %bb.0:
; CHECK32_64-NEXT:    vspltisw 3, 3
; CHECK32_64-NEXT:    vrlw 2, 2, 3
; CHECK32_64-NEXT:    blr
;
; CHECK64-LABEL: rotl_v4i32_const_shift:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    vspltisw 3, 3
; CHECK64-NEXT:    vrlw 2, 2, 3
; CHECK64-NEXT:    blr
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
  ret <4 x i32> %f
}

; Repeat everything for funnel shift right.
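; Illustrative note: fshr(x, x, s) == rotr(x, s) == rotl(x, bitwidth - s),
; which is why the variable-amount cases below negate the amount and reuse
; the rotate-left instructions.
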
define i8 @rotr_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotr_i8_const_shift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rotlwi 4, 3, 29
; CHECK-NEXT:    rlwimi 4, 3, 5, 0, 26
; CHECK-NEXT:    mr 3, 4
; CHECK-NEXT:    blr
  %f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 3)
  ret i8 %f
}

define i32 @rotr_i32_const_shift(i32 %x) {
; CHECK-LABEL: rotr_i32_const_shift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    rotlwi 3, 3, 29
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 3)
  ret i32 %f
}

; When first 2 operands match, it's a rotate (by variable amount).

define i16 @rotr_i16(i16 %x, i16 %z) {
; CHECK-LABEL: rotr_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    clrlwi 6, 4, 28
; CHECK-NEXT:    neg 4, 4
; CHECK-NEXT:    clrlwi 5, 3, 16
; CHECK-NEXT:    clrlwi 4, 4, 28
; CHECK-NEXT:    srw 5, 5, 6
; CHECK-NEXT:    slw 3, 3, 4
; CHECK-NEXT:    or 3, 5, 3
; CHECK-NEXT:    blr
  %f = call i16 @llvm.fshr.i16(i16 %x, i16 %x, i16 %z)
  ret i16 %f
}

define i32 @rotr_i32(i32 %x, i32 %z) {
; CHECK-LABEL: rotr_i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    neg 4, 4
; CHECK-NEXT:    rotlw 3, 3, 4
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %z)
  ret i32 %f
}

define i64 @rotr_i64(i64 %x, i64 %z) {
; CHECK32_32-LABEL: rotr_i64:
; CHECK32_32:       # %bb.0:
; CHECK32_32-NEXT:    andi. 5, 6, 32
; CHECK32_32-NEXT:    clrlwi 5, 6, 27
; CHECK32_32-NEXT:    subfic 6, 5, 32
; CHECK32_32-NEXT:    bc 12, 2, .LBB11_2
; CHECK32_32-NEXT:  # %bb.1:
; CHECK32_32-NEXT:    ori 7, 4, 0
; CHECK32_32-NEXT:    b .LBB11_3
; CHECK32_32-NEXT:  .LBB11_2:
; CHECK32_32-NEXT:    addi 7, 3, 0
; CHECK32_32-NEXT:    addi 3, 4, 0
; CHECK32_32-NEXT:  .LBB11_3:
; CHECK32_32-NEXT:    srw 4, 7, 5
; CHECK32_32-NEXT:    slw 8, 3, 6
; CHECK32_32-NEXT:    srw 5, 3, 5
; CHECK32_32-NEXT:    slw 6, 7, 6
; CHECK32_32-NEXT:    or 3, 8, 4
; CHECK32_32-NEXT:    or 4, 6, 5
; CHECK32_32-NEXT:    blr
;
; CHECK32_64-LABEL: rotr_i64:
; CHECK32_64:       # %bb.0:
; CHECK32_64-NEXT:    andi. 5, 6, 32
; CHECK32_64-NEXT:    clrlwi 5, 6, 27
; CHECK32_64-NEXT:    bc 12, 2, .LBB11_2
; CHECK32_64-NEXT:  # %bb.1:
; CHECK32_64-NEXT:    ori 7, 4, 0
; CHECK32_64-NEXT:    b .LBB11_3
; CHECK32_64-NEXT:  .LBB11_2:
; CHECK32_64-NEXT:    addi 7, 3, 0
; CHECK32_64-NEXT:    addi 3, 4, 0
; CHECK32_64-NEXT:  .LBB11_3:
; CHECK32_64-NEXT:    subfic 6, 5, 32
; CHECK32_64-NEXT:    srw 4, 7, 5
; CHECK32_64-NEXT:    slw 8, 3, 6
; CHECK32_64-NEXT:    srw 5, 3, 5
; CHECK32_64-NEXT:    slw 6, 7, 6
; CHECK32_64-NEXT:    or 3, 8, 4
; CHECK32_64-NEXT:    or 4, 6, 5
; CHECK32_64-NEXT:    blr
;
; CHECK64-LABEL: rotr_i64:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    neg 4, 4
; CHECK64-NEXT:    rotld 3, 3, 4
; CHECK64-NEXT:    blr
  %f = call i64 @llvm.fshr.i64(i64 %x, i64 %x, i64 %z)
  ret i64 %f
}

; Vector rotate.

define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) {
; CHECK32_32-LABEL: rotr_v4i32:
; CHECK32_32:       # %bb.0:
; CHECK32_32-NEXT:    neg 7, 7
; CHECK32_32-NEXT:    neg 8, 8
; CHECK32_32-NEXT:    neg 9, 9
; CHECK32_32-NEXT:    neg 10, 10
; CHECK32_32-NEXT:    rotlw 3, 3, 7
; CHECK32_32-NEXT:    rotlw 4, 4, 8
; CHECK32_32-NEXT:    rotlw 5, 5, 9
; CHECK32_32-NEXT:    rotlw 6, 6, 10
; CHECK32_32-NEXT:    blr
;
; CHECK32_64-LABEL: rotr_v4i32:
; CHECK32_64:       # %bb.0:
; CHECK32_64-NEXT:    vxor 4, 4, 4
; CHECK32_64-NEXT:    vsubuwm 3, 4, 3
; CHECK32_64-NEXT:    vrlw 2, 2, 3
; CHECK32_64-NEXT:    blr
;
; CHECK64-LABEL: rotr_v4i32:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    xxlxor 36, 36, 36
; CHECK64-NEXT:    vsubuwm 3, 4, 3
; CHECK64-NEXT:    vrlw 2, 2, 3
; CHECK64-NEXT:    blr
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
  ret <4 x i32> %f
}

; Vector rotate by constant splat amount.
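; Illustrative note: a splat of 29 is outside vspltisw's immediate range
; (-16..15), so it is materialized as 13 - (-16) = 29 with two vspltisw and
; a vsubuwm.
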
define <4 x i32> @rotr_v4i32_const_shift(<4 x i32> %x) {
; CHECK32_32-LABEL: rotr_v4i32_const_shift:
; CHECK32_32:       # %bb.0:
; CHECK32_32-NEXT:    rotlwi 3, 3, 29
; CHECK32_32-NEXT:    rotlwi 4, 4, 29
; CHECK32_32-NEXT:    rotlwi 5, 5, 29
; CHECK32_32-NEXT:    rotlwi 6, 6, 29
; CHECK32_32-NEXT:    blr
;
; CHECK32_64-LABEL: rotr_v4i32_const_shift:
; CHECK32_64:       # %bb.0:
; CHECK32_64-NEXT:    vspltisw 3, -16
; CHECK32_64-NEXT:    vspltisw 4, 13
; CHECK32_64-NEXT:    vsubuwm 3, 4, 3
; CHECK32_64-NEXT:    vrlw 2, 2, 3
; CHECK32_64-NEXT:    blr
;
; CHECK64-LABEL: rotr_v4i32_const_shift:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    vspltisw 3, -16
; CHECK64-NEXT:    vspltisw 4, 13
; CHECK64-NEXT:    vsubuwm 3, 4, 3
; CHECK64-NEXT:    vrlw 2, 2, 3
; CHECK64-NEXT:    blr
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
  ret <4 x i32> %f
}

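; Funnel-shift amounts are taken modulo the bit width, so shifting by exactly
; the bit width is the identity; each function below compiles to just blr.
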
define i32 @rotl_i32_shift_by_bitwidth(i32 %x) {
; CHECK-LABEL: rotl_i32_shift_by_bitwidth:
; CHECK:       # %bb.0:
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 32)
  ret i32 %f
}

define i32 @rotr_i32_shift_by_bitwidth(i32 %x) {
; CHECK-LABEL: rotr_i32_shift_by_bitwidth:
; CHECK:       # %bb.0:
; CHECK-NEXT:    blr
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 32)
  ret i32 %f
}

define <4 x i32> @rotl_v4i32_shift_by_bitwidth(<4 x i32> %x) {
; CHECK-LABEL: rotl_v4i32_shift_by_bitwidth:
; CHECK:       # %bb.0:
; CHECK-NEXT:    blr
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}

define <4 x i32> @rotr_v4i32_shift_by_bitwidth(<4 x i32> %x) {
; CHECK-LABEL: rotr_v4i32_shift_by_bitwidth:
; CHECK:       # %bb.0:
; CHECK-NEXT:    blr
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}