; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=ppc32-- | FileCheck %s --check-prefixes=CHECK,CHECK32,CHECK32_32
; RUN: llc < %s -mtriple=ppc32-- -mcpu=ppc64 | FileCheck %s --check-prefixes=CHECK,CHECK32,CHECK32_64
; RUN: llc < %s -mtriple=powerpc64le-- | FileCheck %s --check-prefixes=CHECK,CHECK64

declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i16 @llvm.fshl.i16(i16, i16, i16)
declare i32 @llvm.fshl.i32(i32, i32, i32)
declare i64 @llvm.fshl.i64(i64, i64, i64)
declare i128 @llvm.fshl.i128(i128, i128, i128)
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

declare i8 @llvm.fshr.i8(i8, i8, i8)
declare i16 @llvm.fshr.i16(i16, i16, i16)
declare i32 @llvm.fshr.i32(i32, i32, i32)
declare i64 @llvm.fshr.i64(i64, i64, i64)
declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

; General case - all operands can be variables.
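; fshl(x, y, z) concatenates x (high half) with y (low half), shifts the pair
; left by z modulo the bitwidth, and keeps the high half; for i32 that is
; (x << (z & 31)) | (y >> (32 - (z & 31))). The lowering below relies on
; slw/srw producing 0 for a shift amount of 32, so z & 31 == 0 still yields x.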
define i32 @fshl_i32(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: fshl_i32:
; CHECK-NEXT: clrlwi 5, 5, 27
; CHECK-NEXT: slw 3, 3, 5
; CHECK-NEXT: subfic 5, 5, 32
; CHECK-NEXT: srw 4, 4, 5
; CHECK-NEXT: or 3, 3, 4
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %z)
  ret i32 %f
}

define i64 @fshl_i64(i64 %x, i64 %y, i64 %z) {
; CHECK32_32-LABEL: fshl_i64:
; CHECK32_32: # %bb.0:
; CHECK32_32-NEXT: andi. 7, 8, 32
; CHECK32_32-NEXT: clrlwi 7, 8, 27
; CHECK32_32-NEXT: subfic 8, 7, 32
; CHECK32_32-NEXT: bc 12, 2, .LBB1_2
; CHECK32_32-NEXT: # %bb.1:
; CHECK32_32-NEXT: ori 9, 5, 0
; CHECK32_32-NEXT: ori 3, 4, 0
; CHECK32_32-NEXT: ori 4, 6, 0
; CHECK32_32-NEXT: b .LBB1_3
; CHECK32_32-NEXT: .LBB1_2:
; CHECK32_32-NEXT: addi 9, 4, 0
; CHECK32_32-NEXT: addi 4, 5, 0
; CHECK32_32-NEXT: .LBB1_3:
; CHECK32_32-NEXT: srw 5, 9, 8
; CHECK32_32-NEXT: slw 3, 3, 7
; CHECK32_32-NEXT: srw 4, 4, 8
; CHECK32_32-NEXT: slw 6, 9, 7
; CHECK32_32-NEXT: or 3, 3, 5
; CHECK32_32-NEXT: or 4, 6, 4
; CHECK32_32-NEXT: blr
; CHECK32_64-LABEL: fshl_i64:
; CHECK32_64: # %bb.0:
; CHECK32_64-NEXT: andi. 7, 8, 32
; CHECK32_64-NEXT: clrlwi 7, 8, 27
; CHECK32_64-NEXT: bc 12, 2, .LBB1_2
; CHECK32_64-NEXT: # %bb.1:
; CHECK32_64-NEXT: ori 9, 5, 0
; CHECK32_64-NEXT: ori 3, 4, 0
; CHECK32_64-NEXT: ori 5, 6, 0
; CHECK32_64-NEXT: b .LBB1_3
; CHECK32_64-NEXT: .LBB1_2:
; CHECK32_64-NEXT: addi 9, 4, 0
; CHECK32_64-NEXT: .LBB1_3:
; CHECK32_64-NEXT: subfic 8, 7, 32
; CHECK32_64-NEXT: srw 4, 9, 8
; CHECK32_64-NEXT: slw 3, 3, 7
; CHECK32_64-NEXT: srw 5, 5, 8
; CHECK32_64-NEXT: slw 6, 9, 7
; CHECK32_64-NEXT: or 3, 3, 4
; CHECK32_64-NEXT: or 4, 6, 5
; CHECK32_64-NEXT: blr
; CHECK64-LABEL: fshl_i64:
; CHECK64-NEXT: clrlwi 5, 5, 26
; CHECK64-NEXT: sld 3, 3, 5
; CHECK64-NEXT: subfic 5, 5, 64
; CHECK64-NEXT: srd 4, 4, 5
; CHECK64-NEXT: or 3, 3, 4
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %y, i64 %z)
  ret i64 %f
}

define i128 @fshl_i128(i128 %x, i128 %y, i128 %z) nounwind {
; CHECK32_32-LABEL: fshl_i128:
; CHECK32_32: # %bb.0:
; CHECK32_32-NEXT: lwz 11, 20(1)
; CHECK32_32-NEXT: andi. 12, 11, 64
; CHECK32_32-NEXT: mcrf 1, 0
; CHECK32_32-NEXT: andi. 12, 11, 32
; CHECK32_32-NEXT: clrlwi 11, 11, 27
; CHECK32_32-NEXT: bc 12, 6, .LBB2_2
; CHECK32_32-NEXT: # %bb.1:
; CHECK32_32-NEXT: ori 4, 6, 0
; CHECK32_32-NEXT: ori 12, 7, 0
; CHECK32_32-NEXT: ori 3, 5, 0
; CHECK32_32-NEXT: ori 5, 8, 0
; CHECK32_32-NEXT: ori 6, 9, 0
; CHECK32_32-NEXT: ori 7, 10, 0
; CHECK32_32-NEXT: b .LBB2_3
; CHECK32_32-NEXT: .LBB2_2:
; CHECK32_32-NEXT: addi 12, 5, 0
; CHECK32_32-NEXT: addi 5, 6, 0
; CHECK32_32-NEXT: addi 6, 7, 0
; CHECK32_32-NEXT: addi 7, 8, 0
; CHECK32_32-NEXT: .LBB2_3:
; CHECK32_32-NEXT: subfic 8, 11, 32
; CHECK32_32-NEXT: bc 12, 2, .LBB2_5
; CHECK32_32-NEXT: # %bb.4:
; CHECK32_32-NEXT: ori 9, 12, 0
; CHECK32_32-NEXT: ori 3, 4, 0
; CHECK32_32-NEXT: ori 4, 5, 0
; CHECK32_32-NEXT: ori 5, 6, 0
; CHECK32_32-NEXT: ori 6, 7, 0
; CHECK32_32-NEXT: b .LBB2_6
; CHECK32_32-NEXT: .LBB2_5:
; CHECK32_32-NEXT: addi 9, 4, 0
; CHECK32_32-NEXT: addi 4, 12, 0
; CHECK32_32-NEXT: .LBB2_6:
; CHECK32_32-NEXT: srw 7, 9, 8
; CHECK32_32-NEXT: slw 3, 3, 11
; CHECK32_32-NEXT: srw 10, 4, 8
; CHECK32_32-NEXT: slw 9, 9, 11
; CHECK32_32-NEXT: srw 12, 5, 8
; CHECK32_32-NEXT: slw 0, 4, 11
; CHECK32_32-NEXT: srw 6, 6, 8
; CHECK32_32-NEXT: slw 8, 5, 11
; CHECK32_32-NEXT: or 3, 3, 7
; CHECK32_32-NEXT: or 4, 9, 10
; CHECK32_32-NEXT: or 5, 0, 12
; CHECK32_32-NEXT: or 6, 8, 6
; CHECK32_32-NEXT: blr
; CHECK32_64-LABEL: fshl_i128:
; CHECK32_64: # %bb.0:
; CHECK32_64-NEXT: stwu 1, -16(1)
; CHECK32_64-NEXT: lwz 11, 36(1)
; CHECK32_64-NEXT: andi. 12, 11, 64
; CHECK32_64-NEXT: stw 30, 8(1) # 4-byte Folded Spill
; CHECK32_64-NEXT: mcrf 1, 0
; CHECK32_64-NEXT: clrlwi 12, 11, 27
; CHECK32_64-NEXT: andi. 11, 11, 32
; CHECK32_64-NEXT: bc 12, 6, .LBB2_2
; CHECK32_64-NEXT: # %bb.1:
; CHECK32_64-NEXT: ori 4, 6, 0
; CHECK32_64-NEXT: ori 30, 7, 0
; CHECK32_64-NEXT: ori 3, 5, 0
; CHECK32_64-NEXT: ori 7, 9, 0
; CHECK32_64-NEXT: b .LBB2_3
; CHECK32_64-NEXT: .LBB2_2:
; CHECK32_64-NEXT: addi 30, 5, 0
; CHECK32_64-NEXT: .LBB2_3:
; CHECK32_64-NEXT: bc 12, 2, .LBB2_5
; CHECK32_64-NEXT: # %bb.4:
; CHECK32_64-NEXT: ori 5, 30, 0
; CHECK32_64-NEXT: ori 3, 4, 0
; CHECK32_64-NEXT: b .LBB2_6
; CHECK32_64-NEXT: .LBB2_5:
; CHECK32_64-NEXT: addi 5, 4, 0
; CHECK32_64-NEXT: .LBB2_6:
; CHECK32_64-NEXT: bc 12, 6, .LBB2_8
; CHECK32_64-NEXT: # %bb.7:
; CHECK32_64-NEXT: ori 4, 8, 0
; CHECK32_64-NEXT: ori 8, 10, 0
; CHECK32_64-NEXT: b .LBB2_9
; CHECK32_64-NEXT: .LBB2_8:
; CHECK32_64-NEXT: addi 4, 6, 0
; CHECK32_64-NEXT: .LBB2_9:
; CHECK32_64-NEXT: subfic 11, 12, 32
; CHECK32_64-NEXT: bc 12, 2, .LBB2_11
; CHECK32_64-NEXT: # %bb.10:
; CHECK32_64-NEXT: ori 0, 4, 0
; CHECK32_64-NEXT: ori 4, 7, 0
; CHECK32_64-NEXT: ori 7, 8, 0
; CHECK32_64-NEXT: b .LBB2_12
; CHECK32_64-NEXT: .LBB2_11:
; CHECK32_64-NEXT: addi 0, 30, 0
; CHECK32_64-NEXT: .LBB2_12:
; CHECK32_64-NEXT: srw 6, 5, 11
; CHECK32_64-NEXT: lwz 30, 8(1) # 4-byte Folded Reload
; CHECK32_64-NEXT: slw 3, 3, 12
; CHECK32_64-NEXT: srw 9, 0, 11
; CHECK32_64-NEXT: slw 5, 5, 12
; CHECK32_64-NEXT: srw 10, 4, 11
; CHECK32_64-NEXT: slw 0, 0, 12
; CHECK32_64-NEXT: srw 7, 7, 11
; CHECK32_64-NEXT: slw 8, 4, 12
; CHECK32_64-NEXT: or 3, 3, 6
; CHECK32_64-NEXT: or 4, 5, 9
; CHECK32_64-NEXT: or 5, 0, 10
; CHECK32_64-NEXT: or 6, 8, 7
; CHECK32_64-NEXT: addi 1, 1, 16
; CHECK32_64-NEXT: blr
; CHECK64-LABEL: fshl_i128:
; CHECK64-NEXT: andi. 8, 7, 64
; CHECK64-NEXT: clrlwi 7, 7, 26
; CHECK64-NEXT: subfic 8, 7, 64
; CHECK64-NEXT: iseleq 5, 6, 5
; CHECK64-NEXT: iseleq 6, 3, 6
; CHECK64-NEXT: iseleq 3, 4, 3
; CHECK64-NEXT: srd 5, 5, 8
; CHECK64-NEXT: sld 9, 6, 7
; CHECK64-NEXT: srd 6, 6, 8
; CHECK64-NEXT: sld 3, 3, 7
; CHECK64-NEXT: or 5, 9, 5
; CHECK64-NEXT: or 4, 3, 6
; CHECK64-NEXT: mr 3, 5
  %f = call i128 @llvm.fshl.i128(i128 %x, i128 %y, i128 %z)
  ret i128 %f
}

; Verify that weird types are minimally supported.
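; i37 is not a legal type, so the shift amount has to be reduced modulo 37
; first: the 32-bit targets call __umoddi3, while the 64-bit target expands
; the urem with a multiply-by-magic-constant sequence (mulhdu/mulli/sub).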
declare i37 @llvm.fshl.i37(i37, i37, i37)
define i37 @fshl_i37(i37 %x, i37 %y, i37 %z) {
; CHECK32_32-LABEL: fshl_i37:
; CHECK32_32: # %bb.0:
; CHECK32_32-NEXT: mflr 0
; CHECK32_32-NEXT: stwu 1, -32(1)
; CHECK32_32-NEXT: stw 0, 36(1)
; CHECK32_32-NEXT: .cfi_def_cfa_offset 32
; CHECK32_32-NEXT: .cfi_offset lr, 4
; CHECK32_32-NEXT: .cfi_offset r27, -20
; CHECK32_32-NEXT: .cfi_offset r28, -16
; CHECK32_32-NEXT: .cfi_offset r29, -12
; CHECK32_32-NEXT: .cfi_offset r30, -8
; CHECK32_32-NEXT: stw 27, 12(1) # 4-byte Folded Spill
; CHECK32_32-NEXT: mr 27, 3
; CHECK32_32-NEXT: stw 28, 16(1) # 4-byte Folded Spill
; CHECK32_32-NEXT: mr 28, 4
; CHECK32_32-NEXT: stw 29, 20(1) # 4-byte Folded Spill
; CHECK32_32-NEXT: mr 29, 5
; CHECK32_32-NEXT: stw 30, 24(1) # 4-byte Folded Spill
; CHECK32_32-NEXT: mr 30, 6
; CHECK32_32-NEXT: clrlwi 3, 7, 27
; CHECK32_32-NEXT: mr 4, 8
; CHECK32_32-NEXT: li 5, 0
; CHECK32_32-NEXT: li 6, 37
; CHECK32_32-NEXT: bl __umoddi3
; CHECK32_32-NEXT: rotlwi 3, 30, 27
; CHECK32_32-NEXT: slwi 5, 30, 27
; CHECK32_32-NEXT: andi. 6, 4, 32
; CHECK32_32-NEXT: rlwimi 3, 29, 27, 0, 4
; CHECK32_32-NEXT: clrlwi 4, 4, 27
; CHECK32_32-NEXT: subfic 6, 4, 32
; CHECK32_32-NEXT: bc 12, 2, .LBB3_2
; CHECK32_32-NEXT: # %bb.1:
; CHECK32_32-NEXT: ori 7, 3, 0
; CHECK32_32-NEXT: ori 8, 28, 0
; CHECK32_32-NEXT: ori 3, 5, 0
; CHECK32_32-NEXT: b .LBB3_3
; CHECK32_32-NEXT: .LBB3_2:
; CHECK32_32-NEXT: addi 7, 28, 0
; CHECK32_32-NEXT: addi 8, 27, 0
; CHECK32_32-NEXT: .LBB3_3:
; CHECK32_32-NEXT: lwz 30, 24(1) # 4-byte Folded Reload
; CHECK32_32-NEXT: srw 5, 7, 6
; CHECK32_32-NEXT: slw 8, 8, 4
; CHECK32_32-NEXT: srw 6, 3, 6
; CHECK32_32-NEXT: slw 4, 7, 4
; CHECK32_32-NEXT: or 3, 8, 5
; CHECK32_32-NEXT: or 4, 4, 6
; CHECK32_32-NEXT: lwz 29, 20(1) # 4-byte Folded Reload
; CHECK32_32-NEXT: lwz 28, 16(1) # 4-byte Folded Reload
; CHECK32_32-NEXT: lwz 27, 12(1) # 4-byte Folded Reload
; CHECK32_32-NEXT: lwz 0, 36(1)
; CHECK32_32-NEXT: addi 1, 1, 32
; CHECK32_32-NEXT: mtlr 0
; CHECK32_32-NEXT: blr
; CHECK32_64-LABEL: fshl_i37:
; CHECK32_64: # %bb.0:
; CHECK32_64-NEXT: mflr 0
; CHECK32_64-NEXT: stwu 1, -32(1)
; CHECK32_64-NEXT: stw 0, 36(1)
; CHECK32_64-NEXT: .cfi_def_cfa_offset 32
; CHECK32_64-NEXT: .cfi_offset lr, 4
; CHECK32_64-NEXT: .cfi_offset r27, -20
; CHECK32_64-NEXT: .cfi_offset r28, -16
; CHECK32_64-NEXT: .cfi_offset r29, -12
; CHECK32_64-NEXT: .cfi_offset r30, -8
; CHECK32_64-NEXT: stw 27, 12(1) # 4-byte Folded Spill
; CHECK32_64-NEXT: mr 27, 3
; CHECK32_64-NEXT: clrlwi 3, 7, 27
; CHECK32_64-NEXT: stw 28, 16(1) # 4-byte Folded Spill
; CHECK32_64-NEXT: mr 28, 4
; CHECK32_64-NEXT: mr 4, 8
; CHECK32_64-NEXT: stw 29, 20(1) # 4-byte Folded Spill
; CHECK32_64-NEXT: mr 29, 5
; CHECK32_64-NEXT: li 5, 0
; CHECK32_64-NEXT: stw 30, 24(1) # 4-byte Folded Spill
; CHECK32_64-NEXT: mr 30, 6
; CHECK32_64-NEXT: li 6, 37
; CHECK32_64-NEXT: bl __umoddi3
; CHECK32_64-NEXT: rotlwi 3, 30, 27
; CHECK32_64-NEXT: andi. 5, 4, 32
; CHECK32_64-NEXT: bc 12, 2, .LBB3_2
; CHECK32_64-NEXT: # %bb.1:
; CHECK32_64-NEXT: ori 8, 28, 0
; CHECK32_64-NEXT: b .LBB3_3
; CHECK32_64-NEXT: .LBB3_2:
; CHECK32_64-NEXT: addi 8, 27, 0
; CHECK32_64-NEXT: .LBB3_3:
; CHECK32_64-NEXT: lwz 27, 12(1) # 4-byte Folded Reload
; CHECK32_64-NEXT: rlwimi 3, 29, 27, 0, 4
; CHECK32_64-NEXT: clrlwi 4, 4, 27
; CHECK32_64-NEXT: bc 12, 2, .LBB3_5
; CHECK32_64-NEXT: # %bb.4:
; CHECK32_64-NEXT: ori 7, 3, 0
; CHECK32_64-NEXT: b .LBB3_6
; CHECK32_64-NEXT: .LBB3_5:
; CHECK32_64-NEXT: addi 7, 28, 0
; CHECK32_64-NEXT: .LBB3_6:
; CHECK32_64-NEXT: slwi 5, 30, 27
; CHECK32_64-NEXT: lwz 30, 24(1) # 4-byte Folded Reload
; CHECK32_64-NEXT: bc 12, 2, .LBB3_8
; CHECK32_64-NEXT: # %bb.7:
; CHECK32_64-NEXT: ori 3, 5, 0
; CHECK32_64-NEXT: b .LBB3_8
; CHECK32_64-NEXT: .LBB3_8:
; CHECK32_64-NEXT: subfic 6, 4, 32
; CHECK32_64-NEXT: slw 8, 8, 4
; CHECK32_64-NEXT: lwz 29, 20(1) # 4-byte Folded Reload
; CHECK32_64-NEXT: srw 9, 7, 6
; CHECK32_64-NEXT: srw 5, 3, 6
; CHECK32_64-NEXT: slw 4, 7, 4
; CHECK32_64-NEXT: or 3, 8, 9
; CHECK32_64-NEXT: lwz 28, 16(1) # 4-byte Folded Reload
; CHECK32_64-NEXT: or 4, 4, 5
; CHECK32_64-NEXT: lwz 0, 36(1)
; CHECK32_64-NEXT: addi 1, 1, 32
; CHECK32_64-NEXT: mtlr 0
; CHECK32_64-NEXT: blr
; CHECK64-LABEL: fshl_i37:
; CHECK64-NEXT: lis 7, 1771
; CHECK64-NEXT: clrldi 6, 5, 27
; CHECK64-NEXT: sldi 4, 4, 27
; CHECK64-NEXT: ori 7, 7, 15941
; CHECK64-NEXT: rldic 7, 7, 32, 5
; CHECK64-NEXT: oris 7, 7, 12398
; CHECK64-NEXT: ori 7, 7, 46053
; CHECK64-NEXT: mulhdu 6, 6, 7
; CHECK64-NEXT: mulli 6, 6, 37
; CHECK64-NEXT: sub 5, 5, 6
; CHECK64-NEXT: clrlwi 5, 5, 26
; CHECK64-NEXT: sld 3, 3, 5
; CHECK64-NEXT: subfic 5, 5, 64
; CHECK64-NEXT: srd 4, 4, 5
; CHECK64-NEXT: or 3, 3, 4
  %f = call i37 @llvm.fshl.i37(i37 %x, i37 %y, i37 %z)
  ret i37 %f
}

; extract(concat(0b1110000, 0b1111111) << 2) = 0b1000011
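; Worked out: the 14-bit concat 0b1110000_1111111 shifted left by 2 (within
; 14 bits) is 0b1000011_1111100, and its high 7 bits are 0b1000011 = 67,
; which is the constant materialized below.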
declare i7 @llvm.fshl.i7(i7, i7, i7)
define i7 @fshl_i7_const_fold() {
; CHECK-LABEL: fshl_i7_const_fold:
; CHECK-NEXT: li 3, 67
  %f = call i7 @llvm.fshl.i7(i7 112, i7 127, i7 2)
  ret i7 %f
}

; With constant shift amount, this is rotate + insert (missing extended mnemonics).
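; fshl(x, y, 9) == (x << 9) | (y >> 23): rotate y left by 9 so its top 9 bits
; land in the low 9 positions, then rlwimi inserts the rotated x into the
; upper 23 bits.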
define i32 @fshl_i32_const_shift(i32 %x, i32 %y) {
; CHECK-LABEL: fshl_i32_const_shift:
; CHECK-NEXT: rotlwi 4, 4, 9
; CHECK-NEXT: rlwimi 4, 3, 9, 0, 22
; CHECK-NEXT: mr 3, 4
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 9)
  ret i32 %f
}

; Check modulo math on shift amount. 41-32=9.
define i32 @fshl_i32_const_overshift(i32 %x, i32 %y) {
; CHECK-LABEL: fshl_i32_const_overshift:
; CHECK-NEXT: rotlwi 4, 4, 9
; CHECK-NEXT: rlwimi 4, 3, 9, 0, 22
; CHECK-NEXT: mr 3, 4
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 41)
  ret i32 %f
}

; 64-bit should also work. 105-64 = 41.
define i64 @fshl_i64_const_overshift(i64 %x, i64 %y) {
; CHECK32-LABEL: fshl_i64_const_overshift:
; CHECK32-NEXT: rotlwi 6, 6, 9
; CHECK32-NEXT: rotlwi 3, 5, 9
; CHECK32-NEXT: rlwimi 6, 5, 9, 0, 22
; CHECK32-NEXT: rlwimi 3, 4, 9, 0, 22
; CHECK32-NEXT: mr 4, 6
; CHECK64-LABEL: fshl_i64_const_overshift:
; CHECK64-NEXT: rotldi 4, 4, 41
; CHECK64-NEXT: rldimi 4, 3, 41, 0
; CHECK64-NEXT: mr 3, 4
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %y, i64 105)
  ret i64 %f
}

; This should work without any node-specific logic.
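; fshl(255, 0, 7): the 16-bit concat is 0b11111111_00000000; shifted left by 7
; its high 8 bits are 0b10000000 = 128.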
define i8 @fshl_i8_const_fold() {
; CHECK-LABEL: fshl_i8_const_fold:
; CHECK-NEXT: li 3, 128
  %f = call i8 @llvm.fshl.i8(i8 255, i8 0, i8 7)
  ret i8 %f
}

; Repeat everything for funnel shift right.

; General case - all operands can be variables.
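; fshr(x, y, z) is the mirror image: the concatenated pair is shifted right by
; z modulo the bitwidth and the low half is kept; for i32 that is
; (y >> (z & 31)) | (x << (32 - (z & 31))).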
define i32 @fshr_i32(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: fshr_i32:
; CHECK-NEXT: clrlwi 5, 5, 27
; CHECK-NEXT: srw 4, 4, 5
; CHECK-NEXT: subfic 5, 5, 32
; CHECK-NEXT: slw 3, 3, 5
; CHECK-NEXT: or 3, 3, 4
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %z)
  ret i32 %f
}

define i64 @fshr_i64(i64 %x, i64 %y, i64 %z) {
; CHECK32_32-LABEL: fshr_i64:
; CHECK32_32: # %bb.0:
; CHECK32_32-NEXT: andi. 7, 8, 32
; CHECK32_32-NEXT: clrlwi 7, 8, 27
; CHECK32_32-NEXT: subfic 8, 7, 32
; CHECK32_32-NEXT: bc 12, 2, .LBB10_2
; CHECK32_32-NEXT: # %bb.1:
; CHECK32_32-NEXT: ori 9, 4, 0
; CHECK32_32-NEXT: ori 4, 5, 0
; CHECK32_32-NEXT: b .LBB10_3
; CHECK32_32-NEXT: .LBB10_2:
; CHECK32_32-NEXT: addi 9, 5, 0
; CHECK32_32-NEXT: addi 3, 4, 0
; CHECK32_32-NEXT: addi 4, 6, 0
; CHECK32_32-NEXT: .LBB10_3:
; CHECK32_32-NEXT: srw 5, 9, 7
; CHECK32_32-NEXT: slw 3, 3, 8
; CHECK32_32-NEXT: srw 4, 4, 7
; CHECK32_32-NEXT: slw 6, 9, 8
; CHECK32_32-NEXT: or 3, 3, 5
; CHECK32_32-NEXT: or 4, 6, 4
; CHECK32_32-NEXT: blr
; CHECK32_64-LABEL: fshr_i64:
; CHECK32_64: # %bb.0:
; CHECK32_64-NEXT: andi. 7, 8, 32
; CHECK32_64-NEXT: clrlwi 7, 8, 27
; CHECK32_64-NEXT: bc 12, 2, .LBB10_2
; CHECK32_64-NEXT: # %bb.1:
; CHECK32_64-NEXT: ori 9, 4, 0
; CHECK32_64-NEXT: b .LBB10_3
; CHECK32_64-NEXT: .LBB10_2:
; CHECK32_64-NEXT: addi 9, 5, 0
; CHECK32_64-NEXT: addi 3, 4, 0
; CHECK32_64-NEXT: addi 5, 6, 0
; CHECK32_64-NEXT: .LBB10_3:
; CHECK32_64-NEXT: subfic 8, 7, 32
; CHECK32_64-NEXT: srw 4, 9, 7
; CHECK32_64-NEXT: slw 3, 3, 8
; CHECK32_64-NEXT: srw 5, 5, 7
; CHECK32_64-NEXT: slw 6, 9, 8
; CHECK32_64-NEXT: or 3, 3, 4
; CHECK32_64-NEXT: or 4, 6, 5
; CHECK32_64-NEXT: blr
; CHECK64-LABEL: fshr_i64:
; CHECK64-NEXT: clrlwi 5, 5, 26
; CHECK64-NEXT: srd 4, 4, 5
; CHECK64-NEXT: subfic 5, 5, 64
; CHECK64-NEXT: sld 3, 3, 5
; CHECK64-NEXT: or 3, 3, 4
  %f = call i64 @llvm.fshr.i64(i64 %x, i64 %y, i64 %z)
  ret i64 %f
}

; Verify that weird types are minimally supported.
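; As with fshl_i37, the shift amount is reduced modulo 37 first; the extra
; addi of 27 then turns the i37 funnel amount into the equivalent i64 (or
; register-pair) amount once y has been pre-shifted into the top 37 bits.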
declare i37 @llvm.fshr.i37(i37, i37, i37)
define i37 @fshr_i37(i37 %x, i37 %y, i37 %z) {
; CHECK32_32-LABEL: fshr_i37:
; CHECK32_32: # %bb.0:
; CHECK32_32-NEXT: mflr 0
; CHECK32_32-NEXT: stwu 1, -32(1)
; CHECK32_32-NEXT: stw 0, 36(1)
; CHECK32_32-NEXT: .cfi_def_cfa_offset 32
; CHECK32_32-NEXT: .cfi_offset lr, 4
; CHECK32_32-NEXT: .cfi_offset r27, -20
; CHECK32_32-NEXT: .cfi_offset r28, -16
; CHECK32_32-NEXT: .cfi_offset r29, -12
; CHECK32_32-NEXT: .cfi_offset r30, -8
; CHECK32_32-NEXT: stw 27, 12(1) # 4-byte Folded Spill
; CHECK32_32-NEXT: mr 27, 3
; CHECK32_32-NEXT: stw 28, 16(1) # 4-byte Folded Spill
; CHECK32_32-NEXT: mr 28, 4
; CHECK32_32-NEXT: stw 29, 20(1) # 4-byte Folded Spill
; CHECK32_32-NEXT: mr 29, 5
; CHECK32_32-NEXT: stw 30, 24(1) # 4-byte Folded Spill
; CHECK32_32-NEXT: mr 30, 6
; CHECK32_32-NEXT: clrlwi 3, 7, 27
; CHECK32_32-NEXT: mr 4, 8
; CHECK32_32-NEXT: li 5, 0
; CHECK32_32-NEXT: li 6, 37
; CHECK32_32-NEXT: bl __umoddi3
; CHECK32_32-NEXT: rotlwi 3, 30, 27
; CHECK32_32-NEXT: addi 4, 4, 27
; CHECK32_32-NEXT: slwi 5, 30, 27
; CHECK32_32-NEXT: rlwimi 3, 29, 27, 0, 4
; CHECK32_32-NEXT: andi. 6, 4, 32
; CHECK32_32-NEXT: clrlwi 4, 4, 27
; CHECK32_32-NEXT: subfic 6, 4, 32
; CHECK32_32-NEXT: bc 12, 2, .LBB11_2
; CHECK32_32-NEXT: # %bb.1:
; CHECK32_32-NEXT: ori 7, 28, 0
; CHECK32_32-NEXT: ori 8, 27, 0
; CHECK32_32-NEXT: b .LBB11_3
; CHECK32_32-NEXT: .LBB11_2:
; CHECK32_32-NEXT: addi 7, 3, 0
; CHECK32_32-NEXT: addi 8, 28, 0
; CHECK32_32-NEXT: addi 3, 5, 0
; CHECK32_32-NEXT: .LBB11_3:
; CHECK32_32-NEXT: lwz 30, 24(1) # 4-byte Folded Reload
; CHECK32_32-NEXT: srw 5, 7, 4
; CHECK32_32-NEXT: slw 8, 8, 6
; CHECK32_32-NEXT: srw 4, 3, 4
; CHECK32_32-NEXT: slw 6, 7, 6
; CHECK32_32-NEXT: or 3, 8, 5
; CHECK32_32-NEXT: or 4, 6, 4
; CHECK32_32-NEXT: lwz 29, 20(1) # 4-byte Folded Reload
; CHECK32_32-NEXT: lwz 28, 16(1) # 4-byte Folded Reload
; CHECK32_32-NEXT: lwz 27, 12(1) # 4-byte Folded Reload
; CHECK32_32-NEXT: lwz 0, 36(1)
; CHECK32_32-NEXT: addi 1, 1, 32
; CHECK32_32-NEXT: mtlr 0
; CHECK32_32-NEXT: blr
; CHECK32_64-LABEL: fshr_i37:
; CHECK32_64: # %bb.0:
; CHECK32_64-NEXT: mflr 0
; CHECK32_64-NEXT: stwu 1, -32(1)
; CHECK32_64-NEXT: stw 0, 36(1)
; CHECK32_64-NEXT: .cfi_def_cfa_offset 32
; CHECK32_64-NEXT: .cfi_offset lr, 4
; CHECK32_64-NEXT: .cfi_offset r27, -20
; CHECK32_64-NEXT: .cfi_offset r28, -16
; CHECK32_64-NEXT: .cfi_offset r29, -12
; CHECK32_64-NEXT: .cfi_offset r30, -8
; CHECK32_64-NEXT: stw 27, 12(1) # 4-byte Folded Spill
; CHECK32_64-NEXT: mr 27, 3
; CHECK32_64-NEXT: clrlwi 3, 7, 27
; CHECK32_64-NEXT: stw 28, 16(1) # 4-byte Folded Spill
; CHECK32_64-NEXT: mr 28, 4
; CHECK32_64-NEXT: mr 4, 8
; CHECK32_64-NEXT: stw 29, 20(1) # 4-byte Folded Spill
; CHECK32_64-NEXT: mr 29, 5
; CHECK32_64-NEXT: li 5, 0
; CHECK32_64-NEXT: stw 30, 24(1) # 4-byte Folded Spill
; CHECK32_64-NEXT: mr 30, 6
; CHECK32_64-NEXT: li 6, 37
; CHECK32_64-NEXT: bl __umoddi3
; CHECK32_64-NEXT: addi 4, 4, 27
; CHECK32_64-NEXT: rotlwi 3, 30, 27
; CHECK32_64-NEXT: andi. 5, 4, 32
; CHECK32_64-NEXT: rlwimi 3, 29, 27, 0, 4
; CHECK32_64-NEXT: lwz 29, 20(1) # 4-byte Folded Reload
; CHECK32_64-NEXT: bc 12, 2, .LBB11_2
; CHECK32_64-NEXT: # %bb.1:
; CHECK32_64-NEXT: ori 7, 28, 0
; CHECK32_64-NEXT: ori 8, 27, 0
; CHECK32_64-NEXT: b .LBB11_3
; CHECK32_64-NEXT: .LBB11_2:
; CHECK32_64-NEXT: addi 7, 3, 0
; CHECK32_64-NEXT: addi 8, 28, 0
; CHECK32_64-NEXT: .LBB11_3:
; CHECK32_64-NEXT: clrlwi 4, 4, 27
; CHECK32_64-NEXT: lwz 28, 16(1) # 4-byte Folded Reload
; CHECK32_64-NEXT: slwi 5, 30, 27
; CHECK32_64-NEXT: subfic 6, 4, 32
; CHECK32_64-NEXT: bc 12, 2, .LBB11_4
; CHECK32_64-NEXT: b .LBB11_5
; CHECK32_64-NEXT: .LBB11_4:
; CHECK32_64-NEXT: addi 3, 5, 0
; CHECK32_64-NEXT: .LBB11_5:
; CHECK32_64-NEXT: srw 9, 7, 4
; CHECK32_64-NEXT: slw 8, 8, 6
; CHECK32_64-NEXT: lwz 30, 24(1) # 4-byte Folded Reload
; CHECK32_64-NEXT: srw 4, 3, 4
; CHECK32_64-NEXT: slw 5, 7, 6
; CHECK32_64-NEXT: lwz 27, 12(1) # 4-byte Folded Reload
; CHECK32_64-NEXT: or 3, 8, 9
; CHECK32_64-NEXT: or 4, 5, 4
; CHECK32_64-NEXT: lwz 0, 36(1)
; CHECK32_64-NEXT: addi 1, 1, 32
; CHECK32_64-NEXT: mtlr 0
; CHECK32_64-NEXT: blr
; CHECK64-LABEL: fshr_i37:
; CHECK64-NEXT: lis 7, 1771
; CHECK64-NEXT: clrldi 6, 5, 27
; CHECK64-NEXT: sldi 4, 4, 27
; CHECK64-NEXT: ori 7, 7, 15941
; CHECK64-NEXT: rldic 7, 7, 32, 5
; CHECK64-NEXT: oris 7, 7, 12398
; CHECK64-NEXT: ori 7, 7, 46053
; CHECK64-NEXT: mulhdu 6, 6, 7
; CHECK64-NEXT: mulli 6, 6, 37
; CHECK64-NEXT: sub 5, 5, 6
; CHECK64-NEXT: addi 5, 5, 27
; CHECK64-NEXT: clrlwi 5, 5, 26
; CHECK64-NEXT: srd 4, 4, 5
; CHECK64-NEXT: subfic 5, 5, 64
; CHECK64-NEXT: sld 3, 3, 5
; CHECK64-NEXT: or 3, 3, 4
  %f = call i37 @llvm.fshr.i37(i37 %x, i37 %y, i37 %z)
  ret i37 %f
}

; extract(concat(0b1110000, 0b1111111) >> 2) = 0b0011111
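; Worked out: the 14-bit concat 0b1110000_1111111 shifted right by 2 is
; 0b0011100_0011111, and its low 7 bits are 0b0011111 = 31.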
declare i7 @llvm.fshr.i7(i7, i7, i7)
define i7 @fshr_i7_const_fold() {
; CHECK-LABEL: fshr_i7_const_fold:
; CHECK-NEXT: li 3, 31
  %f = call i7 @llvm.fshr.i7(i7 112, i7 127, i7 2)
  ret i7 %f
}

; With constant shift amount, this is rotate + insert (missing extended mnemonics).
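; fshr(x, y, 9) == (y >> 9) | (x << 23): rotate y left by 23 (i.e. right by 9),
; then rlwimi inserts the rotated x into the top 9 bits.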
define i32 @fshr_i32_const_shift(i32 %x, i32 %y) {
; CHECK-LABEL: fshr_i32_const_shift:
; CHECK-NEXT: rotlwi 4, 4, 23
; CHECK-NEXT: rlwimi 4, 3, 23, 0, 8
; CHECK-NEXT: mr 3, 4
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 9)
  ret i32 %f
}

; Check modulo math on shift amount. 41-32=9.
define i32 @fshr_i32_const_overshift(i32 %x, i32 %y) {
; CHECK-LABEL: fshr_i32_const_overshift:
; CHECK-NEXT: rotlwi 4, 4, 23
; CHECK-NEXT: rlwimi 4, 3, 23, 0, 8
; CHECK-NEXT: mr 3, 4
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 41)
  ret i32 %f
}

; 64-bit should also work. 105-64 = 41.
define i64 @fshr_i64_const_overshift(i64 %x, i64 %y) {
; CHECK32-LABEL: fshr_i64_const_overshift:
; CHECK32-NEXT: rotlwi 6, 4, 23
; CHECK32-NEXT: rotlwi 5, 5, 23
; CHECK32-NEXT: rlwimi 6, 3, 23, 0, 8
; CHECK32-NEXT: rlwimi 5, 4, 23, 0, 8
; CHECK32-NEXT: mr 3, 6
; CHECK32-NEXT: mr 4, 5
; CHECK64-LABEL: fshr_i64_const_overshift:
; CHECK64-NEXT: rotldi 4, 4, 23
; CHECK64-NEXT: rldimi 4, 3, 23, 0
; CHECK64-NEXT: mr 3, 4
  %f = call i64 @llvm.fshr.i64(i64 %x, i64 %y, i64 105)
  ret i64 %f
}

; This should work without any node-specific logic.
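; fshr(255, 0, 7): the 16-bit concat is 0b11111111_00000000; shifted right by 7
; its low 8 bits are 0b11111110 = 254.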
define i8 @fshr_i8_const_fold() {
; CHECK-LABEL: fshr_i8_const_fold:
; CHECK-NEXT: li 3, 254
  %f = call i8 @llvm.fshr.i8(i8 255, i8 0, i8 7)
  ret i8 %f
}

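; Shifting by exactly the bitwidth is a shift of 0 after the implicit modulo,
; so fshl returns x unchanged and fshr returns y.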
define i32 @fshl_i32_shift_by_bitwidth(i32 %x, i32 %y) {
; CHECK-LABEL: fshl_i32_shift_by_bitwidth:
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 32)
  ret i32 %f
}

define i32 @fshr_i32_shift_by_bitwidth(i32 %x, i32 %y) {
; CHECK-LABEL: fshr_i32_shift_by_bitwidth:
; CHECK-NEXT: mr 3, 4
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 32)
  ret i32 %f
}

define <4 x i32> @fshl_v4i32_shift_by_bitwidth(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: fshl_v4i32_shift_by_bitwidth:
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}

define <4 x i32> @fshr_v4i32_shift_by_bitwidth(<4 x i32> %x, <4 x i32> %y) {
; CHECK32_32-LABEL: fshr_v4i32_shift_by_bitwidth:
; CHECK32_32: # %bb.0:
; CHECK32_32-NEXT: mr 6, 10
; CHECK32_32-NEXT: mr 5, 9
; CHECK32_32-NEXT: mr 4, 8
; CHECK32_32-NEXT: mr 3, 7
; CHECK32_32-NEXT: blr
; CHECK32_64-LABEL: fshr_v4i32_shift_by_bitwidth:
; CHECK32_64: # %bb.0:
; CHECK32_64-NEXT: vmr 2, 3
; CHECK32_64-NEXT: blr
; CHECK64-LABEL: fshr_v4i32_shift_by_bitwidth:
; CHECK64-NEXT: vmr 2, 3
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}