; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   -riscv-experimental-rv64-legal-i32 | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN:   -riscv-experimental-rv64-legal-i32 | FileCheck -check-prefix=RV64IM %s
define i32 @udiv(i32 %a, i32 %b) nounwind {
; RV64I-LABEL: udiv:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    slli a1, a1, 32
; RV64I-NEXT:    srli a1, a1, 32
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    divuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = udiv i32 %a, %b
  ret i32 %1
}

define i32 @udiv_constant(i32 %a) nounwind {
; RV64I-LABEL: udiv_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 32
; RV64IM-NEXT:    lui a1, 838861
; RV64IM-NEXT:    addi a1, a1, -819
; RV64IM-NEXT:    slli a1, a1, 32
; RV64IM-NEXT:    mulhu a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 34
; RV64IM-NEXT:    ret
  %1 = udiv i32 %a, 5
  ret i32 %1
}

define i32 @udiv_pow2(i32 %a) nounwind {
; RV64I-LABEL: udiv_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srliw a0, a0, 3
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    srliw a0, a0, 3
; RV64IM-NEXT:    ret
  %1 = udiv i32 %a, 8
  ret i32 %1
}

define i32 @udiv_constant_lhs(i32 %a) nounwind {
; RV64I-LABEL: udiv_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a1, a0, 32
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    divuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = udiv i32 10, %a
  ret i32 %1
}

define i64 @udiv64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: udiv64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    tail __udivdi3@plt
;
; RV64IM-LABEL: udiv64:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    divu a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = udiv i64 %a, %b
  ret i64 %1
}

define i64 @udiv64_constant(i64 %a) nounwind {
; RV64I-LABEL: udiv64_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    tail __udivdi3@plt
;
; RV64IM-LABEL: udiv64_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, 838861
; RV64IM-NEXT:    addiw a1, a1, -819
; RV64IM-NEXT:    slli a2, a1, 32
; RV64IM-NEXT:    add a1, a1, a2
; RV64IM-NEXT:    mulhu a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 2
; RV64IM-NEXT:    ret
  %1 = udiv i64 %a, 5
  ret i64 %1
}

define i64 @udiv64_constant_lhs(i64 %a) nounwind {
; RV64I-LABEL: udiv64_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    tail __udivdi3@plt
;
; RV64IM-LABEL: udiv64_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    divu a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = udiv i64 10, %a
  ret i64 %1
}

define i8 @udiv8(i8 %a, i8 %b) nounwind {
; RV64I-LABEL: udiv8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    andi a1, a1, 255
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv8:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    andi a1, a1, 255
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    divuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = udiv i8 %a, %b
  ret i8 %1
}

define i8 @udiv8_constant(i8 %a) nounwind {
; RV64I-LABEL: udiv8_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    andi a0, a0, 255
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv8_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    li a1, 205
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    srliw a0, a0, 10
; RV64IM-NEXT:    ret
  %1 = udiv i8 %a, 5
  ret i8 %1
}

define i8 @udiv8_pow2(i8 %a) nounwind {
; RV64I-LABEL: udiv8_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 56
; RV64I-NEXT:    srli a0, a0, 59
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv8_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 56
; RV64IM-NEXT:    srli a0, a0, 59
; RV64IM-NEXT:    ret
  %1 = udiv i8 %a, 8
  ret i8 %1
}

define i8 @udiv8_constant_lhs(i8 %a) nounwind {
; RV64I-LABEL: udiv8_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    andi a1, a0, 255
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv8_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    andi a0, a0, 255
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    divuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = udiv i8 10, %a
  ret i8 %1
}

define i16 @udiv16(i16 %a, i16 %b) nounwind {
; RV64I-LABEL: udiv16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lui a2, 16
; RV64I-NEXT:    addiw a2, a2, -1
; RV64I-NEXT:    and a0, a0, a2
; RV64I-NEXT:    and a1, a1, a2
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv16:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a2, 16
; RV64IM-NEXT:    addi a2, a2, -1
; RV64IM-NEXT:    and a1, a1, a2
; RV64IM-NEXT:    and a0, a0, a2
; RV64IM-NEXT:    divuw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = udiv i16 %a, %b
  ret i16 %1
}

define i16 @udiv16_constant(i16 %a) nounwind {
; RV64I-LABEL: udiv16_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 48
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv16_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, 52429
; RV64IM-NEXT:    slli a1, a1, 4
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    mulhu a0, a0, a1
; RV64IM-NEXT:    srliw a0, a0, 18
; RV64IM-NEXT:    ret
  %1 = udiv i16 %a, 5
  ret i16 %1
}

define i16 @udiv16_pow2(i16 %a) nounwind {
; RV64I-LABEL: udiv16_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a0, a0, 51
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv16_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srli a0, a0, 51
; RV64IM-NEXT:    ret
  %1 = udiv i16 %a, 8
  ret i16 %1
}

define i16 @udiv16_constant_lhs(i16 %a) nounwind {
; RV64I-LABEL: udiv16_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 48
; RV64I-NEXT:    srli a1, a0, 48
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    call __udivdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: udiv16_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, 16
; RV64IM-NEXT:    addi a1, a1, -1
; RV64IM-NEXT:    and a0, a0, a1
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    divuw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = udiv i16 10, %a
  ret i16 %1
}

define i32 @sdiv(i32 %a, i32 %b) nounwind {
; RV64I-LABEL: sdiv:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    divw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i32 %a, %b
  ret i32 %1
}

define i32 @sdiv_constant(i32 %a) nounwind {
; RV64I-LABEL: sdiv_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a0, a0
; RV64IM-NEXT:    lui a1, 419430
; RV64IM-NEXT:    addiw a1, a1, 1639
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    srli a1, a0, 63
; RV64IM-NEXT:    srai a0, a0, 33
; RV64IM-NEXT:    addw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i32 %a, 5
  ret i32 %1
}

define i32 @sdiv_pow2(i32 %a) nounwind {
; RV64I-LABEL: sdiv_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    srliw a1, a1, 29
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    sraiw a0, a0, 3
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sraiw a1, a0, 31
; RV64IM-NEXT:    srliw a1, a1, 29
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    sraiw a0, a0, 3
; RV64IM-NEXT:    ret
  %1 = sdiv i32 %a, 8
  ret i32 %1
}

define i32 @sdiv_pow2_2(i32 %a) nounwind {
; RV64I-LABEL: sdiv_pow2_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sraiw a1, a0, 31
; RV64I-NEXT:    srliw a1, a1, 16
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    sraiw a0, a0, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv_pow2_2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sraiw a1, a0, 31
; RV64IM-NEXT:    srliw a1, a1, 16
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    sraiw a0, a0, 16
; RV64IM-NEXT:    ret
  %1 = sdiv i32 %a, 65536
  ret i32 %1
}

define i32 @sdiv_constant_lhs(i32 %a) nounwind {
; RV64I-LABEL: sdiv_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a1, a0
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    divw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = sdiv i32 -10, %a
  ret i32 %1
}

define i64 @sdiv64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: sdiv64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    tail __divdi3@plt
;
; RV64IM-LABEL: sdiv64:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    div a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i64 %a, %b
  ret i64 %1
}

define i64 @sdiv64_constant(i64 %a) nounwind {
; RV64I-LABEL: sdiv64_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    tail __divdi3@plt
;
; RV64IM-LABEL: sdiv64_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, %hi(.LCPI21_0)
; RV64IM-NEXT:    ld a1, %lo(.LCPI21_0)(a1)
; RV64IM-NEXT:    mulh a0, a0, a1
; RV64IM-NEXT:    srli a1, a0, 63
; RV64IM-NEXT:    srai a0, a0, 1
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i64 %a, 5
  ret i64 %1
}

define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
; RV64I-LABEL: sdiv64_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, 10
; RV64I-NEXT:    tail __divdi3@plt
;
; RV64IM-LABEL: sdiv64_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 10
; RV64IM-NEXT:    div a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = sdiv i64 10, %a
  ret i64 %1
}

; Although this sdiv has two sexti32 operands, it shouldn't compile to divw on
; RV64M as that wouldn't produce the correct result for e.g. INT_MIN/-1.
define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
; RV64I-LABEL: sdiv64_sext_operands:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    tail __divdi3@plt
;
; RV64IM-LABEL: sdiv64_sext_operands:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a0, a0
; RV64IM-NEXT:    sext.w a1, a1
; RV64IM-NEXT:    div a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sext i32 %a to i64
  %2 = sext i32 %b to i64
  %3 = sdiv i64 %1, %2
  ret i64 %3
}

define i8 @sdiv8(i8 %a, i8 %b) nounwind {
; RV64I-LABEL: sdiv8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a1, a1, 24
; RV64I-NEXT:    sraiw a1, a1, 24
; RV64I-NEXT:    slli a0, a0, 24
; RV64I-NEXT:    sraiw a0, a0, 24
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv8:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a1, 24
; RV64IM-NEXT:    sraiw a1, a1, 24
; RV64IM-NEXT:    slli a0, a0, 24
; RV64IM-NEXT:    sraiw a0, a0, 24
; RV64IM-NEXT:    divw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i8 %a, %b
  ret i8 %1
}

define i8 @sdiv8_constant(i8 %a) nounwind {
; RV64I-LABEL: sdiv8_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 24
; RV64I-NEXT:    sraiw a0, a0, 24
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv8_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 24
; RV64IM-NEXT:    sraiw a0, a0, 24
; RV64IM-NEXT:    li a1, 103
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    sraiw a1, a0, 9
; RV64IM-NEXT:    slli a0, a0, 48
; RV64IM-NEXT:    srli a0, a0, 63
; RV64IM-NEXT:    addw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = sdiv i8 %a, 5
  ret i8 %1
}

define i8 @sdiv8_pow2(i8 %a) nounwind {
; RV64I-LABEL: sdiv8_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 24
; RV64I-NEXT:    sraiw a1, a1, 24
; RV64I-NEXT:    slli a1, a1, 49
; RV64I-NEXT:    srli a1, a1, 61
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 24
; RV64I-NEXT:    sraiw a0, a0, 27
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv8_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a0, 24
; RV64IM-NEXT:    sraiw a1, a1, 24
; RV64IM-NEXT:    slli a1, a1, 49
; RV64IM-NEXT:    srli a1, a1, 61
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    slli a0, a0, 24
; RV64IM-NEXT:    sraiw a0, a0, 27
; RV64IM-NEXT:    ret
  %1 = sdiv i8 %a, 8
  ret i8 %1
}

define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
; RV64I-LABEL: sdiv8_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 24
; RV64I-NEXT:    sraiw a1, a0, 24
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv8_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 24
; RV64IM-NEXT:    sraiw a0, a0, 24
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    divw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = sdiv i8 -10, %a
  ret i8 %1
}

define i16 @sdiv16(i16 %a, i16 %b) nounwind {
; RV64I-LABEL: sdiv16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a1, a1, 16
; RV64I-NEXT:    sraiw a1, a1, 16
; RV64I-NEXT:    slli a0, a0, 16
; RV64I-NEXT:    sraiw a0, a0, 16
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv16:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a1, 16
; RV64IM-NEXT:    sraiw a1, a1, 16
; RV64IM-NEXT:    slli a0, a0, 16
; RV64IM-NEXT:    sraiw a0, a0, 16
; RV64IM-NEXT:    divw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i16 %a, %b
  ret i16 %1
}

define i16 @sdiv16_constant(i16 %a) nounwind {
; RV64I-LABEL: sdiv16_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 16
; RV64I-NEXT:    sraiw a0, a0, 16
; RV64I-NEXT:    li a1, 5
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv16_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 16
; RV64IM-NEXT:    sraiw a0, a0, 16
; RV64IM-NEXT:    lui a1, 6
; RV64IM-NEXT:    addi a1, a1, 1639
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    srliw a1, a0, 31
; RV64IM-NEXT:    sraiw a0, a0, 17
; RV64IM-NEXT:    addw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = sdiv i16 %a, 5
  ret i16 %1
}

define i16 @sdiv16_pow2(i16 %a) nounwind {
; RV64I-LABEL: sdiv16_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 16
; RV64I-NEXT:    sraiw a1, a1, 16
; RV64I-NEXT:    slli a1, a1, 33
; RV64I-NEXT:    srli a1, a1, 61
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 16
; RV64I-NEXT:    sraiw a0, a0, 19
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv16_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a0, 16
; RV64IM-NEXT:    sraiw a1, a1, 16
; RV64IM-NEXT:    slli a1, a1, 33
; RV64IM-NEXT:    srli a1, a1, 61
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    slli a0, a0, 16
; RV64IM-NEXT:    sraiw a0, a0, 19
; RV64IM-NEXT:    ret
  %1 = sdiv i16 %a, 8
  ret i16 %1
}

define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
; RV64I-LABEL: sdiv16_constant_lhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 16
; RV64I-NEXT:    sraiw a1, a0, 16
; RV64I-NEXT:    li a0, -10
; RV64I-NEXT:    call __divdi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: sdiv16_constant_lhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 16
; RV64IM-NEXT:    sraiw a0, a0, 16
; RV64IM-NEXT:    li a1, -10
; RV64IM-NEXT:    divw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = sdiv i16 -10, %a
  ret i16 %1
}