; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64 < %s | FileCheck %s

; Check that we optimize out AND instructions and ADD/SUB instructions
; modulo the shift size to take advantage of the implicit mod done on
; the shift amount value by the variable shift/rotate instructions.
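;
; Illustrative sketch (not one of the tests below): the 32-bit variable shift
; "lsr w0, w0, w1" only reads the low 5 bits of w1, and the 64-bit forms only
; read the low 6 bits, so IR such as
;   %amt = and i32 %y, 31
;   %r = lshr i32 %x, %amt
; can be selected to a single lsr with no explicit mask of the amount.
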
define i32 @test1(i32 %x, i64 %y) {
; CHECK-LABEL: test1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsr w0, w0, w1
; CHECK-NEXT:    ret
  %sh_prom = trunc i64 %y to i32
  %shr = lshr i32 %x, %sh_prom
  ret i32 %shr
}

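; The (64 - x) shift amount below is congruent to -x modulo 64, so the sub
; against 64 can be selected as a plain neg feeding the variable shift.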
define i64 @test2(i32 %x, i64 %y) {
; CHECK-LABEL: test2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w0
; CHECK-NEXT:    asr x0, x1, x8
; CHECK-NEXT:    ret
  %sub9 = sub nsw i32 64, %x
  %sh_prom12.i = zext i32 %sub9 to i64
  %shr.i = ashr i64 %y, %sh_prom12.i
  ret i64 %shr.i
}

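; Likewise, (64 + x) is congruent to x modulo 64, so the add below should
; disappear entirely and the shift amount register can be used directly.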
define i64 @test3(i64 %x, i64 %y) {
; CHECK-LABEL: test3:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl x0, x1, x0
; CHECK-NEXT:    ret
  %add = add nsw i64 64, %x
  %shl = shl i64 %y, %add
  ret i64 %shl
}

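; In the next three tests the i32 shift amount is widened to i64; since the
; 64-bit variable shifts only read the low 6 bits of the amount register, the
; zext/sext should not need any extra instructions.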
define i64 @test4(i64 %y, i32 %s) {
; CHECK-LABEL: test4:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    asr x0, x0, x1
; CHECK-NEXT:    ret
entry:
  %sh_prom = zext i32 %s to i64
  %shr = ashr i64 %y, %sh_prom
  ret i64 %shr
}

define i64 @test5(i64 %y, i32 %s) {
; CHECK-LABEL: test5:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    asr x0, x0, x1
; CHECK-NEXT:    ret
entry:
  %sh_prom = sext i32 %s to i64
  %shr = ashr i64 %y, %sh_prom
  ret i64 %shr
}

define i64 @test6(i64 %y, i32 %s) {
; CHECK-LABEL: test6:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT:    lsl x0, x0, x1
; CHECK-NEXT:    ret
entry:
  %sh_prom = sext i32 %s to i64
  %shr = shl i64 %y, %sh_prom
  ret i64 %shr
}

; PR42644 - https://bugs.llvm.org/show_bug.cgi?id=42644
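; The folds below rely on (r << 32) + (1 << 32) == (r + 1) << 32: arithmetically
; shifting the sum back right by 32 is a sign extension of the low 32 bits of
; r + 1, so the whole sequence becomes an add of 1 followed by sxtw
; (sxtb for the i8-width variant).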
define i64 @ashr_add_shl_i32(i64 %r) {
; CHECK-LABEL: ashr_add_shl_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w0, #1
; CHECK-NEXT:    sxtw x0, w8
; CHECK-NEXT:    ret
  %conv = shl i64 %r, 32
  %sext = add i64 %conv, 4294967296
  %conv1 = ashr i64 %sext, 32
  ret i64 %conv1
}

define i64 @ashr_add_shl_i8(i64 %r) {
; CHECK-LABEL: ashr_add_shl_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w8, w0, #1
; CHECK-NEXT:    sxtb x0, w8
; CHECK-NEXT:    ret
  %conv = shl i64 %r, 56
  %sext = add i64 %conv, 72057594037927936
  %conv1 = ashr i64 %sext, 56
  ret i64 %conv1
}

define <4 x i32> @ashr_add_shl_v4i8(<4 x i32> %r) {
; CHECK-LABEL: ashr_add_shl_v4i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v1.4s, #1, lsl #24
; CHECK-NEXT:    shl v0.4s, v0.4s, #24
; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    sshr v0.4s, v0.4s, #24
; CHECK-NEXT:    ret
  %conv = shl <4 x i32> %r, <i32 24, i32 24, i32 24, i32 24>
  %sext = add <4 x i32> %conv, <i32 16777216, i32 16777216, i32 16777216, i32 16777216>
  %conv1 = ashr <4 x i32> %sext, <i32 24, i32 24, i32 24, i32 24>
  ret <4 x i32> %conv1
}

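; With a 36-bit shift pair, the add of (1 << 32) lands in bits [35:32], which
; the shl by 36 leaves as zero and the ashr by 36 throws away, so it cannot
; affect the result; the remaining shl+ashr pair is a 28-bit signed bitfield
; extract (sbfx).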
define i64 @ashr_add_shl_i36(i64 %r) {
; CHECK-LABEL: ashr_add_shl_i36:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sbfx x0, x0, #0, #28
; CHECK-NEXT:    ret
  %conv = shl i64 %r, 36
  %sext = add i64 %conv, 4294967296
  %conv1 = ashr i64 %sext, 36
  ret i64 %conv1
}

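; Negative test: the shl amount (8) does not match the ashr amount (32), so
; this is not a plain sign extension and the constant has to be materialized.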
define i64 @ashr_add_shl_mismatch_shifts1(i64 %r) {
; CHECK-LABEL: ashr_add_shl_mismatch_shifts1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #4294967296
; CHECK-NEXT:    add x8, x8, x0, lsl #8
; CHECK-NEXT:    asr x0, x8, #32
; CHECK-NEXT:    ret
  %conv = shl i64 %r, 8
  %sext = add i64 %conv, 4294967296
  %conv1 = ashr i64 %sext, 32
  ret i64 %conv1
}

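; Negative test: the first shift is a logical right shift rather than a shl,
; so no sign-extension fold applies; the final ashr can still become lsr
; because the sum is known to be non-negative.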
define i64 @ashr_add_shl_mismatch_shifts2(i64 %r) {
; CHECK-LABEL: ashr_add_shl_mismatch_shifts2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #4294967296
; CHECK-NEXT:    add x8, x8, x0, lsr #8
; CHECK-NEXT:    lsr x0, x8, #8
; CHECK-NEXT:    ret
  %conv = lshr i64 %r, 8
  %sext = add i64 %conv, 4294967296
  %conv1 = ashr i64 %sext, 8
  ret i64 %conv1
}