; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I

; Test that we turn (sra (shl X, 32), 32-C) into (slli (sext.w X), C)

define i64 @test1(i64 %a) nounwind {
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    slli a0, a0, 2
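
; A sketch of the kind of IR this first case covers, assuming C = 2 from the
; slli immediate above (illustration only, not part of the autogenerated
; checks):
;   %1 = shl i64 %a, 32
;   %2 = ashr i64 %1, 30
;   ret i64 %2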

define i64 @test2(i32 signext %a) nounwind {
; RV64I-NEXT:    slli a0, a0, 3
  %1 = zext i32 %a to i64

define i64 @test3(ptr %a) nounwind {
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    slli a0, a0, 4
  %2 = zext i32 %1 to i64

define i64 @test4(i32 signext %a, i32 signext %b) nounwind {
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 30
  %2 = zext i32 %1 to i64

define i64 @test5(i32 signext %a, i32 signext %b) nounwind {
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 31
  %2 = zext i32 %1 to i64

define i64 @test6(i32 signext %a, i32 signext %b) nounwind {
; RV64I-NEXT:    sllw a0, a0, a1
; RV64I-NEXT:    slli a0, a0, 16
  %2 = zext i32 %1 to i64

; The ashr+add+shl is canonical IR from InstCombine for
; (sext (add (trunc X to i32), 1) to i64).
; That can be implemented as addiw; make sure we recover it.
define i64 @test7(ptr %0, i64 %1) {
; RV64I-NEXT:    addiw a0, a1, 1
  %4 = add i64 %3, 4294967296
  %5 = ashr exact i64 %4, 32
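
; Arithmetic behind the addiw above (a sketch, assuming the elided %3 is the
; shl of %1 by 32 that the comment describes): 4294967296 is 1 << 32, so
;   (ashr exact (add (shl X, 32), 1 << 32), 32) == sext(trunc(X) + 1),
; which is exactly what addiw a0, a1, 1 computes.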

; The ashr+add+shl is canonical IR from InstCombine for
; (sext (sub 1, (trunc X to i32)) to i64).
; That can be implemented as (li 1)+subw; make sure we recover it.
define i64 @test8(ptr %0, i64 %1) {
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    subw a0, a0, a1
  %3 = mul i64 %1, -4294967296
  %4 = add i64 %3, 4294967296
  %5 = ashr exact i64 %4, 32
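
; Same derivation for the sub form: mul by -4294967296 is (-X) << 32, so
;   (ashr exact (add ((-X) << 32), 1 << 32), 32) == sext(1 - trunc(X)),
; which matches the li 1 + subw sequence above.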

; The gep is here to introduce a shl by 2 after the ashr that will get folded
; and make this harder to recover.
define signext i32 @test9(ptr %0, i64 %1) {
; RV64I-LABEL: test9:
; RV64I-NEXT:    lui a2, 1
; RV64I-NEXT:    addi a2, a2, 1
; RV64I-NEXT:    addw a1, a1, a2
; RV64I-NEXT:    slli a1, a1, 2
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    lw a0, 0(a0)
  %4 = add i64 %3, 17596481011712 ; 4097 << 32
  %5 = ashr exact i64 %4, 32
  %6 = getelementptr inbounds i32, ptr %0, i64 %5
  %7 = load i32, ptr %6, align 4
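
; The gep indexes i32 elements, so it scales %5 by 4, i.e. reintroduces a shl
; by 2 after the ashr by 32. Note that lui a2, 1 + addi a2, a2, 1 materializes
; (1 << 12) + 1 = 4097, matching the "4097 << 32" constant above.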

; The gep is here to introduce a shl by 2 after the ashr that will get folded
; and make this harder to recover.
define signext i32 @test10(ptr %0, i64 %1) {
; RV64I-LABEL: test10:
; RV64I-NEXT:    lui a2, 30141
; RV64I-NEXT:    addi a2, a2, -747
; RV64I-NEXT:    subw a2, a2, a1
; RV64I-NEXT:    slli a2, a2, 2
; RV64I-NEXT:    add a0, a0, a2
; RV64I-NEXT:    lw a0, 0(a0)
  %3 = mul i64 %1, -4294967296
  %4 = add i64 %3, 530242871224172544 ; 123456789 << 32
  %5 = ashr exact i64 %4, 32
  %6 = getelementptr inbounds i32, ptr %0, i64 %5
  %7 = load i32, ptr %6, align 4
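
; As in test9, but the negative multiplier turns the recovered 32-bit op into
; subw, and lui a2, 30141 + addi a2, a2, -747 materializes
; (30141 << 12) - 747 = 123457536 - 747 = 123456789, matching the
; "123456789 << 32" constant above.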

define i64 @test11(ptr %0, i64 %1) {
; RV64I-LABEL: test11:
; RV64I-NEXT:    lui a0, 524288
; RV64I-NEXT:    subw a0, a0, a1
  %3 = mul i64 %1, -4294967296
  %4 = add i64 %3, 9223372036854775808 ; 0x8000'0000'0000'0000
  %5 = ashr exact i64 %4, 32

; Make sure we use slli+srai to enable the possibility of compressed
; instructions.
define i32 @test12(i32 signext %0) {
; RV64I-LABEL: test12:
; RV64I-NEXT:    slli a0, a0, 49
; RV64I-NEXT:    srai a0, a0, 47
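
; Rationale (assumed): slli/srai have 16-bit c.slli/c.srai encodings in the C
; extension, whereas the W-form shifts (slliw/sraiw) have no compressed
; counterparts, so this pair can end up smaller.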

define i8 @test13(ptr %0, i64 %1) {
; RV64I-LABEL: test13:
; RV64I-NEXT:    li a2, 1
; RV64I-NEXT:    subw a2, a2, a1
; RV64I-NEXT:    add a2, a0, a2
; RV64I-NEXT:    lbu a2, 0(a2)
; RV64I-NEXT:    li a3, 2
; RV64I-NEXT:    subw a3, a3, a1
; RV64I-NEXT:    add a0, a0, a3
; RV64I-NEXT:    lbu a0, 0(a0)
; RV64I-NEXT:    add a0, a2, a0
  %3 = mul i64 %1, -4294967296
  %4 = add i64 %3, 4294967296 ; 1 << 32
  %5 = ashr exact i64 %4, 32
  %6 = getelementptr inbounds i8, ptr %0, i64 %5
  %7 = load i8, ptr %6, align 4
  %8 = add i64 %3, 8589934592 ; 2 << 32
  %9 = ashr exact i64 %8, 32
  %10 = getelementptr inbounds i8, ptr %0, i64 %9
  %11 = load i8, ptr %10, align 4

define signext i32 @test14(ptr %0, ptr %1, i64 %2) {
; RV64I-LABEL: test14:
; RV64I-NEXT:    li a3, 1
; RV64I-NEXT:    subw a3, a3, a2
; RV64I-NEXT:    add a0, a0, a3
; RV64I-NEXT:    lbu a0, 0(a0)
; RV64I-NEXT:    slli a3, a3, 2
; RV64I-NEXT:    add a1, a1, a3
; RV64I-NEXT:    lw a1, 0(a1)
; RV64I-NEXT:    addw a0, a0, a1
  %4 = mul i64 %2, -4294967296
  %5 = add i64 %4, 4294967296 ; 1 << 32
  %6 = ashr exact i64 %5, 32
  %7 = getelementptr inbounds i8, ptr %0, i64 %6
  %8 = load i8, ptr %7, align 4
  %9 = zext i8 %8 to i32
  %10 = getelementptr inbounds i32, ptr %1, i64 %6
  %11 = load i32, ptr %10, align 4
  %12 = add i32 %9, %11