; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=aggressive-instcombine -S | FileCheck %s
; Shift by 15 leaves only bits that fit in i16, so the whole
; zext/lshr/trunc chain can be narrowed to a single i16 lshr.
define i16 @lshr_15(i16 %x) {
; CHECK-LABEL: @lshr_15(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i16 [[X:%.*]], 15
; CHECK-NEXT:    ret i16 [[LSHR]]
;
  %zext = zext i16 %x to i32
  %lshr = lshr i32 %zext, 15
  %trunc = trunc i32 %lshr to i16
  ret i16 %trunc
}
; Negative test: shift amount 16 >= the narrow width (i16), so the
; expression cannot be narrowed and must stay in i32.
define i16 @lshr_16(i16 %x) {
; CHECK-LABEL: @lshr_16(
; CHECK-NEXT:    [[ZEXT:%.*]] = zext i16 [[X:%.*]] to i32
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[ZEXT]], 16
; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[LSHR]] to i16
; CHECK-NEXT:    ret i16 [[TRUNC]]
;
  %zext = zext i16 %x to i32
  %lshr = lshr i32 %zext, 16
  %trunc = trunc i32 %lshr to i16
  ret i16 %trunc
}
; Negative test: the variable shift amount is unbounded (full i8 range),
; so narrowing is unsafe and the i32 chain must be preserved.
; (Interior body lines reconstructed to match the autogenerated CHECKs.)
define i16 @lshr_var_shift_amount(i8 %x, i8 %amt) {
; CHECK-LABEL: @lshr_var_shift_amount(
; CHECK-NEXT:    [[Z:%.*]] = zext i8 [[X:%.*]] to i32
; CHECK-NEXT:    [[ZA:%.*]] = zext i8 [[AMT:%.*]] to i32
; CHECK-NEXT:    [[S:%.*]] = lshr i32 [[Z]], [[ZA]]
; CHECK-NEXT:    [[A:%.*]] = add i32 [[S]], [[Z]]
; CHECK-NEXT:    [[S2:%.*]] = lshr i32 [[A]], 2
; CHECK-NEXT:    [[T:%.*]] = trunc i32 [[S2]] to i16
; CHECK-NEXT:    ret i16 [[T]]
;
  %z = zext i8 %x to i32
  %za = zext i8 %amt to i32
  %s = lshr i32 %z, %za
  %a = add i32 %s, %z
  %s2 = lshr i32 %a, 2
  %t = trunc i32 %s2 to i16
  ret i16 %t
}
; The shift amount is masked to [0,15], which is in-range for i16,
; so the whole expression tree is narrowed from i32 to i16.
; (Interior body lines reconstructed to match the autogenerated CHECKs.)
define i16 @lshr_var_bounded_shift_amount(i8 %x, i8 %amt) {
; CHECK-LABEL: @lshr_var_bounded_shift_amount(
; CHECK-NEXT:    [[Z:%.*]] = zext i8 [[X:%.*]] to i16
; CHECK-NEXT:    [[ZA:%.*]] = zext i8 [[AMT:%.*]] to i16
; CHECK-NEXT:    [[ZA2:%.*]] = and i16 [[ZA]], 15
; CHECK-NEXT:    [[S:%.*]] = lshr i16 [[Z]], [[ZA2]]
; CHECK-NEXT:    [[A:%.*]] = add i16 [[S]], [[Z]]
; CHECK-NEXT:    [[S2:%.*]] = lshr i16 [[A]], 2
; CHECK-NEXT:    ret i16 [[S2]]
;
  %z = zext i8 %x to i32
  %za = zext i8 %amt to i32
  %za2 = and i32 %za, 15
  %s = lshr i32 %z, %za2
  %a = add i32 %s, %z
  %s2 = lshr i32 %a, 2
  %t = trunc i32 %s2 to i16
  ret i16 %t
}
; Negative test: the masked shift amount can be up to 2^32-1, which is
; out of range for an i32 shift, so narrowing must not happen.
define i32 @lshr_check_no_overflow(i32 %x, i16 %amt) {
; CHECK-LABEL: @lshr_check_no_overflow(
; CHECK-NEXT:    [[ZEXT:%.*]] = zext i32 [[X:%.*]] to i64
; CHECK-NEXT:    [[SEXT:%.*]] = sext i16 [[AMT:%.*]] to i64
; CHECK-NEXT:    [[AND:%.*]] = and i64 [[SEXT]], 4294967295
; CHECK-NEXT:    [[SHL:%.*]] = lshr i64 [[ZEXT]], [[AND]]
; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[SHL]] to i32
; CHECK-NEXT:    ret i32 [[TRUNC]]
;
  %zext = zext i32 %x to i64
  %sext = sext i16 %amt to i64
  %and = and i64 %sext, 4294967295
  %shl = lshr i64 %zext, %and
  %trunc = trunc i64 %shl to i32
  ret i32 %trunc
}
; A larger expression DAG (two shifts fed by adds/ands) where every
; shift amount is provably < 16, so the whole DAG narrows to i16.
define void @lshr_big_dag(ptr %a, i8 %b, i8 %c) {
; CHECK-LABEL: @lshr_big_dag(
; CHECK-NEXT:    [[ZEXT1:%.*]] = zext i8 [[B:%.*]] to i16
; CHECK-NEXT:    [[ZEXT2:%.*]] = zext i8 [[C:%.*]] to i16
; CHECK-NEXT:    [[ADD1:%.*]] = add i16 [[ZEXT1]], [[ZEXT2]]
; CHECK-NEXT:    [[SFT1:%.*]] = and i16 [[ADD1]], 15
; CHECK-NEXT:    [[SHR1:%.*]] = lshr i16 [[ADD1]], [[SFT1]]
; CHECK-NEXT:    [[ADD2:%.*]] = add i16 [[ADD1]], [[SHR1]]
; CHECK-NEXT:    [[SFT2:%.*]] = and i16 [[ADD2]], 7
; CHECK-NEXT:    [[SHR2:%.*]] = lshr i16 [[ADD2]], [[SFT2]]
; CHECK-NEXT:    store i16 [[SHR2]], ptr [[A:%.*]], align 2
; CHECK-NEXT:    ret void
;
  %zext1 = zext i8 %b to i32
  %zext2 = zext i8 %c to i32
  %add1 = add i32 %zext1, %zext2
  %sft1 = and i32 %add1, 15
  %shr1 = lshr i32 %add1, %sft1
  %add2 = add i32 %add1, %shr1
  %sft2 = and i32 %add2, 7
  %shr2 = lshr i32 %add2, %sft2
  %trunc = trunc i32 %shr2 to i16
  store i16 %trunc, ptr %a, align 2
  ret void
}
; Negative test: the destination type is i8, but the source expression
; starts from i16, so the chain is not narrowed all the way to i8.
define i8 @lshr_check_not_i8_trunc(i16 %x) {
; CHECK-LABEL: @lshr_check_not_i8_trunc(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr i16 [[X:%.*]], 1
; CHECK-NEXT:    [[ZEXT2:%.*]] = zext i16 [[LSHR]] to i32
; CHECK-NEXT:    [[LSHR2:%.*]] = lshr i32 [[ZEXT2]], 2
; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[LSHR2]] to i8
; CHECK-NEXT:    ret i8 [[TRUNC]]
;
  %lshr = lshr i16 %x, 1
  %zext2 = zext i16 %lshr to i32
  %lshr2 = lshr i32 %zext2, 2
  %trunc = trunc i32 %lshr2 to i8
  ret i8 %trunc
}
; Vector version: all lanes' shift amounts fit in i16, so the
; <2 x i32> chain narrows to <2 x i16>.
define <2 x i16> @lshr_vector(<2 x i8> %x) {
; CHECK-LABEL: @lshr_vector(
; CHECK-NEXT:    [[Z:%.*]] = zext <2 x i8> [[X:%.*]] to <2 x i16>
; CHECK-NEXT:    [[ZA:%.*]] = and <2 x i16> [[Z]], <i16 7, i16 8>
; CHECK-NEXT:    [[S:%.*]] = lshr <2 x i16> [[Z]], [[ZA]]
; CHECK-NEXT:    [[A:%.*]] = add <2 x i16> [[S]], [[Z]]
; CHECK-NEXT:    [[S2:%.*]] = lshr <2 x i16> [[A]], <i16 4, i16 5>
; CHECK-NEXT:    ret <2 x i16> [[S2]]
;
  %z = zext <2 x i8> %x to <2 x i32>
  %za = and <2 x i32> %z, <i32 7, i32 8>
  %s = lshr <2 x i32> %z, %za
  %a = add <2 x i32> %s, %z
  %s2 = lshr <2 x i32> %a, <i32 4, i32 5>
  %t = trunc <2 x i32> %s2 to <2 x i16>
  ret <2 x i16> %t
}
; Negative test - can only fold to <2 x i16>, requiring a new vector type
; Narrowing would require creating a <2 x i16> intermediate type not
; already present, so the transform bails and the IR is unchanged.
define <2 x i8> @lshr_vector_no_new_vector_type(<2 x i8> %x) {
; CHECK-LABEL: @lshr_vector_no_new_vector_type(
; CHECK-NEXT:    [[Z:%.*]] = zext <2 x i8> [[X:%.*]] to <2 x i32>
; CHECK-NEXT:    [[ZA:%.*]] = and <2 x i32> [[Z]], <i32 7, i32 8>
; CHECK-NEXT:    [[S:%.*]] = lshr <2 x i32> [[Z]], [[ZA]]
; CHECK-NEXT:    [[A:%.*]] = add <2 x i32> [[S]], [[Z]]
; CHECK-NEXT:    [[S2:%.*]] = lshr <2 x i32> [[A]], <i32 4, i32 5>
; CHECK-NEXT:    [[T:%.*]] = trunc <2 x i32> [[S2]] to <2 x i8>
; CHECK-NEXT:    ret <2 x i8> [[T]]
;
  %z = zext <2 x i8> %x to <2 x i32>
  %za = and <2 x i32> %z, <i32 7, i32 8>
  %s = lshr <2 x i32> %z, %za
  %a = add <2 x i32> %s, %z
  %s2 = lshr <2 x i32> %a, <i32 4, i32 5>
  %t = trunc <2 x i32> %s2 to <2 x i8>
  ret <2 x i8> %t
}
; Negative test: lane 0 shifts by 16, which is >= the i16 narrow width,
; so the vector chain cannot be narrowed.
define <2 x i16> @lshr_vector_large_shift_amount(<2 x i8> %x) {
; CHECK-LABEL: @lshr_vector_large_shift_amount(
; CHECK-NEXT:    [[Z:%.*]] = zext <2 x i8> [[X:%.*]] to <2 x i32>
; CHECK-NEXT:    [[ZA:%.*]] = and <2 x i32> [[Z]], <i32 7, i32 8>
; CHECK-NEXT:    [[S:%.*]] = lshr <2 x i32> [[Z]], [[ZA]]
; CHECK-NEXT:    [[A:%.*]] = add <2 x i32> [[S]], [[Z]]
; CHECK-NEXT:    [[S2:%.*]] = lshr <2 x i32> [[A]], <i32 16, i32 5>
; CHECK-NEXT:    [[T:%.*]] = trunc <2 x i32> [[S2]] to <2 x i16>
; CHECK-NEXT:    ret <2 x i16> [[T]]
;
  %z = zext <2 x i8> %x to <2 x i32>
  %za = and <2 x i32> %z, <i32 7, i32 8>
  %s = lshr <2 x i32> %z, %za
  %a = add <2 x i32> %s, %z
  %s2 = lshr <2 x i32> %a, <i32 16, i32 5>
  %t = trunc <2 x i32> %s2 to <2 x i16>
  ret <2 x i16> %t
}
; The 'exact' flag must be preserved when the shift is narrowed.
define i16 @lshr_exact(i16 %x) {
; CHECK-LABEL: @lshr_exact(
; CHECK-NEXT:    [[LSHR:%.*]] = lshr exact i16 [[X:%.*]], 15
; CHECK-NEXT:    ret i16 [[LSHR]]
;
  %zext = zext i16 %x to i32
  %lshr = lshr exact i32 %zext, 15
  %trunc = trunc i32 %lshr to i16
  ret i16 %trunc
}
; Negative test: xor with -1 sets the high bits, so the shifted value
; does not fit in i16 and the chain must stay in i32.
define i16 @lshr_negative_operand(i16 %x) {
; CHECK-LABEL: @lshr_negative_operand(
; CHECK-NEXT:    [[ZEXT:%.*]] = zext i16 [[X:%.*]] to i32
; CHECK-NEXT:    [[XOR:%.*]] = xor i32 -1, [[ZEXT]]
; CHECK-NEXT:    [[LSHR2:%.*]] = lshr i32 [[XOR]], 2
; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[LSHR2]] to i16
; CHECK-NEXT:    ret i16 [[TRUNC]]
;
  %zext = zext i16 %x to i32
  %xor = xor i32 -1, %zext
  %lshr2 = lshr i32 %xor, 2
  %trunc = trunc i32 %lshr2 to i16
  ret i16 %trunc
}
; We may encounter unoptimized IR as below,
; so don't crash by assuming that we can
; apply instruction flags (exact) if there
; is no defining instruction to take them from.
; Non-canonical IR: an exact lshr of constants that has not been
; constant-folded. The pass must not crash trying to read flags from a
; non-instruction; the whole expression folds to 8.
; (The %sh constants are reconstructed to satisfy the 'ret i8 8' CHECK:
; lshr exact i32 32, 2 == 8 — verify against the upstream test.)
define i8 @non_canonical_crash() {
; CHECK-LABEL: @non_canonical_crash(
; CHECK-NEXT:    ret i8 8
;
  %sh = lshr exact i32 32, 2
  %tr = trunc i32 %sh to i8
  ret i8 %tr
}