1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
5 ; icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
6 ; we should move shifts to the same hand of 'and', i.e. e.g. rewrite as
7 ; icmp eq/ne (and (((x shift Q) shift K), y)), 0
8 ; We are only interested in opposite logical shifts here.
9 ; We still can handle the case where there is a truncation between a shift and
10 ; an 'and'. If it's trunc-of-shl - no extra legality check is needed.
12 ;-------------------------------------------------------------------------------
14 ;-------------------------------------------------------------------------------
16 define i1 @t0_const_after_fold_lshr_shl_ne(i32 %x, i64 %y, i32 %len) {
17 ; CHECK-LABEL: @t0_const_after_fold_lshr_shl_ne(
18 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
19 ; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
20 ; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[Y:%.*]], [[TMP2]]
21 ; CHECK-NEXT: [[T5:%.*]] = icmp ne i64 [[TMP3]], 0
22 ; CHECK-NEXT: ret i1 [[T5]]
24 %t0 = sub i32 32, %len
25 %t1 = lshr i32 %x, %t0
26 %t2 = add i32 %len, -1
27 %t2_wide = zext i32 %t2 to i64
28 %t3 = shl i64 %y, %t2_wide
29 %t3_trunc = trunc i64 %t3 to i32
30 %t4 = and i32 %t1, %t3_trunc
31 %t5 = icmp ne i32 %t4, 0
35 ;-------------------------------------------------------------------------------
36 ; Very basic vector tests
37 ;-------------------------------------------------------------------------------
39 define <2 x i1> @t1_vec_splat(<2 x i32> %x, <2 x i64> %y, <2 x i32> %len) {
40 ; CHECK-LABEL: @t1_vec_splat(
41 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], splat (i32 31)
42 ; CHECK-NEXT: [[TMP2:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
43 ; CHECK-NEXT: [[TMP3:%.*]] = and <2 x i64> [[Y:%.*]], [[TMP2]]
44 ; CHECK-NEXT: [[T5:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
45 ; CHECK-NEXT: ret <2 x i1> [[T5]]
47 %t0 = sub <2 x i32> <i32 32, i32 32>, %len
48 %t1 = lshr <2 x i32> %x, %t0
49 %t2 = add <2 x i32> %len, <i32 -1, i32 -1>
50 %t2_wide = zext <2 x i32> %t2 to <2 x i64>
51 %t3 = shl <2 x i64> %y, %t2_wide
52 %t3_trunc = trunc <2 x i64> %t3 to <2 x i32>
53 %t4 = and <2 x i32> %t1, %t3_trunc
54 %t5 = icmp ne <2 x i32> %t4, <i32 0, i32 0>
58 define <2 x i1> @t2_vec_nonsplat(<2 x i32> %x, <2 x i64> %y, <2 x i32> %len) {
59 ; CHECK-LABEL: @t2_vec_nonsplat(
60 ; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64>
61 ; CHECK-NEXT: [[TMP2:%.*]] = lshr <2 x i64> [[TMP1]], <i64 31, i64 30>
62 ; CHECK-NEXT: [[TMP3:%.*]] = and <2 x i64> [[TMP2]], [[Y:%.*]]
63 ; CHECK-NEXT: [[T5:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
64 ; CHECK-NEXT: ret <2 x i1> [[T5]]
66 %t0 = sub <2 x i32> <i32 30, i32 32>, %len
67 %t1 = lshr <2 x i32> %x, %t0
68 %t2 = add <2 x i32> %len, <i32 1, i32 -2>
69 %t2_wide = zext <2 x i32> %t2 to <2 x i64>
70 %t3 = shl <2 x i64> %y, %t2_wide
71 %t3_trunc = trunc <2 x i64> %t3 to <2 x i32>
72 %t4 = and <2 x i32> %t1, %t3_trunc
73 %t5 = icmp ne <2 x i32> %t4, <i32 0, i32 0>
77 ;-------------------------------------------------------------------------------
79 ;-------------------------------------------------------------------------------
84 ; While 'and' is commutative, the 'trunc' *always* seems to be getting
85 ; canonicalized to the RHS; it does not seem possible to prevent that.
87 ;-------------------------------------------------------------------------------
89 ;-------------------------------------------------------------------------------
91 declare void @use32(i32)
92 declare void @use64(i64)
94 ; Nope, everything has extra uses.
95 define i1 @t3_oneuse0(i32 %x, i64 %y, i32 %len) {
96 ; CHECK-LABEL: @t3_oneuse0(
97 ; CHECK-NEXT: [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
98 ; CHECK-NEXT: call void @use32(i32 [[T0]])
99 ; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[X:%.*]], [[T0]]
100 ; CHECK-NEXT: call void @use32(i32 [[T1]])
101 ; CHECK-NEXT: [[T2:%.*]] = add i32 [[LEN]], -1
102 ; CHECK-NEXT: call void @use32(i32 [[T2]])
103 ; CHECK-NEXT: [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
104 ; CHECK-NEXT: call void @use64(i64 [[T2_WIDE]])
105 ; CHECK-NEXT: [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
106 ; CHECK-NEXT: call void @use64(i64 [[T3]])
107 ; CHECK-NEXT: [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
108 ; CHECK-NEXT: call void @use32(i32 [[T3_TRUNC]])
109 ; CHECK-NEXT: [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
110 ; CHECK-NEXT: call void @use32(i32 [[T4]])
111 ; CHECK-NEXT: [[T5:%.*]] = icmp ne i32 [[T4]], 0
112 ; CHECK-NEXT: ret i1 [[T5]]
114 %t0 = sub i32 32, %len
115 call void @use32(i32 %t0)
116 %t1 = lshr i32 %x, %t0
117 call void @use32(i32 %t1)
118 %t2 = add i32 %len, -1
119 call void @use32(i32 %t2)
120 %t2_wide = zext i32 %t2 to i64
121 call void @use64(i64 %t2_wide)
122 %t3 = shl i64 %y, %t2_wide
123 call void @use64(i64 %t3)
124 %t3_trunc = trunc i64 %t3 to i32
125 call void @use32(i32 %t3_trunc)
126 %t4 = and i32 %t1, %t3_trunc
127 call void @use32(i32 %t4)
128 %t5 = icmp ne i32 %t4, 0
132 ; Nope, still too many extra uses.
133 define i1 @t4_oneuse1(i32 %x, i64 %y, i32 %len) {
134 ; CHECK-LABEL: @t4_oneuse1(
135 ; CHECK-NEXT: [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
136 ; CHECK-NEXT: call void @use32(i32 [[T0]])
137 ; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[X:%.*]], [[T0]]
138 ; CHECK-NEXT: call void @use32(i32 [[T1]])
139 ; CHECK-NEXT: [[T2:%.*]] = add i32 [[LEN]], -1
140 ; CHECK-NEXT: call void @use32(i32 [[T2]])
141 ; CHECK-NEXT: [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
142 ; CHECK-NEXT: call void @use64(i64 [[T2_WIDE]])
143 ; CHECK-NEXT: [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
144 ; CHECK-NEXT: call void @use64(i64 [[T3]])
145 ; CHECK-NEXT: [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
146 ; CHECK-NEXT: call void @use32(i32 [[T3_TRUNC]])
147 ; CHECK-NEXT: [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
148 ; CHECK-NEXT: [[T5:%.*]] = icmp ne i32 [[T4]], 0
149 ; CHECK-NEXT: ret i1 [[T5]]
151 %t0 = sub i32 32, %len
152 call void @use32(i32 %t0)
153 %t1 = lshr i32 %x, %t0
154 call void @use32(i32 %t1)
155 %t2 = add i32 %len, -1
156 call void @use32(i32 %t2)
157 %t2_wide = zext i32 %t2 to i64
158 call void @use64(i64 %t2_wide)
159 %t3 = shl i64 %y, %t2_wide
160 call void @use64(i64 %t3)
161 %t3_trunc = trunc i64 %t3 to i32
162 call void @use32(i32 %t3_trunc)
163 %t4 = and i32 %t1, %t3_trunc ; no extra uses
164 %t5 = icmp ne i32 %t4, 0
168 ; Still too many extra uses.
169 define i1 @t5_oneuse2(i32 %x, i64 %y, i32 %len) {
170 ; CHECK-LABEL: @t5_oneuse2(
171 ; CHECK-NEXT: [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
172 ; CHECK-NEXT: call void @use32(i32 [[T0]])
173 ; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[X:%.*]], [[T0]]
174 ; CHECK-NEXT: [[T2:%.*]] = add i32 [[LEN]], -1
175 ; CHECK-NEXT: call void @use32(i32 [[T2]])
176 ; CHECK-NEXT: [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
177 ; CHECK-NEXT: call void @use64(i64 [[T2_WIDE]])
178 ; CHECK-NEXT: [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
179 ; CHECK-NEXT: call void @use64(i64 [[T3]])
180 ; CHECK-NEXT: [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
181 ; CHECK-NEXT: call void @use32(i32 [[T3_TRUNC]])
182 ; CHECK-NEXT: [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
183 ; CHECK-NEXT: [[T5:%.*]] = icmp ne i32 [[T4]], 0
184 ; CHECK-NEXT: ret i1 [[T5]]
186 %t0 = sub i32 32, %len
187 call void @use32(i32 %t0)
188 %t1 = lshr i32 %x, %t0 ; no extra uses
189 %t2 = add i32 %len, -1
190 call void @use32(i32 %t2)
191 %t2_wide = zext i32 %t2 to i64
192 call void @use64(i64 %t2_wide)
193 %t3 = shl i64 %y, %t2_wide
194 call void @use64(i64 %t3)
195 %t3_trunc = trunc i64 %t3 to i32
196 call void @use32(i32 %t3_trunc)
197 %t4 = and i32 %t1, %t3_trunc ; no extra uses
198 %t5 = icmp ne i32 %t4, 0
202 ; Ok, trunc has no extra uses.
203 define i1 @t6_oneuse3(i32 %x, i64 %y, i32 %len) {
204 ; CHECK-LABEL: @t6_oneuse3(
205 ; CHECK-NEXT: [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
206 ; CHECK-NEXT: call void @use32(i32 [[T0]])
207 ; CHECK-NEXT: [[T2:%.*]] = add i32 [[LEN]], -1
208 ; CHECK-NEXT: call void @use32(i32 [[T2]])
209 ; CHECK-NEXT: [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
210 ; CHECK-NEXT: call void @use64(i64 [[T2_WIDE]])
211 ; CHECK-NEXT: [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
212 ; CHECK-NEXT: call void @use64(i64 [[T3]])
213 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
214 ; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
215 ; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[Y]], [[TMP2]]
216 ; CHECK-NEXT: [[T5:%.*]] = icmp ne i64 [[TMP3]], 0
217 ; CHECK-NEXT: ret i1 [[T5]]
219 %t0 = sub i32 32, %len
220 call void @use32(i32 %t0)
221 %t1 = lshr i32 %x, %t0 ; no extra uses
222 %t2 = add i32 %len, -1
223 call void @use32(i32 %t2)
224 %t2_wide = zext i32 %t2 to i64
225 call void @use64(i64 %t2_wide)
226 %t3 = shl i64 %y, %t2_wide
227 call void @use64(i64 %t3)
228 %t3_trunc = trunc i64 %t3 to i32 ; no extra uses
229 %t4 = and i32 %t1, %t3_trunc ; no extra uses
230 %t5 = icmp ne i32 %t4, 0
234 ; Ok, shift amount of non-truncated shift has no extra uses.
235 define i1 @t7_oneuse4(i32 %x, i64 %y, i32 %len) {
236 ; CHECK-LABEL: @t7_oneuse4(
237 ; CHECK-NEXT: [[T2:%.*]] = add i32 [[LEN:%.*]], -1
238 ; CHECK-NEXT: call void @use32(i32 [[T2]])
239 ; CHECK-NEXT: [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
240 ; CHECK-NEXT: call void @use64(i64 [[T2_WIDE]])
241 ; CHECK-NEXT: [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
242 ; CHECK-NEXT: call void @use64(i64 [[T3]])
243 ; CHECK-NEXT: [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
244 ; CHECK-NEXT: call void @use32(i32 [[T3_TRUNC]])
245 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
246 ; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
247 ; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[Y]], [[TMP2]]
248 ; CHECK-NEXT: [[T5:%.*]] = icmp ne i64 [[TMP3]], 0
249 ; CHECK-NEXT: ret i1 [[T5]]
251 %t0 = sub i32 32, %len ; no extra uses
252 %t1 = lshr i32 %x, %t0 ; no extra uses
253 %t2 = add i32 %len, -1
254 call void @use32(i32 %t2)
255 %t2_wide = zext i32 %t2 to i64
256 call void @use64(i64 %t2_wide)
257 %t3 = shl i64 %y, %t2_wide
258 call void @use64(i64 %t3)
259 %t3_trunc = trunc i64 %t3 to i32
260 call void @use32(i32 %t3_trunc)
261 %t4 = and i32 %t1, %t3_trunc ; no extra uses
262 %t5 = icmp ne i32 %t4, 0
266 ; Ok, non-truncated shift is of constant;
267 define i1 @t8_oneuse5(i32 %x, i64 %y, i32 %len) {
268 ; CHECK-LABEL: @t8_oneuse5(
269 ; CHECK-NEXT: [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
270 ; CHECK-NEXT: call void @use32(i32 [[T0]])
271 ; CHECK-NEXT: [[T1:%.*]] = lshr i32 -52543054, [[T0]]
272 ; CHECK-NEXT: call void @use32(i32 [[T1]])
273 ; CHECK-NEXT: [[T2:%.*]] = add i32 [[LEN]], -1
274 ; CHECK-NEXT: call void @use32(i32 [[T2]])
275 ; CHECK-NEXT: [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
276 ; CHECK-NEXT: call void @use64(i64 [[T2_WIDE]])
277 ; CHECK-NEXT: [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
278 ; CHECK-NEXT: call void @use64(i64 [[T3]])
279 ; CHECK-NEXT: [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
280 ; CHECK-NEXT: call void @use32(i32 [[T3_TRUNC]])
281 ; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[Y]], 1
282 ; CHECK-NEXT: [[T5:%.*]] = icmp ne i64 [[TMP1]], 0
283 ; CHECK-NEXT: ret i1 [[T5]]
285 %t0 = sub i32 32, %len
286 call void @use32(i32 %t0)
287 %t1 = lshr i32 4242424242, %t0 ; shift-of-constant
288 call void @use32(i32 %t1)
289 %t2 = add i32 %len, -1
290 call void @use32(i32 %t2)
291 %t2_wide = zext i32 %t2 to i64
292 call void @use64(i64 %t2_wide)
293 %t3 = shl i64 %y, %t2_wide
294 call void @use64(i64 %t3)
295 %t3_trunc = trunc i64 %t3 to i32
296 call void @use32(i32 %t3_trunc)
297 %t4 = and i32 %t1, %t3_trunc ; no extra uses
298 %t5 = icmp ne i32 %t4, 0
302 ; Ok, truncated shift is of constant;
303 define i1 @t9_oneuse5(i32 %x, i64 %y, i32 %len) {
304 ; CHECK-LABEL: @t9_oneuse5(
305 ; CHECK-NEXT: [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
306 ; CHECK-NEXT: call void @use32(i32 [[T0]])
307 ; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[X:%.*]], [[T0]]
308 ; CHECK-NEXT: call void @use32(i32 [[T1]])
309 ; CHECK-NEXT: [[T2:%.*]] = add i32 [[LEN]], -1
310 ; CHECK-NEXT: call void @use32(i32 [[T2]])
311 ; CHECK-NEXT: [[T2_WIDE:%.*]] = zext i32 [[T2]] to i64
312 ; CHECK-NEXT: call void @use64(i64 [[T2_WIDE]])
313 ; CHECK-NEXT: [[T3:%.*]] = shl i64 4242424242, [[T2_WIDE]]
314 ; CHECK-NEXT: call void @use64(i64 [[T3]])
315 ; CHECK-NEXT: [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
316 ; CHECK-NEXT: call void @use32(i32 [[T3_TRUNC]])
317 ; CHECK-NEXT: ret i1 false
319 %t0 = sub i32 32, %len
320 call void @use32(i32 %t0)
321 %t1 = lshr i32 %x, %t0 ; shift-of-constant
322 call void @use32(i32 %t1)
323 %t2 = add i32 %len, -1
324 call void @use32(i32 %t2)
325 %t2_wide = zext i32 %t2 to i64
326 call void @use64(i64 %t2_wide)
327 %t3 = shl i64 4242424242, %t2_wide
328 call void @use64(i64 %t3)
329 %t3_trunc = trunc i64 %t3 to i32
330 call void @use32(i32 %t3_trunc)
331 %t4 = and i32 %t1, %t3_trunc ; no extra uses
332 %t5 = icmp ne i32 %t4, 0
336 ;-------------------------------------------------------------------------------
337 ; Commutativity with extra uses
338 ;-------------------------------------------------------------------------------
340 ; While 'and' is commutative, the 'trunc' *always* seems to be getting
341 ; canonicalized to the RHS; it does not seem possible to prevent that.
345 ; Constant shift amounts
347 define i1 @t10_constants(i32 %x, i64 %y) {
348 ; CHECK-LABEL: @t10_constants(
349 ; CHECK-NEXT: [[Y_TR:%.*]] = trunc i64 [[Y:%.*]] to i32
350 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 26
351 ; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[Y_TR]]
352 ; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[TMP2]], 0
353 ; CHECK-NEXT: ret i1 [[T3]]
355 %t0 = lshr i32 %x, 12
357 %t1_trunc = trunc i64 %t1 to i32
358 %t2 = and i32 %t0, %t1_trunc
359 %t3 = icmp ne i32 %t2, 0
363 define <2 x i1> @t11_constants_vec_splat(<2 x i32> %x, <2 x i64> %y) {
364 ; CHECK-LABEL: @t11_constants_vec_splat(
365 ; CHECK-NEXT: [[Y_TR:%.*]] = trunc <2 x i64> [[Y:%.*]] to <2 x i32>
366 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], splat (i32 26)
367 ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[Y_TR]]
368 ; CHECK-NEXT: [[T3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer
369 ; CHECK-NEXT: ret <2 x i1> [[T3]]
371 %t0 = lshr <2 x i32> %x, <i32 12, i32 12>
372 %t1 = shl <2 x i64> %y, <i64 14, i64 14>
373 %t1_trunc = trunc <2 x i64> %t1 to <2 x i32>
374 %t2 = and <2 x i32> %t0, %t1_trunc
375 %t3 = icmp ne <2 x i32> %t2, <i32 0, i32 0>
378 define <2 x i1> @t12_constants_vec_nonsplat(<2 x i32> %x, <2 x i64> %y) {
379 ; CHECK-LABEL: @t12_constants_vec_nonsplat(
380 ; CHECK-NEXT: [[Y_TR:%.*]] = trunc <2 x i64> [[Y:%.*]] to <2 x i32>
381 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], splat (i32 28)
382 ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[Y_TR]]
383 ; CHECK-NEXT: [[T3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer
384 ; CHECK-NEXT: ret <2 x i1> [[T3]]
386 %t0 = lshr <2 x i32> %x, <i32 12, i32 14>
387 %t1 = shl <2 x i64> %y, <i64 16, i64 14>
388 %t1_trunc = trunc <2 x i64> %t1 to <2 x i32>
389 %t2 = and <2 x i32> %t0, %t1_trunc
390 %t3 = icmp ne <2 x i32> %t2, <i32 0, i32 0>
394 ;-------------------------------------------------------------------------------
396 ;-------------------------------------------------------------------------------
398 define i1 @n13_overshift(i32 %x, i64 %y, i32 %len) {
399 ; CHECK-LABEL: @n13_overshift(
400 ; CHECK-NEXT: [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
401 ; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[X:%.*]], [[T0]]
402 ; CHECK-NEXT: [[T2:%.*]] = add i32 [[LEN]], 32
403 ; CHECK-NEXT: [[T2_WIDE:%.*]] = zext nneg i32 [[T2]] to i64
404 ; CHECK-NEXT: [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
405 ; CHECK-NEXT: [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
406 ; CHECK-NEXT: [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
407 ; CHECK-NEXT: [[T5:%.*]] = icmp ne i32 [[T4]], 0
408 ; CHECK-NEXT: ret i1 [[T5]]
410 %t0 = sub i32 32, %len
411 %t1 = lshr i32 %x, %t0
412 %t2 = add i32 %len, 32 ; too much
413 %t2_wide = zext i32 %t2 to i64
414 %t3 = shl i64 %y, %t2_wide
415 %t3_trunc = trunc i64 %t3 to i32
416 %t4 = and i32 %t1, %t3_trunc
417 %t5 = icmp ne i32 %t4, 0
421 define i1 @n14_trunc_of_lshr(i64 %x, i32 %y, i32 %len) {
422 ; CHECK-LABEL: @n14_trunc_of_lshr(
423 ; CHECK-NEXT: [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
424 ; CHECK-NEXT: [[T0_WIDE:%.*]] = zext nneg i32 [[T0]] to i64
425 ; CHECK-NEXT: [[T1:%.*]] = lshr i64 [[X:%.*]], [[T0_WIDE]]
426 ; CHECK-NEXT: [[T1_TRUNC:%.*]] = trunc i64 [[T1]] to i32
427 ; CHECK-NEXT: [[T2:%.*]] = add i32 [[LEN]], -1
428 ; CHECK-NEXT: [[T3:%.*]] = shl i32 [[Y:%.*]], [[T2]]
429 ; CHECK-NEXT: [[T4:%.*]] = and i32 [[T3]], [[T1_TRUNC]]
430 ; CHECK-NEXT: [[T5:%.*]] = icmp ne i32 [[T4]], 0
431 ; CHECK-NEXT: ret i1 [[T5]]
433 %t0 = sub i32 32, %len
434 %t0_wide = zext i32 %t0 to i64
435 %t1 = lshr i64 %x, %t0_wide
436 %t1_trunc = trunc i64 %t1 to i32
437 %t2 = add i32 %len, -1
438 %t3 = shl i32 %y, %t2
439 %t4 = and i32 %t1_trunc, %t3
440 %t5 = icmp ne i32 %t4, 0
444 ; Completely variable shift amounts
446 define i1 @n15_variable_shamts(i32 %x, i64 %y, i32 %shamt0, i64 %shamt1) {
447 ; CHECK-LABEL: @n15_variable_shamts(
448 ; CHECK-NEXT: [[T0:%.*]] = lshr i32 [[X:%.*]], [[SHAMT0:%.*]]
449 ; CHECK-NEXT: [[T1:%.*]] = shl i64 [[Y:%.*]], [[SHAMT1:%.*]]
450 ; CHECK-NEXT: [[T1_TRUNC:%.*]] = trunc i64 [[T1]] to i32
451 ; CHECK-NEXT: [[T2:%.*]] = and i32 [[T0]], [[T1_TRUNC]]
452 ; CHECK-NEXT: [[T3:%.*]] = icmp ne i32 [[T2]], 0
453 ; CHECK-NEXT: ret i1 [[T3]]
455 %t0 = lshr i32 %x, %shamt0
456 %t1 = shl i64 %y, %shamt1
457 %t1_trunc = trunc i64 %t1 to i32
458 %t2 = and i32 %t1_trunc, %t0
459 %t3 = icmp ne i32 %t2, 0