; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
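
; The tests below exercise folding a pair of single-bit tests of %k into one
; masked compare, e.g. ((k & 4) == 0) | ((k & 8) == 0) -> (k & 12) != 12, and
; the dual 'and of icmp ne' form into an icmp eq, as the checks show.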
define i1 @and_consts(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @and_consts(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[K:%.*]], 12
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 12
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %t1 = and i32 4, %k
  %t2 = icmp eq i32 %t1, 0
  %t5 = and i32 8, %k
  %t6 = icmp eq i32 %t5, 0
  %or = or i1 %t2, %t6
  ret i1 %or
}

define i1 @foo1_and(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and(
; CHECK-NEXT:    [[T:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %t = shl i32 1, %c1
  %t4 = shl i32 1, %c2
  %t1 = and i32 %t, %k
  %t2 = icmp eq i32 %t1, 0
  %t5 = and i32 %t4, %k
  %t6 = icmp eq i32 %t5, 0
  %or = or i1 %t2, %t6
  ret i1 %or
}

; Same as above, but with the operands of one of the ands commuted, but not
; the other.
define i1 @foo1_and_commuted(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_commuted(
; CHECK-NEXT:    [[K2:%.*]] = mul i32 [[K:%.*]], [[K]]
; CHECK-NEXT:    [[T:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K2]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %k2 = mul i32 %k, %k ; to trick the complexity sorting
  %t = shl i32 1, %c1
  %t4 = shl i32 1, %c2
  %t1 = and i32 %k2, %t
  %t2 = icmp eq i32 %t1, 0
  %t5 = and i32 %t4, %k2
  %t6 = icmp eq i32 %t5, 0
  %or = or i1 %t2, %t6
  ret i1 %or
}

define i1 @or_consts(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @or_consts(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[K:%.*]], 12
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 12
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %t1 = and i32 4, %k
  %t2 = icmp ne i32 %t1, 0
  %t5 = and i32 8, %k
  %t6 = icmp ne i32 %t5, 0
  %or = and i1 %t2, %t6
  ret i1 %or
}

define i1 @foo1_or(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or(
; CHECK-NEXT:    [[T:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %t = shl i32 1, %c1
  %t4 = shl i32 1, %c2
  %t1 = and i32 %t, %k
  %t2 = icmp ne i32 %t1, 0
  %t5 = and i32 %t4, %k
  %t6 = icmp ne i32 %t5, 0
  %or = and i1 %t2, %t6
  ret i1 %or
}

; Same as above, but with the operands of one of the ands commuted, but not
; the other.
define i1 @foo1_or_commuted(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or_commuted(
; CHECK-NEXT:    [[K2:%.*]] = mul i32 [[K:%.*]], [[K]]
; CHECK-NEXT:    [[T:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[K2]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %k2 = mul i32 %k, %k ; to trick the complexity sorting
  %t = shl i32 1, %c1
  %t4 = shl i32 1, %c2
  %t1 = and i32 %k2, %t
  %t2 = icmp ne i32 %t1, 0
  %t5 = and i32 %t4, %k2
  %t6 = icmp ne i32 %t5, 0
  %or = and i1 %t2, %t6
  ret i1 %or
}

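; Same fold when one of the single-bit masks is built by shifting the sign bit
; right: -2147483648 is i32 0x80000000, so 'lshr i32 -2147483648, %c2' also
; produces exactly one set bit.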
define i1 @foo1_and_signbit_lshr(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_signbit_lshr(
; CHECK-NEXT:    [[T:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = lshr i32 -2147483648, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %t = shl i32 1, %c1
  %t4 = lshr i32 -2147483648, %c2
  %t1 = and i32 %t, %k
  %t2 = icmp eq i32 %t1, 0
  %t5 = and i32 %t4, %k
  %t6 = icmp eq i32 %t5, 0
  %or = or i1 %t2, %t6
  ret i1 %or
}

define i1 @foo1_or_signbit_lshr(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or_signbit_lshr(
; CHECK-NEXT:    [[T:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = lshr i32 -2147483648, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T]], [[T4]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %t = shl i32 1, %c1
  %t4 = lshr i32 -2147483648, %c2
  %t1 = and i32 %t, %k
  %t2 = icmp ne i32 %t1, 0
  %t5 = and i32 %t4, %k
  %t6 = icmp ne i32 %t5, 0
  %or = and i1 %t2, %t6
  ret i1 %or
}

; Same as the last two, but with the shift-of-signbit replaced by an 'icmp s*'
; sign test. The one-bit-test fold does not apply here, as the checks show.
define i1 @foo1_and_signbit_lshr_without_shifting_signbit(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = or i1 %t2, %t4
  ret i1 %or
}

define i1 @foo1_or_signbit_lshr_without_shifting_signbit(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or_signbit_lshr_without_shifting_signbit(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp ne i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp slt i32 [[T3]], 0
; CHECK-NEXT:    [[OR:%.*]] = and i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp ne i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp slt i32 %t3, 0
  %or = and i1 %t2, %t4
  ret i1 %or
}

; Shift-of-signbit replaced with 'icmp s*' for both sides. Here the two sign
; tests do fold, into a single sign test of the 'and' of the shifted values.
define i1 @foo1_and_signbit_lshr_without_shifting_signbit_both_sides(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_both_sides(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 [[K:%.*]], [[C1:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[T0]], [[T2]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], -1
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %t0 = shl i32 %k, %c1
  %t1 = icmp sgt i32 %t0, -1
  %t2 = shl i32 %k, %c2
  %t3 = icmp sgt i32 %t2, -1
  %or = or i1 %t1, %t3
  ret i1 %or
}

define i1 @foo1_or_signbit_lshr_without_shifting_signbit_both_sides(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_or_signbit_lshr_without_shifting_signbit_both_sides(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 [[K:%.*]], [[C1:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[T0]], [[T2]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %t0 = shl i32 %k, %c1
  %t1 = icmp slt i32 %t0, 0
  %t2 = shl i32 %k, %c2
  %t3 = icmp slt i32 %t2, 0
  %or = and i1 %t1, %t3
  ret i1 %or
}

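; Extra-use tests for the @foo1_and pattern: the fold is still expected to
; apply when an intermediate shl, and, or icmp result has another use; the
; reused instruction is simply kept alongside the folded compare.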
define i1 @foo1_and_extra_use_shl(i32 %k, i32 %c1, i32 %c2, i32* %p) {
; CHECK-LABEL: @foo1_and_extra_use_shl(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    store i32 [[T0]], i32* [[P:%.*]], align 4
; CHECK-NEXT:    [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %t0 = shl i32 1, %c1
  store i32 %t0, i32* %p ; extra use of shl
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  %or = or i1 %t3, %t5
  ret i1 %or
}

define i1 @foo1_and_extra_use_and(i32 %k, i32 %c1, i32 %c2, i32* %p) {
; CHECK-LABEL: @foo1_and_extra_use_and(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    store i32 [[T2]], i32* [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[K]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  store i32 %t2, i32* %p ; extra use of and
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  %or = or i1 %t3, %t5
  ret i1 %or
}

define i1 @foo1_and_extra_use_cmp(i32 %k, i32 %c1, i32 %c2, i1* %p) {
; CHECK-LABEL: @foo1_and_extra_use_cmp(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T3:%.*]] = icmp eq i32 [[T2]], 0
; CHECK-NEXT:    store i1 [[T3]], i1* [[P:%.*]], align 1
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[K]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  store i1 %t3, i1* %p ; extra use of cmp
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  %or = or i1 %t3, %t5
  ret i1 %or
}

define i1 @foo1_and_extra_use_shl2(i32 %k, i32 %c1, i32 %c2, i32* %p) {
; CHECK-LABEL: @foo1_and_extra_use_shl2(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT:    store i32 [[T1]], i32* [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  store i32 %t1, i32* %p ; extra use of shl
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  %or = or i1 %t3, %t5
  ret i1 %or
}

define i1 @foo1_and_extra_use_and2(i32 %k, i32 %c1, i32 %c2, i32* %p) {
; CHECK-LABEL: @foo1_and_extra_use_and2(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[K:%.*]]
; CHECK-NEXT:    store i32 [[T4]], i32* [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[K]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  store i32 %t4, i32* %p ; extra use of and
  %t5 = icmp eq i32 %t4, 0
  %or = or i1 %t3, %t5
  ret i1 %or
}

define i1 @foo1_and_extra_use_cmp2(i32 %k, i32 %c1, i32 %c2, i1* %p) {
; CHECK-LABEL: @foo1_and_extra_use_cmp2(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = shl i32 1, [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[K:%.*]]
; CHECK-NEXT:    [[T5:%.*]] = icmp eq i32 [[T4]], 0
; CHECK-NEXT:    store i1 [[T5]], i1* [[P:%.*]], align 1
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[T0]], [[T1]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[K]]
; CHECK-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %t0 = shl i32 1, %c1
  %t1 = shl i32 1, %c2
  %t2 = and i32 %t0, %k
  %t3 = icmp eq i32 %t2, 0
  %t4 = and i32 %t1, %k
  %t5 = icmp eq i32 %t4, 0
  store i1 %t5, i1* %p ; extra use of cmp
  %or = or i1 %t3, %t5
  ret i1 %or
}

; Shift-of-signbit replaced with 'icmp s*', now with extra uses. As in the
; corresponding tests without extra uses above, no fold is expected here.
define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl1(i32 %k, i32 %c1, i32 %c2, i32* %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl1(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    store i32 [[T0]], i32* [[P:%.*]], align 4
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  store i32 %t0, i32* %p ; extra use of shl
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = or i1 %t2, %t4
  ret i1 %or
}

define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_and(i32 %k, i32 %c1, i32 %c2, i32* %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_and(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    store i32 [[T1]], i32* [[P:%.*]], align 4
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  store i32 %t1, i32* %p ; extra use of and
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = or i1 %t2, %t4
  ret i1 %or
}

define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp1(i32 %k, i32 %c1, i32 %c2, i1* %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp1(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    store i1 [[T2]], i1* [[P:%.*]], align 1
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  store i1 %t2, i1* %p ; extra use of cmp
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = or i1 %t2, %t4
  ret i1 %or
}

define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl2(i32 %k, i32 %c1, i32 %c2, i32* %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_shl2(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    store i32 [[T3]], i32* [[P:%.*]], align 4
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  store i32 %t3, i32* %p ; extra use of shl
  %t4 = icmp sgt i32 %t3, -1
  %or = or i1 %t2, %t4
  ret i1 %or
}

define i1 @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp2(i32 %k, i32 %c1, i32 %c2, i1* %p) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_extra_use_cmp2(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 1, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    store i1 [[T4]], i1* [[P:%.*]], align 1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 1, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  store i1 %t4, i1* %p ; extra use of cmp
  %or = or i1 %t2, %t4
  ret i1 %or
}

; This test checks that we do not create an additional shift instruction when
; the fold fails: here the shifted value is 3, not a single set bit, so the
; mask is not a power of two and the fold does not apply.
define i1 @foo1_and_signbit_lshr_without_shifting_signbit_not_pwr2(i32 %k, i32 %c1, i32 %c2) {
; CHECK-LABEL: @foo1_and_signbit_lshr_without_shifting_signbit_not_pwr2(
; CHECK-NEXT:    [[T0:%.*]] = shl i32 3, [[C1:%.*]]
; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[K:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = icmp eq i32 [[T1]], 0
; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[K]], [[C2:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = icmp sgt i32 [[T3]], -1
; CHECK-NEXT:    [[OR:%.*]] = or i1 [[T2]], [[T4]]
; CHECK-NEXT:    ret i1 [[OR]]
;
  %t0 = shl i32 3, %c1
  %t1 = and i32 %t0, %k
  %t2 = icmp eq i32 %t1, 0
  %t3 = shl i32 %k, %c2
  %t4 = icmp sgt i32 %t3, -1
  %or = or i1 %t2, %t4
  ret i1 %or
}