; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

define i1 @test1(i32 %a) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[A:%.*]], -5
; CHECK-NEXT: ret i1 [[C]]
;
  %b = add i32 %a, 4
  %c = icmp ult i32 %b, 4
  ret i1 %c
}

define <2 x i1> @test1vec(<2 x i32> %a) {
; CHECK-LABEL: @test1vec(
; CHECK-NEXT: [[C:%.*]] = icmp ugt <2 x i32> [[A:%.*]], <i32 -5, i32 -5>
; CHECK-NEXT: ret <2 x i1> [[C]]
;
  %b = add <2 x i32> %a, <i32 4, i32 4>
  %c = icmp ult <2 x i32> %b, <i32 4, i32 4>
  ret <2 x i1> %c
}

define i1 @test2(i32 %a) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[A:%.*]], 4
; CHECK-NEXT: ret i1 [[C]]
;
  %b = sub i32 %a, 4
  %c = icmp ugt i32 %b, -5
  ret i1 %c
}

define <2 x i1> @test2vec(<2 x i32> %a) {
; CHECK-LABEL: @test2vec(
; CHECK-NEXT: [[C:%.*]] = icmp ult <2 x i32> [[A:%.*]], <i32 4, i32 4>
; CHECK-NEXT: ret <2 x i1> [[C]]
;
  %b = sub <2 x i32> %a, <i32 4, i32 4>
  %c = icmp ugt <2 x i32> %b, <i32 -5, i32 -5>
  ret <2 x i1> %c
}

define i1 @test3(i32 %a) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[A:%.*]], 2147483643
; CHECK-NEXT: ret i1 [[C]]
;
  %b = add i32 %a, 4
  %c = icmp slt i32 %b, 2147483652
  ret i1 %c
}

define <2 x i1> @test3vec(<2 x i32> %a) {
; CHECK-LABEL: @test3vec(
; CHECK-NEXT: [[C:%.*]] = icmp sgt <2 x i32> [[A:%.*]], <i32 2147483643, i32 2147483643>
; CHECK-NEXT: ret <2 x i1> [[C]]
;
  %b = add <2 x i32> %a, <i32 4, i32 4>
  %c = icmp slt <2 x i32> %b, <i32 2147483652, i32 2147483652>
  ret <2 x i1> %c
}

define i1 @test4(i32 %a) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[C:%.*]] = icmp slt i32 [[A:%.*]], -4
; CHECK-NEXT: ret i1 [[C]]
;
  %b = add i32 %a, 2147483652
  %c = icmp sge i32 %b, 4
  ret i1 %c
}

define { i32, i1 } @test4multiuse(i32 %a) {
; CHECK-LABEL: @test4multiuse(
; CHECK-NEXT: [[B:%.*]] = add i32 [[A:%.*]], -2147483644
; CHECK-NEXT: [[C:%.*]] = icmp slt i32 [[B]], -4
; CHECK-NEXT: [[TMP:%.*]] = insertvalue { i32, i1 } undef, i32 [[B]], 0
; CHECK-NEXT: [[RES:%.*]] = insertvalue { i32, i1 } [[TMP]], i1 [[C]], 1
; CHECK-NEXT: ret { i32, i1 } [[RES]]
;
  %b = add i32 %a, -2147483644
  %c = icmp slt i32 %b, -4
  %tmp = insertvalue { i32, i1 } undef, i32 %b, 0
  %res = insertvalue { i32, i1 } %tmp, i1 %c, 1
  ret { i32, i1 } %res
}

define <2 x i1> @test4vec(<2 x i32> %a) {
; CHECK-LABEL: @test4vec(
; CHECK-NEXT: [[C:%.*]] = icmp slt <2 x i32> [[A:%.*]], <i32 -4, i32 -4>
; CHECK-NEXT: ret <2 x i1> [[C]]
;
  %b = add <2 x i32> %a, <i32 2147483652, i32 2147483652>
  %c = icmp sge <2 x i32> %b, <i32 4, i32 4>
  ret <2 x i1> %c
}

; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
; This becomes equality because it's at the limit.
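; Worked example for the test below: C - C2 = -27 - 100 = -127 still fits in i8.
; Since -127 is one above the i8 minimum, "icmp slt i8 %a, -127" can only hold for -128,
; so the predicate tightens to equality.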
define i1 @nsw_slt1(i8 %a) {
; CHECK-LABEL: @nsw_slt1(
; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A:%.*]], -128
; CHECK-NEXT: ret i1 [[C]]
;
  %b = add nsw i8 %a, 100
  %c = icmp slt i8 %b, -27
  ret i1 %c
}

define <2 x i1> @nsw_slt1_splat_vec(<2 x i8> %a) {
; CHECK-LABEL: @nsw_slt1_splat_vec(
; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i8> [[A:%.*]], <i8 -128, i8 -128>
; CHECK-NEXT: ret <2 x i1> [[C]]
;
  %b = add nsw <2 x i8> %a, <i8 100, i8 100>
  %c = icmp slt <2 x i8> %b, <i8 -27, i8 -27>
  ret <2 x i1> %c
}

; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
; This becomes equality because it's at the limit.
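; Worked example for the test below: C - C2 = 27 - (-100) = 127, the i8 maximum, so
; "icmp slt i8 %a, 127" holds for every value except 127 and becomes "icmp ne".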
define i1 @nsw_slt2(i8 %a) {
; CHECK-LABEL: @nsw_slt2(
; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[A:%.*]], 127
; CHECK-NEXT: ret i1 [[C]]
;
  %b = add nsw i8 %a, -100
  %c = icmp slt i8 %b, 27
  ret i1 %c
}

define <2 x i1> @nsw_slt2_splat_vec(<2 x i8> %a) {
; CHECK-LABEL: @nsw_slt2_splat_vec(
; CHECK-NEXT: [[C:%.*]] = icmp ne <2 x i8> [[A:%.*]], <i8 127, i8 127>
; CHECK-NEXT: ret <2 x i1> [[C]]
;
  %b = add nsw <2 x i8> %a, <i8 -100, i8 -100>
  %c = icmp slt <2 x i8> %b, <i8 27, i8 27>
  ret <2 x i1> %c
}

; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
; Less than the limit, so the predicate doesn't change.
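; Worked example for the test below: C - C2 = -26 - 100 = -126 is strictly inside the
; i8 range, so the compare keeps the slt predicate and only the constant is adjusted.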
define i1 @nsw_slt3(i8 %a) {
; CHECK-LABEL: @nsw_slt3(
; CHECK-NEXT: [[C:%.*]] = icmp slt i8 [[A:%.*]], -126
; CHECK-NEXT: ret i1 [[C]]
;
  %b = add nsw i8 %a, 100
  %c = icmp slt i8 %b, -26
  ret i1 %c
}

; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
; Less than the limit, so the predicate doesn't change.
define i1 @nsw_slt4(i8 %a) {
; CHECK-LABEL: @nsw_slt4(
; CHECK-NEXT: [[C:%.*]] = icmp slt i8 [[A:%.*]], 126
; CHECK-NEXT: ret i1 [[C]]
;
  %b = add nsw i8 %a, -100
  %c = icmp slt i8 %b, 26
  ret i1 %c
}

; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
; Try sgt to make sure that works too.
define i1 @nsw_sgt1(i8 %a) {
; CHECK-LABEL: @nsw_sgt1(
; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A:%.*]], 127
; CHECK-NEXT: ret i1 [[C]]
;
  %b = add nsw i8 %a, -100
  %c = icmp sgt i8 %b, 26
  ret i1 %c
}

define <2 x i1> @nsw_sgt1_splat_vec(<2 x i8> %a) {
; CHECK-LABEL: @nsw_sgt1_splat_vec(
; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i8> [[A:%.*]], <i8 127, i8 127>
; CHECK-NEXT: ret <2 x i1> [[C]]
;
  %b = add nsw <2 x i8> %a, <i8 -100, i8 -100>
  %c = icmp sgt <2 x i8> %b, <i8 26, i8 26>
  ret <2 x i1> %c
}

define i1 @nsw_sgt2(i8 %a) {
; CHECK-LABEL: @nsw_sgt2(
; CHECK-NEXT: [[C:%.*]] = icmp sgt i8 [[A:%.*]], -126
; CHECK-NEXT: ret i1 [[C]]
;
  %b = add nsw i8 %a, 100
  %c = icmp sgt i8 %b, -26
  ret i1 %c
}

define <2 x i1> @nsw_sgt2_splat_vec(<2 x i8> %a) {
; CHECK-LABEL: @nsw_sgt2_splat_vec(
; CHECK-NEXT: [[C:%.*]] = icmp sgt <2 x i8> [[A:%.*]], <i8 -126, i8 -126>
; CHECK-NEXT: ret <2 x i1> [[C]]
;
  %b = add nsw <2 x i8> %a, <i8 100, i8 100>
  %c = icmp sgt <2 x i8> %b, <i8 -26, i8 -26>
  ret <2 x i1> %c
}

; icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2), when C - C2 does not overflow.
; Comparison with 0 doesn't need special-casing.
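; Worked example for the test below: C - C2 = 0 - 1 = -1, so the compare against zero
; becomes an ordinary "icmp slt i32 %a, -1".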
define i1 @slt_zero_add_nsw(i32 %a) {
; CHECK-LABEL: @slt_zero_add_nsw(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[A:%.*]], -1
; CHECK-NEXT: ret i1 [[CMP]]
;
  %add = add nsw i32 %a, 1
  %cmp = icmp slt i32 %add, 0
  ret i1 %cmp
}

; The same fold should work with vectors.
define <2 x i1> @slt_zero_add_nsw_splat_vec(<2 x i8> %a) {
; CHECK-LABEL: @slt_zero_add_nsw_splat_vec(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> [[A:%.*]], <i8 -1, i8 -1>
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
  %add = add nsw <2 x i8> %a, <i8 1, i8 1>
  %cmp = icmp slt <2 x i8> %add, zeroinitializer
  ret <2 x i1> %cmp
}

; Test the edges - instcombine should not interfere with simplification to constants.
; Constant subtraction does not overflow, but this is false.
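; Worked example for the test below: C - C2 = -28 - 100 = -128 is still representable,
; but no i8 value is strictly less than -128, so the compare folds to false.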
define i1 @nsw_slt3_ov_no(i8 %a) {
; CHECK-LABEL: @nsw_slt3_ov_no(
; CHECK-NEXT: ret i1 false
;
  %b = add nsw i8 %a, 100
  %c = icmp slt i8 %b, -28
  ret i1 %c
}

; Test the edges - instcombine should not interfere with simplification to constants.
; Constant subtraction overflows. This is false.
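; Worked example for the test below: C - C2 = -29 - 100 = -129 does not fit in i8.
; With nsw, %a + 100 is at least -128 + 100 = -28, which is never less than -29,
; so the compare folds to false.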
define i1 @nsw_slt4_ov(i8 %a) {
; CHECK-LABEL: @nsw_slt4_ov(
; CHECK-NEXT: ret i1 false
;
  %b = add nsw i8 %a, 100
  %c = icmp slt i8 %b, -29
  ret i1 %c
}

; Test the edges - instcombine should not interfere with simplification to constants.
; Constant subtraction overflows. This is true.
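; Worked example for the test below: C - C2 = 28 - (-100) = 128 does not fit in i8.
; With nsw, %a - 100 is at most 127 - 100 = 27, which is always less than 28,
; so the compare folds to true.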
define i1 @nsw_slt5_ov(i8 %a) {
; CHECK-LABEL: @nsw_slt5_ov(
; CHECK-NEXT: ret i1 true
;
  %b = add nsw i8 %a, -100
  %c = icmp slt i8 %b, 28
  ret i1 %c
}

; InstCombine should not thwart this opportunity to simplify completely.
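; With nsw, %x + -128 must not wrap, so %x is known non-negative and the sum always
; lands in [-128, -1]; the signed compare with zero is therefore always true.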
define i1 @slt_zero_add_nsw_signbit(i8 %x) {
; CHECK-LABEL: @slt_zero_add_nsw_signbit(
; CHECK-NEXT: ret i1 true
;
  %y = add nsw i8 %x, -128
  %z = icmp slt i8 %y, 0
  ret i1 %z
}

; InstCombine should not thwart this opportunity to simplify completely.
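; With nuw, %x + 128 must not wrap, so %x is at most 127 and the sum lands in
; [128, 255], which is negative as a signed i8; the compare is therefore always true.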
define i1 @slt_zero_add_nuw_signbit(i8 %x) {
; CHECK-LABEL: @slt_zero_add_nuw_signbit(
; CHECK-NEXT: ret i1 true
;
  %y = add nuw i8 %x, 128
  %z = icmp slt i8 %y, 0
  ret i1 %z
}

define i1 @reduce_add_ult(i32 %in) {
; CHECK-LABEL: @reduce_add_ult(
; CHECK-NEXT: [[A18:%.*]] = icmp ult i32 [[IN:%.*]], 9
; CHECK-NEXT: ret i1 [[A18]]
;
  %a6 = add nuw i32 %in, 3
  %a18 = icmp ult i32 %a6, 12
  ret i1 %a18
}

define i1 @reduce_add_ugt(i32 %in) {
; CHECK-LABEL: @reduce_add_ugt(
; CHECK-NEXT: [[A18:%.*]] = icmp ugt i32 [[IN:%.*]], 9
; CHECK-NEXT: ret i1 [[A18]]
;
  %a6 = add nuw i32 %in, 3
  %a18 = icmp ugt i32 %a6, 12
  ret i1 %a18
}

define i1 @reduce_add_ule(i32 %in) {
; CHECK-LABEL: @reduce_add_ule(
; CHECK-NEXT: [[A18:%.*]] = icmp ult i32 [[IN:%.*]], 10
; CHECK-NEXT: ret i1 [[A18]]
;
  %a6 = add nuw i32 %in, 3
  %a18 = icmp ule i32 %a6, 12
  ret i1 %a18
}

define i1 @reduce_add_uge(i32 %in) {
; CHECK-LABEL: @reduce_add_uge(
; CHECK-NEXT: [[A18:%.*]] = icmp ugt i32 [[IN:%.*]], 8
; CHECK-NEXT: ret i1 [[A18]]
;
  %a6 = add nuw i32 %in, 3
  %a18 = icmp uge i32 %a6, 12
  ret i1 %a18
}

define i1 @ult_add_ssubov(i32 %in) {
; CHECK-LABEL: @ult_add_ssubov(
; CHECK-NEXT: ret i1 false
;
  %a6 = add nuw i32 %in, 71
  %a18 = icmp ult i32 %a6, 3
  ret i1 %a18
}

define i1 @ult_add_nonuw(i8 %in) {
; CHECK-LABEL: @ult_add_nonuw(
; CHECK-NEXT: [[A6:%.*]] = add i8 [[IN:%.*]], 71
; CHECK-NEXT: [[A18:%.*]] = icmp ult i8 [[A6]], 12
; CHECK-NEXT: ret i1 [[A18]]
;
  %a6 = add i8 %in, 71
  %a18 = icmp ult i8 %a6, 12
  ret i1 %a18
}

define i1 @uge_add_nonuw(i32 %in) {
; CHECK-LABEL: @uge_add_nonuw(
; CHECK-NEXT: [[A6:%.*]] = add i32 [[IN:%.*]], 3
; CHECK-NEXT: [[A18:%.*]] = icmp ugt i32 [[A6]], 11
; CHECK-NEXT: ret i1 [[A18]]
;
  %a6 = add i32 %in, 3
  %a18 = icmp uge i32 %a6, 12
  ret i1 %a18
}

; Test unsigned add overflow patterns. The div ops are only here to
; thwart complexity based canonicalization of the operand order.
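; The underlying identity: X u> (X + Y) holds exactly when the unsigned add wraps,
; which happens exactly when Y u> ~X. That is why the checks below expect an xor
; with -1 followed by a ugt against the other operand.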
define i1 @op_ugt_sum_commute1(i8 %p1, i8 %p2) {
; CHECK-LABEL: @op_ugt_sum_commute1(
; CHECK-NEXT: [[X:%.*]] = sdiv i8 42, [[P1:%.*]]
; CHECK-NEXT: [[Y:%.*]] = sdiv i8 42, [[P2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], -1
; CHECK-NEXT: [[C:%.*]] = icmp ugt i8 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
  %x = sdiv i8 42, %p1
  %y = sdiv i8 42, %p2
  %a = add i8 %x, %y
  %c = icmp ugt i8 %x, %a
  ret i1 %c
}

define <2 x i1> @op_ugt_sum_vec_commute2(<2 x i8> %p1, <2 x i8> %p2) {
; CHECK-LABEL: @op_ugt_sum_vec_commute2(
; CHECK-NEXT: [[X:%.*]] = sdiv <2 x i8> <i8 42, i8 -42>, [[P1:%.*]]
; CHECK-NEXT: [[Y:%.*]] = sdiv <2 x i8> <i8 42, i8 -42>, [[P2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[X]], <i8 -1, i8 -1>
; CHECK-NEXT: [[C:%.*]] = icmp ugt <2 x i8> [[Y]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[C]]
;
  %x = sdiv <2 x i8> <i8 42, i8 -42>, %p1
  %y = sdiv <2 x i8> <i8 42, i8 -42>, %p2
  %a = add <2 x i8> %y, %x
  %c = icmp ugt <2 x i8> %x, %a
  ret <2 x i1> %c
}

define i1 @sum_ugt_op_uses(i8 %p1, i8 %p2, i8* %p3) {
; CHECK-LABEL: @sum_ugt_op_uses(
; CHECK-NEXT: [[X:%.*]] = sdiv i8 42, [[P1:%.*]]
; CHECK-NEXT: [[Y:%.*]] = sdiv i8 42, [[P2:%.*]]
; CHECK-NEXT: [[A:%.*]] = add nsw i8 [[X]], [[Y]]
; CHECK-NEXT: store i8 [[A]], i8* [[P3:%.*]], align 1
; CHECK-NEXT: [[C:%.*]] = icmp ugt i8 [[X]], [[A]]
; CHECK-NEXT: ret i1 [[C]]
;
  %x = sdiv i8 42, %p1
  %y = sdiv i8 42, %p2
  %a = add i8 %x, %y
  store i8 %a, i8* %p3
  %c = icmp ugt i8 %x, %a
  ret i1 %c
}

define <2 x i1> @sum_ult_op_vec_commute1(<2 x i8> %p1, <2 x i8> %p2) {
; CHECK-LABEL: @sum_ult_op_vec_commute1(
; CHECK-NEXT: [[X:%.*]] = sdiv <2 x i8> <i8 42, i8 -42>, [[P1:%.*]]
; CHECK-NEXT: [[Y:%.*]] = sdiv <2 x i8> <i8 -42, i8 42>, [[P2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[X]], <i8 -1, i8 -1>
; CHECK-NEXT: [[C:%.*]] = icmp ugt <2 x i8> [[Y]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[C]]
;
  %x = sdiv <2 x i8> <i8 42, i8 -42>, %p1
  %y = sdiv <2 x i8> <i8 -42, i8 42>, %p2
  %a = add <2 x i8> %x, %y
  %c = icmp ult <2 x i8> %a, %x
  ret <2 x i1> %c
}

define i1 @sum_ult_op_commute2(i8 %p1, i8 %p2) {
; CHECK-LABEL: @sum_ult_op_commute2(
; CHECK-NEXT: [[X:%.*]] = sdiv i8 42, [[P1:%.*]]
; CHECK-NEXT: [[Y:%.*]] = sdiv i8 42, [[P2:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], -1
; CHECK-NEXT: [[C:%.*]] = icmp ugt i8 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
  %x = sdiv i8 42, %p1
  %y = sdiv i8 42, %p2
  %a = add i8 %y, %x
  %c = icmp ult i8 %a, %x
  ret i1 %c
}

define i1 @sum_ult_op_uses(i8 %x, i8 %y, i8* %p) {
; CHECK-LABEL: @sum_ult_op_uses(
; CHECK-NEXT: [[A:%.*]] = add i8 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: store i8 [[A]], i8* [[P:%.*]], align 1
; CHECK-NEXT: [[C:%.*]] = icmp ult i8 [[A]], [[X]]
; CHECK-NEXT: ret i1 [[C]]
;
  %a = add i8 %y, %x
  store i8 %a, i8* %p
  %c = icmp ult i8 %a, %x
  ret i1 %c
}

; X + Z >s Y + Z -> X > Y if there is no overflow.
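; The nsw flags are what make this legal: e.g. with i8 operands x = 120, y = 0, z = 10,
; the left-hand sum would wrap to -126 and compare as smaller even though x >s y.
; No-signed-wrap rules that out, so the common operand can be dropped.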
define i1 @common_op_nsw(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @common_op_nsw(
; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[C]]
;
  %lhs = add nsw i32 %x, %z
  %rhs = add nsw i32 %y, %z
  %c = icmp sgt i32 %lhs, %rhs
  ret i1 %c
}

declare void @use(i32)

define i1 @common_op_nsw_extra_uses(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @common_op_nsw_extra_uses(
; CHECK-NEXT: [[LHS:%.*]] = add nsw i32 [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT: call void @use(i32 [[LHS]])
; CHECK-NEXT: [[RHS:%.*]] = add nsw i32 [[Y:%.*]], [[Z]]
; CHECK-NEXT: call void @use(i32 [[RHS]])
; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[X]], [[Y]]
; CHECK-NEXT: ret i1 [[C]]
;
  %lhs = add nsw i32 %x, %z
  call void @use(i32 %lhs)
  %rhs = add nsw i32 %y, %z
  call void @use(i32 %rhs)
  %c = icmp sgt i32 %lhs, %rhs
  ret i1 %c
}

; X + Z >u Z + Y -> X > Y if there is no overflow.
define i1 @common_op_nuw(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @common_op_nuw(
; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[C]]
;
  %lhs = add nuw i32 %x, %z
  %rhs = add nuw i32 %z, %y
  %c = icmp ugt i32 %lhs, %rhs
  ret i1 %c
}

define i1 @common_op_nuw_extra_uses(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @common_op_nuw_extra_uses(
; CHECK-NEXT: [[LHS:%.*]] = add nuw i32 [[X:%.*]], [[Z:%.*]]
; CHECK-NEXT: call void @use(i32 [[LHS]])
; CHECK-NEXT: [[RHS:%.*]] = add nuw i32 [[Z]], [[Y:%.*]]
; CHECK-NEXT: call void @use(i32 [[RHS]])
; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[Y]]
; CHECK-NEXT: ret i1 [[C]]
;
  %lhs = add nuw i32 %x, %z
  call void @use(i32 %lhs)
  %rhs = add nuw i32 %z, %y
  call void @use(i32 %rhs)
  %c = icmp ugt i32 %lhs, %rhs
  ret i1 %c
}

define i1 @common_op_nsw_commute(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @common_op_nsw_commute(
; CHECK-NEXT: [[C:%.*]] = icmp slt i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[C]]
;
  %lhs = add nsw i32 %z, %x
  %rhs = add nsw i32 %y, %z
  %c = icmp slt i32 %lhs, %rhs
  ret i1 %c
}

define i1 @common_op_nuw_commute(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @common_op_nuw_commute(
; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[C]]
;
  %lhs = add nuw i32 %z, %x
  %rhs = add nuw i32 %z, %y
  %c = icmp ult i32 %lhs, %rhs
  ret i1 %c
}

; X + Y > X -> Y > 0 if there is no overflow.
define i1 @common_op_test29(i32 %x, i32 %y) {
; CHECK-LABEL: @common_op_test29(
; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[Y:%.*]], 0
; CHECK-NEXT: ret i1 [[C]]
;
  %lhs = add nsw i32 %x, %y
  %c = icmp sgt i32 %lhs, %x
  ret i1 %c
}

; X + Y > X -> Y > 0 if there is no overflow.
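; For the unsigned test below, nuw guarantees X + Y u>= X, so the strict compare holds
; exactly when Y != 0 (the canonical form of "Y u> 0").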
define i1 @sum_nuw(i32 %x, i32 %y) {
; CHECK-LABEL: @sum_nuw(
; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[Y:%.*]], 0
; CHECK-NEXT: ret i1 [[C]]
;
  %lhs = add nuw i32 %x, %y
  %c = icmp ugt i32 %lhs, %x
  ret i1 %c
}

; X > X + Y -> 0 > Y if there is no overflow.
define i1 @sum_nsw_commute(i32 %x, i32 %y) {
; CHECK-LABEL: @sum_nsw_commute(
; CHECK-NEXT: [[C:%.*]] = icmp slt i32 [[Y:%.*]], 0
; CHECK-NEXT: ret i1 [[C]]
;
  %rhs = add nsw i32 %x, %y
  %c = icmp sgt i32 %x, %rhs
  ret i1 %c
}

; X > X + Y -> 0 > Y if there is no overflow.
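; For the unsigned test below, nuw guarantees X + Y u>= X, so "X u> X + Y" can never
; hold and the whole compare folds to false.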
define i1 @sum_nuw_commute(i32 %x, i32 %y) {
; CHECK-LABEL: @sum_nuw_commute(
; CHECK-NEXT: ret i1 false
;
  %rhs = add nuw i32 %x, %y
  %c = icmp ugt i32 %x, %rhs
  ret i1 %c
}

; PR2698 - https://bugs.llvm.org/show_bug.cgi?id=2698

declare void @use1(i1)
declare void @use8(i8)

define void @bzip1(i8 %a, i8 %b, i8 %x) {
; CHECK-LABEL: @bzip1(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: call void @use1(i1 [[CMP]])
; CHECK-NEXT: ret void
;
  %add1 = add i8 %a, %x
  %add2 = add i8 %b, %x
  %cmp = icmp eq i8 %add1, %add2
  call void @use1(i1 %cmp)
  ret void
}

define void @bzip2(i8 %a, i8 %b, i8 %x) {
; CHECK-LABEL: @bzip2(
; CHECK-NEXT: [[ADD1:%.*]] = add i8 [[A:%.*]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A]], [[B:%.*]]
; CHECK-NEXT: call void @use1(i1 [[CMP]])
; CHECK-NEXT: call void @use8(i8 [[ADD1]])
; CHECK-NEXT: ret void
;
  %add1 = add i8 %a, %x
  %add2 = add i8 %b, %x
  %cmp = icmp eq i8 %add1, %add2
  call void @use1(i1 %cmp)
  call void @use8(i8 %add1)
  ret void
}