; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes='require<profile-summary>,function(codegenprepare)' -S < %s | FileCheck %s
; RUN: opt -enable-debugify -passes='require<profile-summary>,function(codegenprepare)' -S < %s 2>&1 | FileCheck %s -check-prefix=DEBUG

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"
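
; CodeGenPrepare should fold an unsigned add together with its wrap check
; into a call to @llvm.uadd.with.overflow (and a sub with its borrow check
; into @llvm.usub.with.overflow, below): (a + b) u< a is true exactly when
; the add wraps.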

define i64 @uaddo1_overflow_used(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo1_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %a
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

define i64 @uaddo1_math_overflow_used(i64 %a, i64 %b, ptr %res) nounwind ssp {
; CHECK-LABEL: @uaddo1_math_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    store i64 [[MATH]], ptr [[RES:%.*]]
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %a
  %Q = select i1 %cmp, i64 %b, i64 42
  store i64 %add, ptr %res
  ret i64 %Q
}

define i64 @uaddo2_overflow_used(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo2_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

define i64 @uaddo2_math_overflow_used(i64 %a, i64 %b, ptr %res) nounwind ssp {
; CHECK-LABEL: @uaddo2_math_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    store i64 [[MATH]], ptr [[RES:%.*]]
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  store i64 %add, ptr %res
  ret i64 %Q
}

define i64 @uaddo3_overflow_used(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo3_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

define i64 @uaddo3_math_overflow_used(i64 %a, i64 %b, ptr %res) nounwind ssp {
; CHECK-LABEL: @uaddo3_math_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    store i64 [[MATH]], ptr [[RES:%.*]]
; CHECK-NEXT:    ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  %Q = select i1 %cmp, i64 %b, i64 42
  store i64 %add, ptr %res
  ret i64 %Q
}

; TODO? CGP sinks the compare before we have a chance to form the overflow intrinsic.
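; The add stays in the entry block while the compare's only user is in %next,
; so CGP sinks the compare next to the select instead.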

define i64 @uaddo4(i64 %a, i64 %b, i1 %c) nounwind ssp {
; CHECK-LABEL: @uaddo4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ADD:%.*]] = add i64 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT:    br i1 [[C:%.*]], label [[NEXT:%.*]], label [[EXIT:%.*]]
; CHECK:       next:
; CHECK-NEXT:    [[TMP0:%.*]] = icmp ugt i64 [[B]], [[ADD]]
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[TMP0]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
; CHECK:       exit:
; CHECK-NEXT:    ret i64 0
;
entry:
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  br i1 %c, label %next, label %exit

next:
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q

exit:
  ret i64 0
}

define i64 @uaddo5(i64 %a, i64 %b, ptr %ptr, i1 %c) nounwind ssp {
; CHECK-LABEL: @uaddo5(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ADD:%.*]] = add i64 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT:    store i64 [[ADD]], ptr [[PTR:%.*]]
; CHECK-NEXT:    br i1 [[C:%.*]], label [[NEXT:%.*]], label [[EXIT:%.*]]
; CHECK:       next:
; CHECK-NEXT:    [[TMP0:%.*]] = icmp ugt i64 [[B]], [[ADD]]
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[TMP0]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
; CHECK:       exit:
; CHECK-NEXT:    ret i64 0
;
entry:
  %add = add i64 %b, %a
  store i64 %add, ptr %ptr
  %cmp = icmp ugt i64 %b, %add
  br i1 %c, label %next, label %exit

next:
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q

exit:
  ret i64 0
}

; Instcombine folds (a + b <u a) to (a ^ -1 <u b). Make sure we match this
; pattern as well.
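; (a ^ -1) is ~a, the largest value that can be added to 'a' without
; wrapping, so (~a u< b) holds exactly when a + b overflows.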
define i64 @uaddo6_xor(i64 %a, i64 %b) {
; CHECK-LABEL: @uaddo6_xor(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %x = xor i64 %a, -1
  %cmp = icmp ult i64 %x, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

define i64 @uaddo6_xor_commuted(i64 %a, i64 %b) {
; CHECK-LABEL: @uaddo6_xor_commuted(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT:    [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT:    ret i64 [[Q]]
;
  %x = xor i64 -1, %a
  %cmp = icmp ult i64 %x, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

declare void @use(i64)

define i64 @uaddo6_xor_multi_use(i64 %a, i64 %b) {
; CHECK-LABEL: @uaddo6_xor_multi_use(
; CHECK-NEXT:    [[X:%.*]] = xor i64 -1, [[A:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[X]], [[B:%.*]]
; CHECK-NEXT:    [[Q:%.*]] = select i1 [[CMP]], i64 [[B]], i64 42
; CHECK-NEXT:    call void @use(i64 [[X]])
; CHECK-NEXT:    ret i64 [[Q]]
;
  %x = xor i64 -1, %a
  %cmp = icmp ult i64 %x, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  call void @use(i64 %x)
  ret i64 %Q
}

; Make sure we do not use the XOR binary operator as insert point, as it may
; come before the second operand of the overflow intrinsic.
define i1 @uaddo6_xor_op_after_XOR(i32 %a, ptr %b.ptr) {
; CHECK-LABEL: @uaddo6_xor_op_after_XOR(
; CHECK-NEXT:    [[B:%.*]] = load i32, ptr [[B_PTR:%.*]], align 8
; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A:%.*]], i32 [[B]])
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT:    [[OV:%.*]] = xor i1 [[OV1]], true
; CHECK-NEXT:    ret i1 [[OV]]
;
  %x = xor i32 %a, -1
  %b = load i32, ptr %b.ptr, align 8
  %cmp14 = icmp ugt i32 %b, %x
  %ov = xor i1 %cmp14, true
  ret i1 %ov
}

; When adding 1, the general pattern for add-overflow may be different due to icmp canonicalization.
; PR31754: https://bugs.llvm.org/show_bug.cgi?id=31754
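; x + 1 wraps exactly when the sum lands on 0, so the canonical check
; compares the sum against 0 instead of using the (a + b) u< a form.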

define i1 @uaddo_i64_increment(i64 %x, ptr %p) {
; CHECK-LABEL: @uaddo_i64_increment(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i64 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %a = add i64 %x, 1
  %ov = icmp eq i64 %a, 0
  store i64 %a, ptr %p
  ret i1 %ov
}

define i1 @uaddo_i8_increment_noncanonical_1(i8 %x, ptr %p) {
; CHECK-LABEL: @uaddo_i8_increment_noncanonical_1(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 1, i8 [[X:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i8 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %a = add i8 1, %x ; commute
  %ov = icmp eq i8 %a, 0
  store i8 %a, ptr %p
  ret i1 %ov
}

define i1 @uaddo_i32_increment_noncanonical_2(i32 %x, ptr %p) {
; CHECK-LABEL: @uaddo_i32_increment_noncanonical_2(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i32 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %a = add i32 %x, 1
  %ov = icmp eq i32 0, %a ; commute
  store i32 %a, ptr %p
  ret i1 %ov
}

define i1 @uaddo_i16_increment_noncanonical_3(i16 %x, ptr %p) {
; CHECK-LABEL: @uaddo_i16_increment_noncanonical_3(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 1, i16 [[X:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i16 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %a = add i16 1, %x ; commute
  %ov = icmp eq i16 0, %a ; commute
  store i16 %a, ptr %p
  ret i1 %ov
}

; The overflow check may be against the input rather than the sum.
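; x + 1 wraps only for x == -1 (the maximum unsigned value), so comparing
; the input against -1 is an equivalent overflow check.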

define i1 @uaddo_i64_increment_alt(i64 %x, ptr %p) {
; CHECK-LABEL: @uaddo_i64_increment_alt(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i64 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %a = add i64 %x, 1
  store i64 %a, ptr %p
  %ov = icmp eq i64 %x, -1
  ret i1 %ov
}

; Make sure insertion is done correctly based on dominance.

define i1 @uaddo_i64_increment_alt_dom(i64 %x, ptr %p) {
; CHECK-LABEL: @uaddo_i64_increment_alt_dom(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i64 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %ov = icmp eq i64 %x, -1
  %a = add i64 %x, 1
  store i64 %a, ptr %p
  ret i1 %ov
}

; The overflow check may be against the input rather than the sum.
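; Adding -1 carries out for every input except 0, so icmp ne %x, 0 is the
; overflow bit of x + (-1).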

define i1 @uaddo_i64_decrement_alt(i64 %x, ptr %p) {
; CHECK-LABEL: @uaddo_i64_decrement_alt(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 -1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i64 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %a = add i64 %x, -1
  store i64 %a, ptr %p
  %ov = icmp ne i64 %x, 0
  ret i1 %ov
}

; Make sure insertion is done correctly based on dominance.

define i1 @uaddo_i64_decrement_alt_dom(i64 %x, ptr %p) {
; CHECK-LABEL: @uaddo_i64_decrement_alt_dom(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 -1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i64 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %ov = icmp ne i64 %x, 0
  %a = add i64 %x, -1
  store i64 %a, ptr %p
  ret i1 %ov
}

; No transform for illegal types.

define i1 @uaddo_i42_increment_illegal_type(i42 %x, ptr %p) {
; CHECK-LABEL: @uaddo_i42_increment_illegal_type(
; CHECK-NEXT:    [[A:%.*]] = add i42 [[X:%.*]], 1
; CHECK-NEXT:    [[OV:%.*]] = icmp eq i42 [[A]], 0
; CHECK-NEXT:    store i42 [[A]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV]]
;
  %a = add i42 %x, 1
  %ov = icmp eq i42 %a, 0
  store i42 %a, ptr %p
  ret i1 %ov
}

define i1 @usubo_ult_i64_overflow_used(i64 %x, i64 %y, ptr %p) {
; CHECK-LABEL: @usubo_ult_i64_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[X:%.*]], i64 [[Y:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %s = sub i64 %x, %y
  %ov = icmp ult i64 %x, %y
  ret i1 %ov
}

define i1 @usubo_ult_i64_math_overflow_used(i64 %x, i64 %y, ptr %p) {
; CHECK-LABEL: @usubo_ult_i64_math_overflow_used(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[X:%.*]], i64 [[Y:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i64 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %s = sub i64 %x, %y
  store i64 %s, ptr %p
  %ov = icmp ult i64 %x, %y
  ret i1 %ov
}

; Verify insertion point for single-BB. Toggle predicate.
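; (y u> x) is the same condition as (x u< y), i.e. the borrow of x - y.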

define i1 @usubo_ugt_i32(i32 %x, i32 %y, ptr %p) {
; CHECK-LABEL: @usubo_ugt_i32(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i32 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %ov = icmp ugt i32 %y, %x
  %s = sub i32 %x, %y
  store i32 %s, ptr %p
  ret i1 %ov
}

; Constant operand should match.

define i1 @usubo_ugt_constant_op0_i8(i8 %x, ptr %p) {
; CHECK-LABEL: @usubo_ugt_constant_op0_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 42, i8 [[X:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i8 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %s = sub i8 42, %x
  %ov = icmp ugt i8 %x, 42
  store i8 %s, ptr %p
  ret i1 %ov
}

; Compare with constant operand 0 is canonicalized by commuting, but verify match for non-canonical form.

define i1 @usubo_ult_constant_op0_i16(i16 %x, ptr %p) {
; CHECK-LABEL: @usubo_ult_constant_op0_i16(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 43, i16 [[X:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i16 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %s = sub i16 43, %x
  %ov = icmp ult i16 43, %x
  store i16 %s, ptr %p
  ret i1 %ov
}

; Subtract with constant operand 1 is canonicalized to add.
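; E.g. instcombine rewrites (sub i16 %x, 44) as (add i16 %x, -44), so the
; usubo pattern must be matched through the add form.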

define i1 @usubo_ult_constant_op1_i16(i16 %x, ptr %p) {
; CHECK-LABEL: @usubo_ult_constant_op1_i16(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 [[X:%.*]], i16 44)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i16 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %s = add i16 %x, -44
  %ov = icmp ult i16 %x, 44
  store i16 %s, ptr %p
  ret i1 %ov
}

define i1 @usubo_ugt_constant_op1_i8(i8 %x, ptr %p) {
; CHECK-LABEL: @usubo_ugt_constant_op1_i8(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[X:%.*]], i8 45)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i8 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %ov = icmp ugt i8 45, %x
  %s = add i8 %x, -45
  store i8 %s, ptr %p
  ret i1 %ov
}

; Special-case: subtract 1 changes the compare predicate and constant.
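; x - 1 borrows exactly when x == 0, so the predicate becomes eq and the
; compared constant drops from 1 to 0.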

define i1 @usubo_eq_constant1_op1_i32(i32 %x, ptr %p) {
; CHECK-LABEL: @usubo_eq_constant1_op1_i32(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[X:%.*]], i32 1)
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i32 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %s = add i32 %x, -1
  %ov = icmp eq i32 %x, 0
  store i32 %s, ptr %p
  ret i1 %ov
}

; Special-case: subtract from 0 (negate) changes the compare predicate.
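; 0 - x borrows for every nonzero x, so the overflow bit is icmp ne %x, 0.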

define i1 @usubo_ne_constant0_op1_i32(i32 %x, ptr %p) {
; CHECK-LABEL: @usubo_ne_constant0_op1_i32(
; CHECK-NEXT:    [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 0, i32 [[X:%.*]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT:    store i32 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
  %s = sub i32 0, %x
  %ov = icmp ne i32 %x, 0
  store i32 %s, ptr %p
  ret i1 %ov
}

; This used to verify insertion point for multi-BB, but now we just bail out.

declare void @call(i1)

define i1 @usubo_ult_sub_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) {
; CHECK-LABEL: @usubo_ult_sub_dominates_i64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
; CHECK:       t:
; CHECK-NEXT:    [[S:%.*]] = sub i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    store i64 [[S]], ptr [[P:%.*]]
; CHECK-NEXT:    br i1 [[COND]], label [[END:%.*]], label [[F]]
; CHECK:       f:
; CHECK-NEXT:    ret i1 [[COND]]
; CHECK:       end:
; CHECK-NEXT:    [[OV:%.*]] = icmp ult i64 [[X]], [[Y]]
; CHECK-NEXT:    ret i1 [[OV]]
;
entry:
  br i1 %cond, label %t, label %f

t:
  %s = sub i64 %x, %y
  store i64 %s, ptr %p
  br i1 %cond, label %end, label %f

f:
  ret i1 %cond

end:
  %ov = icmp ult i64 %x, %y
  ret i1 %ov
}

define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) {
; CHECK-LABEL: @usubo_ult_cmp_dominates_i64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
; CHECK:       t:
; CHECK-NEXT:    [[OV:%.*]] = icmp ult i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    call void @call(i1 [[OV]])
; CHECK-NEXT:    br i1 [[OV]], label [[END:%.*]], label [[F]]
; CHECK:       f:
; CHECK-NEXT:    ret i1 [[COND]]
; CHECK:       end:
; CHECK-NEXT:    [[TMP0:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[X]], i64 [[Y]])
; CHECK-NEXT:    [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
; CHECK-NEXT:    [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
; CHECK-NEXT:    store i64 [[MATH]], ptr [[P:%.*]]
; CHECK-NEXT:    ret i1 [[OV1]]
;
entry:
  br i1 %cond, label %t, label %f

t:
  %ov = icmp ult i64 %x, %y
  call void @call(i1 %ov)
  br i1 %ov, label %end, label %f

f:
  ret i1 %cond

end:
  %s = sub i64 %x, %y
  store i64 %s, ptr %p
  ret i1 %ov
}

; Verify that crazy/non-canonical code does not crash.

define void @bar() {
; CHECK-LABEL: @bar(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 1, -1
; CHECK-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
; CHECK-NEXT:    unreachable
;
  %cmp = icmp eq i64 1, -1
  %frombool = zext i1 %cmp to i8
  unreachable
}

define void @foo() {
; CHECK-LABEL: @foo(
; CHECK-NEXT:    [[SUB:%.*]] = add nsw i64 1, 1
; CHECK-NEXT:    [[CONV:%.*]] = trunc i64 [[SUB]] to i32
; CHECK-NEXT:    unreachable
;
  %sub = add nsw i64 1, 1
  %conv = trunc i64 %sub to i32
  unreachable
}

; Similarly for usubo.

define i1 @bar2() {
; CHECK-LABEL: @bar2(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 1, 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %cmp = icmp eq i64 1, 0
  ret i1 %cmp
}

define i64 @foo2(ptr %p) {
; CHECK-LABEL: @foo2(
; CHECK-NEXT:    [[SUB:%.*]] = add nsw i64 1, -1
; CHECK-NEXT:    ret i64 [[SUB]]
;
  %sub = add nsw i64 1, -1
  ret i64 %sub
}

; Avoid hoisting a math op into a dominating block which would
; increase the critical path.
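; %cond17 (key == 0) plus %svalue (key - 1) match a usubo pattern, but
; forming the intrinsic would hoist the subtract into the entry block next
; to the compare, lengthening the path through the load.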

define void @PR41129(ptr %p64) {
; CHECK-LABEL: @PR41129(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[KEY:%.*]] = load i64, ptr [[P64:%.*]], align 8
; CHECK-NEXT:    [[COND17:%.*]] = icmp eq i64 [[KEY]], 0
; CHECK-NEXT:    br i1 [[COND17]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK:       false:
; CHECK-NEXT:    [[ANDVAL:%.*]] = and i64 [[KEY]], 7
; CHECK-NEXT:    store i64 [[ANDVAL]], ptr [[P64]]
; CHECK-NEXT:    br label [[EXIT:%.*]]
; CHECK:       true:
; CHECK-NEXT:    [[SVALUE:%.*]] = add i64 [[KEY]], -1
; CHECK-NEXT:    store i64 [[SVALUE]], ptr [[P64]]
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  %key = load i64, ptr %p64, align 8
  %cond17 = icmp eq i64 %key, 0
  br i1 %cond17, label %true, label %false

false:
  %andval = and i64 %key, 7
  store i64 %andval, ptr %p64
  br label %exit

true:
  %svalue = add i64 %key, -1
  store i64 %svalue, ptr %p64
  br label %exit

exit:
  ret void
}

; Check that every instruction inserted by CodeGenPrepare has a debug location.
; DEBUG: CheckModuleDebugify: PASS