; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -simplifycfg -S | FileCheck %s --check-prefixes=ALL,DEFAULT,FALLBACK0
; RUN: opt < %s -simplifycfg -phi-node-folding-threshold=2 -S | FileCheck %s --check-prefixes=ALL,DEFAULT,FALLBACK1
; RUN: opt < %s -simplifycfg -phi-node-folding-threshold=3 -S | FileCheck %s --check-prefixes=ALL,COSTLY
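
; The three RUN lines vary -phi-node-folding-threshold (the default, 2, and 3).
; Every assertion below uses the shared ALL prefix, so the same folded output
; is expected in each configuration; the DEFAULT/FALLBACK0/FALLBACK1/COSTLY
; prefixes only come into play if the configurations ever diverge.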

; This is checking a computation of whether the multiplication will overflow,
; with a leftover guard against division-by-zero that was needed before
; InstCombine produced llvm.umul.with.overflow.
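
; For context, a rough sketch of the kind of input InstCombine is assumed to
; have started from (an illustration, not taken from this test's history) is
; the classic division-based overflow check:
;
;   land.rhs:                                ; reached only when %size != 0
;     %mul = mul i64 %size, %nmemb
;     %div = udiv i64 %mul, %size            ; the division needing the zero guard
;     %umul.not.ov = icmp eq i64 %div, %nmemb ; no overflow iff x * y / x == y
;
; InstCombine rewrites that into the @llvm.umul.with.overflow.i64 call below,
; leaving the %size == 0 guard in place for SimplifyCFG to flatten.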

define i1 @will_overflow(i64 %size, i64 %nmemb) {
; ALL-LABEL: @will_overflow(
; ALL-NEXT:  entry:
; ALL-NEXT:    [[CMP:%.*]] = icmp eq i64 [[SIZE:%.*]], 0
; ALL-NEXT:    [[UMUL:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[SIZE]], i64 [[NMEMB:%.*]])
; ALL-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
; ALL-NEXT:    [[UMUL_NOT_OV:%.*]] = xor i1 [[UMUL_OV]], true
; ALL-NEXT:    [[TMP0:%.*]] = select i1 [[CMP]], i1 true, i1 [[UMUL_NOT_OV]]
; ALL-NEXT:    ret i1 [[TMP0]]
;
entry:
  %cmp = icmp eq i64 %size, 0
  br i1 %cmp, label %land.end, label %land.rhs

land.rhs:                                         ; preds = %entry
  %umul = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %size, i64 %nmemb)
  %umul.ov = extractvalue { i64, i1 } %umul, 1
  %umul.not.ov = xor i1 %umul.ov, true
  br label %land.end

land.end:                                         ; preds = %land.rhs, %entry
  %0 = phi i1 [ true, %entry ], [ %umul.not.ov, %land.rhs ]
  ret i1 %0
}
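
; SimplifyCFG (FoldTwoEntryPHINode) speculates land.rhs and folds the
; two-entry PHI in land.end into a select, as the checks above show:
;
;   %0 = select i1 %cmp, i1 true, i1 %umul.not.ov
;
; i.e. return true when %size == 0 or when the multiplication does not
; overflow. Since all assertions use the ALL prefix, this fold is expected
; at each tested -phi-node-folding-threshold.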

; Function Attrs: nounwind readnone speculatable
declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) #0

attributes #0 = { nounwind readnone speculatable }