1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt %s -passes=instcombine -S | FileCheck %s
4 declare { i4, i1 } @llvm.smul.with.overflow.i4(i4, i4) #1
; Baseline positive test: `%size == 0 || !overflow(smul(%size, %nmemb))`.
; Per the CHECK lines, instcombine drops the explicit zero-check and the
; select entirely, returning just the inverted overflow flag (with %nmemb
; frozen, since it is now used unconditionally).
; NOTE(review): the function is named `t0_umul` but calls the *smul*
; intrinsic — presumably copied from a umul sibling test; confirm intended.
6 define i1 @t0_umul(i4 %size, i4 %nmemb) {
7 ; CHECK-LABEL: @t0_umul(
8 ; CHECK-NEXT: [[NMEMB_FR:%.*]] = freeze i4 [[NMEMB:%.*]]
9 ; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE:%.*]], i4 [[NMEMB_FR]])
10 ; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
11 ; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
12 ; CHECK-NEXT: ret i1 [[PHITMP]]
14 %cmp = icmp eq i4 %size, 0
15 %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
16 %smul.ov = extractvalue { i4, i1 } %smul, 1
17 %phitmp = xor i1 %smul.ov, true
; Logical-or written as a select: true when %size == 0, else the no-overflow bit.
18 %or = select i1 %cmp, i1 true, i1 %phitmp
; Same pattern as the baseline but with the select operands commuted
; (`%phitmp || %cmp` instead of `%cmp || %phitmp`). The CHECK lines show
; the fold still fires: the zero-check and select disappear, leaving only
; the inverted overflow flag.
22 define i1 @t1_commutative(i4 %size, i4 %nmemb) {
23 ; CHECK-LABEL: @t1_commutative(
24 ; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE:%.*]], i4 [[NMEMB:%.*]])
25 ; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
26 ; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
27 ; CHECK-NEXT: ret i1 [[PHITMP]]
29 %cmp = icmp eq i4 %size, 0
30 %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
31 %smul.ov = extractvalue { i4, i1 } %smul, 1
32 %phitmp = xor i1 %smul.ov, true
33 %or = select i1 %phitmp, i1 true, i1 %cmp ; swapped
; Negative test: the zero-compare is on %size1 while the multiply uses
; %size0, so the compare does not guard the overflow check. The CHECK
; lines confirm the whole sequence (icmp, mul, xor, select) is preserved
; unchanged — the fold must not fire.
37 define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {
38 ; CHECK-LABEL: @n2_wrong_size(
39 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE1:%.*]], 0
40 ; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE0:%.*]], i4 [[NMEMB:%.*]])
41 ; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
42 ; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
43 ; CHECK-NEXT: [[OR:%.*]] = select i1 [[CMP]], i1 true, i1 [[PHITMP]]
44 ; CHECK-NEXT: ret i1 [[OR]]
46 %cmp = icmp eq i4 %size1, 0 ; not %size0
47 %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size0, i4 %nmemb)
48 %smul.ov = extractvalue { i4, i1 } %smul, 1
49 %phitmp = xor i1 %smul.ov, true
50 %or = select i1 %cmp, i1 true, i1 %phitmp
; Uses 'ne' instead of 'eq', so this is not the guarded-overflow pattern.
; Per the CHECK line the result folds to constant true: the expression is
; `%size != 0 || !overflow`, and when %size == 0 the multiply cannot
; overflow, so one of the two disjuncts always holds.
54 define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
55 ; CHECK-LABEL: @n3_wrong_pred(
56 ; CHECK-NEXT: ret i1 true
58 %cmp = icmp ne i4 %size, 0 ; not 'eq'
59 %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
60 %smul.ov = extractvalue { i4, i1 } %smul, 1
61 %phitmp = xor i1 %smul.ov, true
62 %or = select i1 %cmp, i1 true, i1 %phitmp
; The select encodes a logical AND (`%cmp && %phitmp`) rather than OR.
; Per the CHECK lines this reduces to just %cmp: when %size == 0 the
; multiply cannot overflow, so %phitmp is true whenever %cmp is, and the
; AND collapses to the compare alone.
66 define i1 @n4_not_and(i4 %size, i4 %nmemb) {
67 ; CHECK-LABEL: @n4_not_and(
68 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
69 ; CHECK-NEXT: ret i1 [[CMP]]
71 %cmp = icmp eq i4 %size, 0
72 %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
73 %smul.ov = extractvalue { i4, i1 } %smul, 1
74 %phitmp = xor i1 %smul.ov, true
75 %or = select i1 %cmp, i1 %phitmp, i1 false ; not 'or'
; Negative test: the compare is against 1, not 0, so `%size == 1` does not
; imply the multiply is overflow-free. The CHECK lines confirm the entire
; sequence survives unchanged — the fold must not fire.
79 define i1 @n5_not_zero(i4 %size, i4 %nmemb) {
80 ; CHECK-LABEL: @n5_not_zero(
81 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 1
82 ; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
83 ; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
84 ; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
85 ; CHECK-NEXT: [[OR:%.*]] = select i1 [[CMP]], i1 true, i1 [[PHITMP]]
86 ; CHECK-NEXT: ret i1 [[OR]]
88 %cmp = icmp eq i4 %size, 1 ; should be '0'
89 %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
90 %smul.ov = extractvalue { i4, i1 } %smul, 1
91 %phitmp = xor i1 %smul.ov, true
92 %or = select i1 %cmp, i1 true, i1 %phitmp