; RUN: opt %s -instcombine -S | FileCheck %s

%overflow.result = type {i8, i1}

declare %overflow.result @llvm.uadd.with.overflow.i8(i8, i8)
declare %overflow.result @llvm.umul.with.overflow.i8(i8, i8)
declare double @llvm.powi.f64(double, i32) nounwind readonly
declare i32 @llvm.cttz.i32(i32) nounwind readnone
declare i32 @llvm.ctlz.i32(i32) nounwind readnone
declare i32 @llvm.ctpop.i32(i32) nounwind readnone
declare i8 @llvm.ctlz.i8(i8) nounwind readnone
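
; uaddtest1: when only the sum (element 0) of uadd.with.overflow is used,
; the intrinsic simplifies to a plain add.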
define i8 @uaddtest1(i8 %A, i8 %B) {
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
  %y = extractvalue %overflow.result %x, 0
  ret i8 %y
; CHECK: @uaddtest1
; CHECK-NEXT: %y = add i8 %A, %B
; CHECK-NEXT: ret i8 %y
}
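
; uaddtest2: both operands are masked to 7 bits, so the add provably cannot
; wrap; the overflow bit folds to false and the add is marked nuw.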
define i8 @uaddtest2(i8 %A, i8 %B, i1* %overflowPtr) {
  %and.A = and i8 %A, 127
  %and.B = and i8 %B, 127
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %and.A, i8 %and.B)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK: @uaddtest2
; CHECK-NEXT: %and.A = and i8 %A, 127
; CHECK-NEXT: %and.B = and i8 %B, 127
; CHECK-NEXT: %1 = add nuw i8 %and.A, %and.B
; CHECK-NEXT: store i1 false, i1* %overflowPtr
; CHECK-NEXT: ret i8 %1
}
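
; uaddtest3: both operands have the top bit forced on, so the unsigned add
; always wraps; the overflow bit folds to true.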
define i8 @uaddtest3(i8 %A, i8 %B, i1* %overflowPtr) {
  %or.A = or i8 %A, -128
  %or.B = or i8 %B, -128
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %or.A, i8 %or.B)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK: @uaddtest3
; CHECK-NEXT: %or.A = or i8 %A, -128
; CHECK-NEXT: %or.B = or i8 %B, -128
; CHECK-NEXT: %1 = add i8 %or.A, %or.B
; CHECK-NEXT: store i1 true, i1* %overflowPtr
; CHECK-NEXT: ret i8 %1
}
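
; uaddtest4: an undef operand folds the whole intrinsic away; the store of the
; undef overflow bit is dropped and the function simply returns undef.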
define i8 @uaddtest4(i8 %A, i1* %overflowPtr) {
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 undef, i8 %A)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK: @uaddtest4
; CHECK-NEXT: ret i8 undef
}
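
; uaddtest5: adding zero returns the other operand with no overflow.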
define i8 @uaddtest5(i8 %A, i1* %overflowPtr) {
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 0, i8 %A)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK: @uaddtest5
; CHECK: ret i8 %A
}
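
; uaddtest6: only the overflow bit of %A + (-4) is used; it becomes a direct
; unsigned comparison, %A > 3.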
define i1 @uaddtest6(i8 %A, i8 %B) {
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 -4)
  %z = extractvalue %overflow.result %x, 1
  ret i1 %z
; CHECK: @uaddtest6
; CHECK-NEXT: %z = icmp ugt i8 %A, 3
; CHECK-NEXT: ret i1 %z
}
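
; uaddtest7: as in uaddtest1, a sum-only use of the intrinsic becomes a plain add.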
define i8 @uaddtest7(i8 %A, i8 %B) {
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
  %z = extractvalue %overflow.result %x, 0
  ret i8 %z
; CHECK: @uaddtest7
; CHECK-NEXT: %z = add i8 %A, %B
; CHECK-NEXT: ret i8 %z
}
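
; umultest1: multiplying by zero yields zero and can never overflow.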
define i8 @umultest1(i8 %A, i1* %overflowPtr) {
  %x = call %overflow.result @llvm.umul.with.overflow.i8(i8 0, i8 %A)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK: @umultest1
; CHECK-NEXT: store i1 false, i1* %overflowPtr
; CHECK-NEXT: ret i8 0
}
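
; umultest2: multiplying by one returns the other operand and can never overflow.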
define i8 @umultest2(i8 %A, i1* %overflowPtr) {
  %x = call %overflow.result @llvm.umul.with.overflow.i8(i8 1, i8 %A)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK: @umultest2
; CHECK-NEXT: store i1 false, i1* %overflowPtr
; CHECK-NEXT: ret i8 %A
}
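
; powi with small constant exponents: powi(V, -1) becomes 1.0 / V,
; powi(V, 0) becomes 1.0, and powi(V, 1) becomes V itself.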
define void @powi(double %V, double *%P) {
entry:
  %A = tail call double @llvm.powi.f64(double %V, i32 -1) nounwind
  volatile store double %A, double* %P

  %B = tail call double @llvm.powi.f64(double %V, i32 0) nounwind
  volatile store double %B, double* %P

  %C = tail call double @llvm.powi.f64(double %V, i32 1) nounwind
  volatile store double %C, double* %P
  ret void
; CHECK: @powi
; CHECK: %A = fdiv double 1.0{{.*}}, %V
; CHECK: volatile store double %A,
; CHECK: volatile store double 1.0
; CHECK: volatile store double %V
}
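
; cttz: (%a | 8) & -8 has bit 3 set and bits 0-2 clear, so known bits prove
; the count of trailing zeros is exactly 3.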
define i32 @cttz(i32 %a) {
entry:
  %or = or i32 %a, 8
  %and = and i32 %or, -8
  %count = tail call i32 @llvm.cttz.i32(i32 %and) nounwind readnone
  ret i32 %count
; CHECK: @cttz
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i32 3
}
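
; ctlz: (%a | 32) & 63 has bit 5 set and bits 6-7 clear, so known bits prove
; the count of leading zeros is exactly 2.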
define i8 @ctlz(i8 %a) {
entry:
  %or = or i8 %a, 32
  %and = and i8 %or, 63
  %count = tail call i8 @llvm.ctlz.i8(i8 %and) nounwind readnone
  ret i8 %count
; CHECK: @ctlz
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i8 2
}
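
; cmp.simplify: comparing ctlz/cttz against the bit width, or ctpop against
; zero, is equivalent to comparing the original value against zero.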
define void @cmp.simplify(i32 %a, i32 %b, i1* %c) {
entry:
  %lz = tail call i32 @llvm.ctlz.i32(i32 %a) nounwind readnone
  %lz.cmp = icmp eq i32 %lz, 32
  volatile store i1 %lz.cmp, i1* %c
  %tz = tail call i32 @llvm.cttz.i32(i32 %a) nounwind readnone
  %tz.cmp = icmp ne i32 %tz, 32
  volatile store i1 %tz.cmp, i1* %c
  %pop = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
  %pop.cmp = icmp eq i32 %pop, 0
  volatile store i1 %pop.cmp, i1* %c
  ret void
; CHECK: @cmp.simplify
; CHECK-NEXT: entry:
; CHECK-NEXT: %lz.cmp = icmp eq i32 %a, 0
; CHECK-NEXT: volatile store i1 %lz.cmp, i1* %c
; CHECK-NEXT: %tz.cmp = icmp ne i32 %a, 0
; CHECK-NEXT: volatile store i1 %tz.cmp, i1* %c
; CHECK-NEXT: %pop.cmp = icmp eq i32 %b, 0
; CHECK-NEXT: volatile store i1 %pop.cmp, i1* %c
}
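
; cttz_simplify1: ctlz(%x) >> 5 is 1 only when ctlz returns 32, i.e. when %x
; is 0, so the sequence folds to zext(icmp eq %x, 0).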
define i32 @cttz_simplify1(i32 %x) nounwind readnone ssp {
  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %x)    ; <i32> [#uses=1]
  %shr3 = lshr i32 %tmp1, 5                       ; <i32> [#uses=1]
  ret i32 %shr3

; CHECK: @cttz_simplify1
; CHECK: icmp eq i32 %x, 0
; CHECK-NEXT: zext i1
; CHECK-NEXT: ret i32
}