; RUN: llc -mtriple=thumbv8m.main -mcpu=cortex-m33 -mattr=-use-misched %s -arm-disable-cgp=false -o - | FileCheck %s
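; These functions exercise the ARM type-promotion transform (enabled here via
; -arm-disable-cgp=false): narrow i8/i16 arithmetic that can overflow or
; underflow feeds an unsigned comparison, so the compare must still observe
; the correctly wrapped narrow value.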
; CHECK-LABEL: overflow_add
define zeroext i16 @overflow_add(i16 zeroext %a, i16 zeroext %b) {
  %add = add i16 %a, %b
  %or = or i16 %add, 1
  %cmp = icmp ugt i16 %or, 1024
  %res = select i1 %cmp, i16 2, i16 5
  ret i16 %res
}

; CHECK-LABEL: overflow_sub
define zeroext i16 @overflow_sub(i16 zeroext %a, i16 zeroext %b) {
  %sub = sub i16 %a, %b
  %or = or i16 %sub, 1
  %cmp = icmp ugt i16 %or, 1024
  %res = select i1 %cmp, i16 2, i16 5
  ret i16 %res
}

; CHECK-LABEL: overflow_mul
define zeroext i16 @overflow_mul(i16 zeroext %a, i16 zeroext %b) {
  %mul = mul i16 %a, %b
  %or = or i16 %mul, 1
  %cmp = icmp ugt i16 %or, 1024
  %res = select i1 %cmp, i16 2, i16 5
  ret i16 %res
}

; CHECK-LABEL: overflow_shl
define zeroext i16 @overflow_shl(i16 zeroext %a, i16 zeroext %b) {
  %shl = shl i16 %a, %b
  %or = or i16 %shl, 1
  %cmp = icmp ugt i16 %or, 1024
  %res = select i1 %cmp, i16 2, i16 5
  ret i16 %res
}

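; The i16 overflow_* cases above feed the result through an 'or' before the
; compare; the i8 cases below compare an add directly against a variable or
; constant limit.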
; CHECK-LABEL: overflow_add_no_consts:
; CHECK: add r0, r1
; CHECK: uxtb [[EXT:r[0-9]+]], r0
; CHECK: cmp [[EXT]], r2
; CHECK: movhi r0, #8
define i32 @overflow_add_no_consts(i8 zeroext %a, i8 zeroext %b, i8 zeroext %limit) {
  %add = add i8 %a, %b
  %cmp = icmp ugt i8 %add, %limit
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; CHECK-LABEL: overflow_add_const_limit:
; CHECK: add r0, r1
; CHECK: uxtb [[EXT:r[0-9]+]], r0
; CHECK: cmp [[EXT]], #128
; CHECK: movhi r0, #8
define i32 @overflow_add_const_limit(i8 zeroext %a, i8 zeroext %b) {
  %add = add i8 %a, %b
  %cmp = icmp ugt i8 %add, 128
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; CHECK-LABEL: overflow_add_positive_const_limit:
; CHECK: adds r0, #1
; CHECK: uxtb [[EXT:r[0-9]+]], r0
; CHECK: cmp [[EXT]], #128
; CHECK: movhi r0, #8
define i32 @overflow_add_positive_const_limit(i8 zeroext %a) {
  %add = add i8 %a, 1
  %cmp = icmp ugt i8 %add, 128
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

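; The "safe"/"unsafe" pairs check when a decremented i8 value may be compared
; against its limit without first re-extending (uxtb) the intermediate result.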
; CHECK-LABEL: unsafe_add_underflow:
define i32 @unsafe_add_underflow(i8 zeroext %a) {
  %add = add i8 %a, -2
  %cmp = icmp ugt i8 %add, 254
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; CHECK-LABEL: safe_add_underflow:
; CHECK: subs [[MINUS_1:r[0-9]+]], r0, #1
; CHECK-NOT: uxtb
; CHECK: cmp [[MINUS_1]], #254
; CHECK: movhi r0, #8
define i32 @safe_add_underflow(i8 zeroext %a) {
  %add = add i8 %a, -1
  %cmp = icmp ugt i8 %add, 254
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; CHECK-LABEL: safe_add_underflow_neg:
; CHECK: subs [[MINUS_1:r[0-9]+]], r0, #2
; CHECK-NOT: uxtb
; CHECK: cmp [[MINUS_1]], #251
; CHECK: movlo r0, #8
define i32 @safe_add_underflow_neg(i8 zeroext %a) {
  %add = add i8 %a, -2
  %cmp = icmp ule i8 %add, -6
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; CHECK-LABEL: overflow_sub_negative_const_limit:
; CHECK: adds r0, #1
; CHECK: uxtb [[EXT:r[0-9]+]], r0
; CHECK: cmp [[EXT]], #128
; CHECK: movhi r0, #8
define i32 @overflow_sub_negative_const_limit(i8 zeroext %a) {
  %sub = sub i8 %a, -1
  %cmp = icmp ugt i8 %sub, 128
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; CHECK-LABEL: unsafe_sub_underflow:
; CHECK: subs r0, #6
; CHECK: uxtb [[EXT:r[0-9]+]], r0
; CHECK: cmp [[EXT]], #250
; CHECK: movhi r0, #8
define i32 @unsafe_sub_underflow(i8 zeroext %a) {
  %sub = sub i8 %a, 6
  %cmp = icmp ugt i8 %sub, 250
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; CHECK-LABEL: safe_sub_underflow:
; CHECK: subs [[MINUS_1:r[0-9]+]], r0, #1
; CHECK-NOT: uxtb
; CHECK: cmp [[MINUS_1]], #255
; CHECK: movlo r0, #8
define i32 @safe_sub_underflow(i8 zeroext %a) {
  %sub = sub i8 %a, 1
  %cmp = icmp ule i8 %sub, 254
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; CHECK-LABEL: safe_sub_underflow_neg
; CHECK: subs [[MINUS_1:r[0-9]+]], r0, #4
; CHECK-NOT: uxtb
; CHECK: cmp [[MINUS_1]], #250
; CHECK: movhi r0, #8
define i32 @safe_sub_underflow_neg(i8 zeroext %a) {
  %sub = sub i8 %a, 4
  %cmp = icmp uge i8 %sub, -5
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

; CHECK-LABEL: unsafe_sub_underflow_neg
; CHECK: subs r0, #4
; CHECK: uxtb [[EXT:r[0-9]+]], r0
; CHECK: cmp [[EXT]], #253
; CHECK: movlo r0, #8
define i32 @unsafe_sub_underflow_neg(i8 zeroext %a) {
  %sub = sub i8 %a, 4
  %cmp = icmp ult i8 %sub, -3
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

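; The *_imm_var and *_var_imm cases load the variable operand from memory and
; put the negative constant on either side of the add/sub, checking that the
; promoted immediate (e.g. #248 for -8, #129 for -127) is used directly.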
; CHECK-LABEL: safe_sub_imm_var
; CHECK: rsb.w [[RSUB:r[0-9]+]], r0, #248
; CHECK-NOT: uxt
; CHECK: cmp [[RSUB]], #252
define i32 @safe_sub_imm_var(i8* %b) {
entry:
  %0 = load i8, i8* %b, align 1
  %sub = sub nuw nsw i8 -8, %0
  %cmp = icmp ugt i8 %sub, 252
  %conv4 = zext i1 %cmp to i32
  ret i32 %conv4
}

; CHECK-LABEL: safe_sub_var_imm
; CHECK: sub.w [[ADD:r[0-9]+]], r0, #248
; CHECK-NOT: uxt
; CHECK: cmp [[ADD]], #252
define i32 @safe_sub_var_imm(i8* %b) {
entry:
  %0 = load i8, i8* %b, align 1
  %sub = sub nuw nsw i8 %0, -8
  %cmp = icmp ugt i8 %sub, 252
  %conv4 = zext i1 %cmp to i32
  ret i32 %conv4
}

; CHECK-LABEL: safe_add_imm_var
; CHECK: add.w [[ADD:r[0-9]+]], r0, #129
; CHECK-NOT: uxt
; CHECK: cmp [[ADD]], #127
define i32 @safe_add_imm_var(i8* %b) {
entry:
  %0 = load i8, i8* %b, align 1
  %add = add nuw nsw i8 -127, %0
  %cmp = icmp ugt i8 %add, 127
  %conv4 = zext i1 %cmp to i32
  ret i32 %conv4
}

; CHECK-LABEL: safe_add_var_imm
; CHECK: add.w [[SUB:r[0-9]+]], r0, #129
; CHECK-NOT: uxt
; CHECK: cmp [[SUB]], #127
define i32 @safe_add_var_imm(i8* %b) {
entry:
  %0 = load i8, i8* %b, align 1
  %add = add nuw nsw i8 %0, -127
  %cmp = icmp ugt i8 %add, 127
  %conv4 = zext i1 %cmp to i32
  ret i32 %conv4
}

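; convert_add_order chains two compares of the same or'd value and selects
; between two masks based on both results.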
; CHECK-LABEL: convert_add_order
; CHECK: orr{{.*}}, #1
; CHECK: sub{{.*}}, #40
define i8 @convert_add_order(i8 zeroext %arg) {
  %mask.0 = and i8 %arg, 1
  %mask.1 = and i8 %arg, 2
  %shl = or i8 %arg, 1
  %add = add nuw i8 %shl, 10
  %cmp.0 = icmp ult i8 %add, 60
  %sub = add nsw i8 %shl, -40
  %cmp.1 = icmp ult i8 %sub, 20
  %mask.sel = select i1 %cmp.1, i8 %mask.0, i8 %mask.1
  %res = select i1 %cmp.0, i8 %mask.sel, i8 %arg
  ret i8 %res
}

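; The final pair subtracts 11 from a truncated value and compares it with an
; i8 argument; with a signext argument both operands must be zero-extended
; (uxtb) before the compare.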
; CHECK-LABEL: underflow_if_sub
; CHECK: add{{.}} [[ADD:r[0-9]+]], #245
; CHECK: cmp [[ADD]], r1
define i8 @underflow_if_sub(i32 %arg, i8 zeroext %arg1) {
  %cmp = icmp sgt i32 %arg, 0
  %conv = zext i1 %cmp to i32
  %and = and i32 %arg, %conv
  %trunc = trunc i32 %and to i8
  %conv1 = add nuw nsw i8 %trunc, -11
  %cmp.1 = icmp ult i8 %conv1, %arg1
  %res = select i1 %cmp.1, i8 %conv1, i8 100
  ret i8 %res
}

; CHECK-LABEL: underflow_if_sub_signext
; CHECK: uxtb [[UXT1:r[0-9]+]], r1
; CHECK: sub{{.*}} [[SUB:r[0-9]+]], #11
; CHECK: uxtb [[UXT_SUB:r[0-9]+]], [[SUB]]
; CHECK: cmp{{.*}}[[UXT_SUB]]
define i8 @underflow_if_sub_signext(i32 %arg, i8 signext %arg1) {
  %cmp = icmp sgt i32 %arg, 0
  %conv = zext i1 %cmp to i32
  %and = and i32 %arg, %conv
  %trunc = trunc i32 %and to i8
  %conv1 = add nuw nsw i8 %trunc, -11
  %cmp.1 = icmp ugt i8 %arg1, %conv1
  %res = select i1 %cmp.1, i8 %conv1, i8 100
  ret i8 %res
}