; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=loongarch32 %s -o - | FileCheck %s --check-prefix=LA32
; RUN: llc -mtriple=loongarch64 %s -o - | FileCheck %s --check-prefix=LA64
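
; The i16 tests below apply an add/sub/mul/shl, or the result with 1, and
; compare it (unsigned) against 1024; the narrow value is re-truncated with
; bstrpick.[wd] before the compare on both targets.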
define zeroext i16 @overflow_add(i16 zeroext %a, i16 zeroext %b) {
; LA32-LABEL: overflow_add:
; LA32:       # %bb.0:
; LA32-NEXT:    add.w $a0, $a1, $a0
; LA32-NEXT:    ori $a0, $a0, 1
; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
; LA32-NEXT:    ori $a1, $zero, 1024
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 5
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 2
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_add:
; LA64:       # %bb.0:
; LA64-NEXT:    add.d $a0, $a1, $a0
; LA64-NEXT:    ori $a0, $a0, 1
; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
; LA64-NEXT:    ori $a1, $zero, 1024
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 5
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 2
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = add i16 %b, %a
  %or = or i16 %add, 1
  %cmp = icmp ugt i16 %or, 1024
  %res = select i1 %cmp, i16 2, i16 5
  ret i16 %res
}

define zeroext i16 @overflow_sub(i16 zeroext %a, i16 zeroext %b) {
; LA32-LABEL: overflow_sub:
; LA32:       # %bb.0:
; LA32-NEXT:    sub.w $a0, $a0, $a1
; LA32-NEXT:    ori $a0, $a0, 1
; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
; LA32-NEXT:    ori $a1, $zero, 1024
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 5
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 2
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_sub:
; LA64:       # %bb.0:
; LA64-NEXT:    sub.d $a0, $a0, $a1
; LA64-NEXT:    ori $a0, $a0, 1
; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
; LA64-NEXT:    ori $a1, $zero, 1024
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 5
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 2
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = sub i16 %a, %b
  %or = or i16 %add, 1
  %cmp = icmp ugt i16 %or, 1024
  %res = select i1 %cmp, i16 2, i16 5
  ret i16 %res
}

define zeroext i16 @overflow_mul(i16 zeroext %a, i16 zeroext %b) {
; LA32-LABEL: overflow_mul:
; LA32:       # %bb.0:
; LA32-NEXT:    mul.w $a0, $a1, $a0
; LA32-NEXT:    ori $a0, $a0, 1
; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
; LA32-NEXT:    ori $a1, $zero, 1024
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 5
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 2
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_mul:
; LA64:       # %bb.0:
; LA64-NEXT:    mul.d $a0, $a1, $a0
; LA64-NEXT:    ori $a0, $a0, 1
; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
; LA64-NEXT:    ori $a1, $zero, 1024
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 5
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 2
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = mul i16 %b, %a
  %or = or i16 %add, 1
  %cmp = icmp ugt i16 %or, 1024
  %res = select i1 %cmp, i16 2, i16 5
  ret i16 %res
}

define zeroext i16 @overflow_shl(i16 zeroext %a, i16 zeroext %b) {
; LA32-LABEL: overflow_shl:
; LA32:       # %bb.0:
; LA32-NEXT:    sll.w $a0, $a0, $a1
; LA32-NEXT:    ori $a0, $a0, 1
; LA32-NEXT:    bstrpick.w $a0, $a0, 15, 0
; LA32-NEXT:    ori $a1, $zero, 1024
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 5
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 2
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_shl:
; LA64:       # %bb.0:
; LA64-NEXT:    sll.d $a0, $a0, $a1
; LA64-NEXT:    ori $a0, $a0, 1
; LA64-NEXT:    bstrpick.d $a0, $a0, 15, 0
; LA64-NEXT:    ori $a1, $zero, 1024
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 5
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 2
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = shl i16 %a, %b
  %or = or i16 %add, 1
  %cmp = icmp ugt i16 %or, 1024
  %res = select i1 %cmp, i16 2, i16 5
  ret i16 %res
}

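; The i8 variants mask the 8-bit sum with andi 255 and compare it against
; either a register operand (%limit) or an immediate limit.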
define i32 @overflow_add_no_consts(i8 zeroext %a, i8 zeroext %b, i8 zeroext %limit) {
; LA32-LABEL: overflow_add_no_consts:
; LA32:       # %bb.0:
; LA32-NEXT:    add.w $a0, $a1, $a0
; LA32-NEXT:    andi $a0, $a0, 255
; LA32-NEXT:    sltu $a0, $a2, $a0
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_add_no_consts:
; LA64:       # %bb.0:
; LA64-NEXT:    add.d $a0, $a1, $a0
; LA64-NEXT:    andi $a0, $a0, 255
; LA64-NEXT:    sltu $a0, $a2, $a0
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = add i8 %b, %a
  %cmp = icmp ugt i8 %add, %limit
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

define i32 @overflow_add_const_limit(i8 zeroext %a, i8 zeroext %b) {
; LA32-LABEL: overflow_add_const_limit:
; LA32:       # %bb.0:
; LA32-NEXT:    add.w $a0, $a1, $a0
; LA32-NEXT:    andi $a0, $a0, 255
; LA32-NEXT:    ori $a1, $zero, 128
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_add_const_limit:
; LA64:       # %bb.0:
; LA64-NEXT:    add.d $a0, $a1, $a0
; LA64-NEXT:    andi $a0, $a0, 255
; LA64-NEXT:    ori $a1, $zero, 128
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = add i8 %b, %a
  %cmp = icmp ugt i8 %add, -128
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

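; Here the select condition is a signed compare against -1, so the argument
; is sign-extended with ext.w.b before slti.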
define i32 @overflow_add_positive_const_limit(i8 zeroext %a) {
; LA32-LABEL: overflow_add_positive_const_limit:
; LA32:       # %bb.0:
; LA32-NEXT:    ext.w.b $a0, $a0
; LA32-NEXT:    slti $a0, $a0, -1
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_add_positive_const_limit:
; LA64:       # %bb.0:
; LA64-NEXT:    ext.w.b $a0, $a0
; LA64-NEXT:    slti $a0, $a0, -1
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp = icmp slt i8 %a, -1
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

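; Add-underflow cases built from equality or small-range checks; these lower
; to addi + sltui (or a bare sltui when comparing with zero).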
define i32 @unsafe_add_underflow(i8 zeroext %a) {
; LA32-LABEL: unsafe_add_underflow:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $a0, $a0, -1
; LA32-NEXT:    sltui $a0, $a0, 1
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: unsafe_add_underflow:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $a0, $a0, -1
; LA64-NEXT:    sltui $a0, $a0, 1
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp = icmp eq i8 %a, 1
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

define i32 @safe_add_underflow(i8 zeroext %a) {
; LA32-LABEL: safe_add_underflow:
; LA32:       # %bb.0:
; LA32-NEXT:    sltui $a0, $a0, 1
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_add_underflow:
; LA64:       # %bb.0:
; LA64-NEXT:    sltui $a0, $a0, 1
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp = icmp eq i8 %a, 0
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

define i32 @safe_add_underflow_neg(i8 zeroext %a) {
; LA32-LABEL: safe_add_underflow_neg:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $a0, $a0, -2
; LA32-NEXT:    sltui $a0, $a0, 251
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_add_underflow_neg:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $a0, $a0, -2
; LA64-NEXT:    sltui $a0, $a0, 251
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %add = add i8 %a, -2
  %cmp = icmp ult i8 %add, -5
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

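; The sub-flavoured checks mirror the add ones above: a signed compare
; against -1, then i8 adds of negative constants compared (unsigned) against
; other negative, i.e. large unsigned, constants.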
define i32 @overflow_sub_negative_const_limit(i8 zeroext %a) {
; LA32-LABEL: overflow_sub_negative_const_limit:
; LA32:       # %bb.0:
; LA32-NEXT:    ext.w.b $a0, $a0
; LA32-NEXT:    slti $a0, $a0, -1
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: overflow_sub_negative_const_limit:
; LA64:       # %bb.0:
; LA64-NEXT:    ext.w.b $a0, $a0
; LA64-NEXT:    slti $a0, $a0, -1
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp = icmp slt i8 %a, -1
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

define i32 @sext_sub_underflow(i8 zeroext %a) {
; LA32-LABEL: sext_sub_underflow:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $a0, $a0, -6
; LA32-NEXT:    addi.w $a1, $zero, -6
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: sext_sub_underflow:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $a0, $a0, -6
; LA64-NEXT:    addi.w $a1, $zero, -6
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %sub = add i8 %a, -6
  %cmp = icmp ugt i8 %sub, -6
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

define i32 @safe_sub_underflow(i8 zeroext %a) {
; LA32-LABEL: safe_sub_underflow:
; LA32:       # %bb.0:
; LA32-NEXT:    sltui $a0, $a0, 1
; LA32-NEXT:    ori $a1, $zero, 8
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 16
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_sub_underflow:
; LA64:       # %bb.0:
; LA64-NEXT:    sltui $a0, $a0, 1
; LA64-NEXT:    ori $a1, $zero, 8
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 16
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp.not = icmp eq i8 %a, 0
  %res = select i1 %cmp.not, i32 16, i32 8
  ret i32 %res
}

define i32 @safe_sub_underflow_neg(i8 zeroext %a) {
; LA32-LABEL: safe_sub_underflow_neg:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $a0, $a0, -4
; LA32-NEXT:    ori $a1, $zero, 250
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_sub_underflow_neg:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $a0, $a0, -4
; LA64-NEXT:    ori $a1, $zero, 250
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %sub = add i8 %a, -4
  %cmp = icmp ugt i8 %sub, -6
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

define i32 @sext_sub_underflow_neg(i8 zeroext %a) {
; LA32-LABEL: sext_sub_underflow_neg:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $a0, $a0, -4
; LA32-NEXT:    sltui $a0, $a0, -3
; LA32-NEXT:    ori $a1, $zero, 16
; LA32-NEXT:    masknez $a1, $a1, $a0
; LA32-NEXT:    ori $a2, $zero, 8
; LA32-NEXT:    maskeqz $a0, $a2, $a0
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: sext_sub_underflow_neg:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $a0, $a0, -4
; LA64-NEXT:    sltui $a0, $a0, -3
; LA64-NEXT:    ori $a1, $zero, 16
; LA64-NEXT:    masknez $a1, $a1, $a0
; LA64-NEXT:    ori $a2, $zero, 8
; LA64-NEXT:    maskeqz $a0, $a2, $a0
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %sub = add i8 %a, -4
  %cmp = icmp ult i8 %sub, -3
  %res = select i1 %cmp, i32 8, i32 16
  ret i32 %res
}

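; safe_sub_imm_var, safe_add_imm_var and safe_add_var_imm fold to constant
; returns (0 or 1); safe_sub_var_imm keeps a load, an add of -248 and an
; unsigned compare.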
define i32 @safe_sub_imm_var(ptr nocapture readonly %b) local_unnamed_addr #1 {
; LA32-LABEL: safe_sub_imm_var:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    move $a0, $zero
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_sub_imm_var:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    move $a0, $zero
; LA64-NEXT:    ret
entry:
  ret i32 0
}

define i32 @safe_sub_var_imm(ptr nocapture readonly %b) local_unnamed_addr #1 {
; LA32-LABEL: safe_sub_var_imm:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    ld.bu $a0, $a0, 0
; LA32-NEXT:    addi.w $a0, $a0, -248
; LA32-NEXT:    addi.w $a1, $zero, -4
; LA32-NEXT:    sltu $a0, $a1, $a0
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_sub_var_imm:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    ld.bu $a0, $a0, 0
; LA64-NEXT:    addi.d $a0, $a0, -248
; LA64-NEXT:    addi.w $a1, $zero, -4
; LA64-NEXT:    sltu $a0, $a1, $a0
; LA64-NEXT:    ret
entry:
  %0 = load i8, ptr %b, align 1
  %sub = add nsw i8 %0, 8
  %cmp = icmp ugt i8 %sub, -4
  %conv4 = zext i1 %cmp to i32
  ret i32 %conv4
}

define i32 @safe_add_imm_var(ptr nocapture readnone %b) {
; LA32-LABEL: safe_add_imm_var:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    ori $a0, $zero, 1
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_add_imm_var:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    ori $a0, $zero, 1
; LA64-NEXT:    ret
entry:
  ret i32 1
}

define i32 @safe_add_var_imm(ptr nocapture readnone %b) {
; LA32-LABEL: safe_add_var_imm:
; LA32:       # %bb.0: # %entry
; LA32-NEXT:    ori $a0, $zero, 1
; LA32-NEXT:    ret
;
; LA64-LABEL: safe_add_var_imm:
; LA64:       # %bb.0: # %entry
; LA64-NEXT:    ori $a0, $zero, 1
; LA64-NEXT:    ret
entry:
  ret i32 1
}

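; convert_add_order: two unsigned range checks feed nested selects whose
; result is masked back into the argument.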
define i8 @convert_add_order(i8 zeroext %arg) {
; LA32-LABEL: convert_add_order:
; LA32:       # %bb.0:
; LA32-NEXT:    ori $a1, $a0, 1
; LA32-NEXT:    sltui $a2, $a1, 50
; LA32-NEXT:    addi.w $a1, $a1, -40
; LA32-NEXT:    sltui $a1, $a1, 20
; LA32-NEXT:    ori $a3, $zero, 2
; LA32-NEXT:    sub.w $a1, $a3, $a1
; LA32-NEXT:    ori $a3, $zero, 255
; LA32-NEXT:    masknez $a3, $a3, $a2
; LA32-NEXT:    maskeqz $a1, $a1, $a2
; LA32-NEXT:    or $a1, $a1, $a3
; LA32-NEXT:    and $a0, $a1, $a0
; LA32-NEXT:    ret
;
; LA64-LABEL: convert_add_order:
; LA64:       # %bb.0:
; LA64-NEXT:    ori $a1, $a0, 1
; LA64-NEXT:    sltui $a2, $a1, 50
; LA64-NEXT:    addi.d $a1, $a1, -40
; LA64-NEXT:    sltui $a1, $a1, 20
; LA64-NEXT:    ori $a3, $zero, 2
; LA64-NEXT:    sub.d $a1, $a3, $a1
; LA64-NEXT:    ori $a3, $zero, 255
; LA64-NEXT:    masknez $a3, $a3, $a2
; LA64-NEXT:    maskeqz $a1, $a1, $a2
; LA64-NEXT:    or $a1, $a1, $a3
; LA64-NEXT:    and $a0, $a1, $a0
; LA64-NEXT:    ret
  %shl = or i8 %arg, 1
  %cmp.0 = icmp ult i8 %shl, 50
  %sub = add nsw i8 %shl, -40
  %cmp.1 = icmp ult i8 %sub, 20
  %mask.sel.v = select i1 %cmp.1, i8 1, i8 2
  %mask.sel = select i1 %cmp.0, i8 %mask.sel.v, i8 -1
  %res = and i8 %mask.sel, %arg
  ret i8 %res
}

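; underflow_if_sub and its signext variant guard an i8 add of -11 (245) with
; a "is the i32 argument positive" check before the unsigned compare; the
; signext version additionally masks %arg1 with andi 255.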
define i8 @underflow_if_sub(i32 %arg, i8 zeroext %arg1) {
; LA32-LABEL: underflow_if_sub:
; LA32:       # %bb.0:
; LA32-NEXT:    slt $a2, $zero, $a0
; LA32-NEXT:    and $a0, $a2, $a0
; LA32-NEXT:    addi.w $a0, $a0, 245
; LA32-NEXT:    sltu $a1, $a0, $a1
; LA32-NEXT:    maskeqz $a0, $a0, $a1
; LA32-NEXT:    ori $a2, $zero, 100
; LA32-NEXT:    masknez $a1, $a2, $a1
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: underflow_if_sub:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.w $a2, $a0, 0
; LA64-NEXT:    slt $a2, $zero, $a2
; LA64-NEXT:    and $a0, $a2, $a0
; LA64-NEXT:    addi.d $a0, $a0, 245
; LA64-NEXT:    sltu $a1, $a0, $a1
; LA64-NEXT:    maskeqz $a0, $a0, $a1
; LA64-NEXT:    ori $a2, $zero, 100
; LA64-NEXT:    masknez $a1, $a2, $a1
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp = icmp sgt i32 %arg, 0
  %conv = zext i1 %cmp to i32
  %and = and i32 %conv, %arg
  %trunc = trunc i32 %and to i8
  %conv1 = add nuw nsw i8 %trunc, -11
  %cmp.1 = icmp ult i8 %conv1, %arg1
  %res = select i1 %cmp.1, i8 %conv1, i8 100
  ret i8 %res
}

define i8 @underflow_if_sub_signext(i32 %arg, i8 signext %arg1) {
; LA32-LABEL: underflow_if_sub_signext:
; LA32:       # %bb.0:
; LA32-NEXT:    andi $a1, $a1, 255
; LA32-NEXT:    slt $a2, $zero, $a0
; LA32-NEXT:    and $a0, $a2, $a0
; LA32-NEXT:    addi.w $a0, $a0, 245
; LA32-NEXT:    sltu $a1, $a0, $a1
; LA32-NEXT:    maskeqz $a0, $a0, $a1
; LA32-NEXT:    ori $a2, $zero, 100
; LA32-NEXT:    masknez $a1, $a2, $a1
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    ret
;
; LA64-LABEL: underflow_if_sub_signext:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.w $a2, $a0, 0
; LA64-NEXT:    andi $a1, $a1, 255
; LA64-NEXT:    slt $a2, $zero, $a2
; LA64-NEXT:    and $a0, $a2, $a0
; LA64-NEXT:    addi.d $a0, $a0, 245
; LA64-NEXT:    sltu $a1, $a0, $a1
; LA64-NEXT:    maskeqz $a0, $a0, $a1
; LA64-NEXT:    ori $a2, $zero, 100
; LA64-NEXT:    masknez $a1, $a2, $a1
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    ret
  %cmp = icmp sgt i32 %arg, 0
  %conv = zext i1 %cmp to i32
  %and = and i32 %conv, %arg
  %trunc = trunc i32 %and to i8
  %conv1 = add nuw nsw i8 %trunc, -11
  %cmp.1 = icmp ult i8 %conv1, %arg1
  %res = select i1 %cmp.1, i8 %conv1, i8 100
  ret i8 %res
}