; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN:   -riscv-experimental-rv64-legal-i32 | FileCheck %s -check-prefix=RV64

; Get the actual value of the overflow bit.
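; Signed addition overflows iff the sign of the RHS disagrees with the
; (result < LHS) comparison, hence the slt/slti/xor sequences checked below.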
define zeroext i1 @saddo1.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
; RV64-LABEL: saddo1.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addw a3, a0, a1
; RV64-NEXT:    slt a0, a3, a0
; RV64-NEXT:    slti a1, a1, 0
; RV64-NEXT:    xor a0, a1, a0
; RV64-NEXT:    sw a3, 0(a2)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Test the immediate version.
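; With a known-positive immediate the slti on the RHS folds to 0, so the
; overflow bit reduces to (result < LHS).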
define zeroext i1 @saddo2.i32(i32 signext %v1, ptr %res) {
; RV64-LABEL: saddo2.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addiw a2, a0, 4
; RV64-NEXT:    slt a0, a2, a0
; RV64-NEXT:    sw a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 4)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Test negative immediates.
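; With a known-negative immediate the condition is inverted instead:
; overflow iff not (result < LHS), hence the xori 1.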
define zeroext i1 @saddo3.i32(i32 signext %v1, ptr %res) {
; RV64-LABEL: saddo3.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addiw a2, a0, -4
; RV64-NEXT:    slt a0, a2, a0
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    sw a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 -4)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Test immediates that are too large to be encoded.
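; 16777215 (0xffffff) does not fit in a 12-bit immediate, so it is
; materialized with lui+addi before the add.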
define zeroext i1 @saddo4.i32(i32 signext %v1, ptr %res) {
; RV64-LABEL: saddo4.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    lui a2, 4096
; RV64-NEXT:    addi a2, a2, -1
; RV64-NEXT:    addw a2, a0, a2
; RV64-NEXT:    slt a0, a2, a0
; RV64-NEXT:    sw a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 16777215)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, ptr %res) {
; RV64-LABEL: saddo1.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    add a3, a0, a1
; RV64-NEXT:    slt a0, a3, a0
; RV64-NEXT:    slti a1, a1, 0
; RV64-NEXT:    xor a0, a1, a0
; RV64-NEXT:    sd a3, 0(a2)
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @saddo2.i64(i64 %v1, ptr %res) {
; RV64-LABEL: saddo2.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi a2, a0, 4
; RV64-NEXT:    slt a0, a2, a0
; RV64-NEXT:    sd a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 4)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @saddo3.i64(i64 %v1, ptr %res) {
; RV64-LABEL: saddo3.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi a2, a0, -4
; RV64-NEXT:    slt a0, a2, a0
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    sd a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 -4)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

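; Unsigned addition overflows iff the result is unsigned-less-than either
; operand; the uaddo tests below compare against the first operand with sltu.
; Adding the constant 1 is special-cased: it wraps iff the result is zero,
; hence the seqz.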
define zeroext i1 @uaddo.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
; RV64-LABEL: uaddo.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addw a1, a0, a1
; RV64-NEXT:    sltu a0, a1, a0
; RV64-NEXT:    sw a1, 0(a2)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @uaddo.i32.constant(i32 signext %v1, ptr %res) {
; RV64-LABEL: uaddo.i32.constant:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addiw a2, a0, -2
; RV64-NEXT:    sltu a0, a2, a0
; RV64-NEXT:    sw a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 -2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @uaddo.i32.constant_one(i32 signext %v1, ptr %res) {
; RV64-LABEL: uaddo.i32.constant_one:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addiw a2, a0, 1
; RV64-NEXT:    seqz a0, a2
; RV64-NEXT:    sw a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 1)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @uaddo.i64(i64 %v1, i64 %v2, ptr %res) {
; RV64-LABEL: uaddo.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    add a1, a0, a1
; RV64-NEXT:    sltu a0, a1, a0
; RV64-NEXT:    sd a1, 0(a2)
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @uaddo.i64.constant_one(i64 %v1, ptr %res) {
; RV64-LABEL: uaddo.i64.constant_one:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi a2, a0, 1
; RV64-NEXT:    seqz a0, a2
; RV64-NEXT:    sd a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 1)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

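; Signed subtraction overflows iff (RHS > 0) disagrees with (result < LHS),
; hence the sgtz/slt/xor sequence. A negative constant RHS is folded into an
; addition of the negated value first.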
define zeroext i1 @ssubo1.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
; RV64-LABEL: ssubo1.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    sgtz a3, a1
; RV64-NEXT:    subw a1, a0, a1
; RV64-NEXT:    slt a0, a1, a0
; RV64-NEXT:    xor a0, a3, a0
; RV64-NEXT:    sw a1, 0(a2)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @ssubo2.i32(i32 signext %v1, ptr %res) {
; RV64-LABEL: ssubo2.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addiw a2, a0, 4
; RV64-NEXT:    slt a0, a2, a0
; RV64-NEXT:    sw a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 -4)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, ptr %res) {
; RV64-LABEL: ssubo.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    sgtz a3, a1
; RV64-NEXT:    sub a1, a0, a1
; RV64-NEXT:    slt a0, a1, a0
; RV64-NEXT:    xor a0, a3, a0
; RV64-NEXT:    sd a1, 0(a2)
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

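; Unsigned subtraction borrows iff LHS < RHS, which is equivalent to
; LHS <u result, so a single sltu suffices.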
define zeroext i1 @usubo.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
; RV64-LABEL: usubo.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    subw a1, a0, a1
; RV64-NEXT:    sltu a0, a0, a1
; RV64-NEXT:    sw a1, 0(a2)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @usubo.i32.constant.rhs(i32 signext %v1, ptr %res) {
; RV64-LABEL: usubo.i32.constant.rhs:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addiw a2, a0, 2
; RV64-NEXT:    sltu a0, a0, a2
; RV64-NEXT:    sw a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 -2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @usubo.i32.constant.lhs(i32 signext %v1, ptr %res) {
; RV64-LABEL: usubo.i32.constant.lhs:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    li a2, -2
; RV64-NEXT:    subw a2, a2, a0
; RV64-NEXT:    addi a0, a2, 1
; RV64-NEXT:    seqz a0, a0
; RV64-NEXT:    sw a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 -2, i32 %v1)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @usubo.i64(i64 %v1, i64 %v2, ptr %res) {
; RV64-LABEL: usubo.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    sub a1, a0, a1
; RV64-NEXT:    sltu a0, a0, a1
; RV64-NEXT:    sd a1, 0(a2)
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

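; For i32 smulo the product is computed in 64 bits: overflow iff bits 63:32
; of the product differ from the sign extension of bit 31 (srai 32 vs
; sraiw 31). For i64, mulh supplies the high half, which must match the
; arithmetic sign (srai 63) of the low half.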
define zeroext i1 @smulo.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
; RV64-LABEL: smulo.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    mul a1, a0, a1
; RV64-NEXT:    srai a0, a1, 32
; RV64-NEXT:    sraiw a3, a1, 31
; RV64-NEXT:    xor a0, a0, a3
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    sw a1, 0(a2)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @smulo2.i32(i32 signext %v1, ptr %res) {
; RV64-LABEL: smulo2.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    li a2, 13
; RV64-NEXT:    mul a2, a0, a2
; RV64-NEXT:    srai a0, a2, 32
; RV64-NEXT:    sraiw a3, a2, 31
; RV64-NEXT:    xor a0, a0, a3
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    sw a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 13)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @smulo.i64(i64 %v1, i64 %v2, ptr %res) {
; RV64-LABEL: smulo.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    mulh a3, a0, a1
; RV64-NEXT:    mul a1, a0, a1
; RV64-NEXT:    srai a0, a1, 63
; RV64-NEXT:    xor a0, a3, a0
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    sd a1, 0(a2)
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @smulo2.i64(i64 %v1, ptr %res) {
; RV64-LABEL: smulo2.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    li a2, 13
; RV64-NEXT:    mulh a3, a0, a2
; RV64-NEXT:    mul a2, a0, a2
; RV64-NEXT:    srai a0, a2, 63
; RV64-NEXT:    xor a0, a3, a0
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    sd a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 13)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

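; For i32 umulo both operands are shifted into the upper word so one mulhu
; produces the full 64-bit product; a nonzero upper half signals overflow.
; For i64, overflow is simply mulhu != 0.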
define zeroext i1 @umulo.i32(i32 signext %v1, i32 signext %v2, ptr %res) {
; RV64-LABEL: umulo.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    slli a1, a1, 32
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    mulhu a1, a0, a1
; RV64-NEXT:    srai a0, a1, 32
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    sw a1, 0(a2)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @umulo2.i32(i32 signext %v1, ptr %res) {
; RV64-LABEL: umulo2.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    li a2, 13
; RV64-NEXT:    slli a2, a2, 32
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    mulhu a2, a0, a2
; RV64-NEXT:    srli a0, a2, 32
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    sw a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 13)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %val, ptr %res
  ret i1 %obit
}

; Similar to umulo.i32, but storing the overflow and returning the result.
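; Here the i1 overflow flag is zero-extended and stored, while the
; sign-extended product is returned in a0.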
define signext i32 @umulo3.i32(i32 signext %0, i32 signext %1, ptr %2) {
; RV64-LABEL: umulo3.i32:
; RV64:       # %bb.0:
; RV64-NEXT:    slli a1, a1, 32
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    mulhu a0, a0, a1
; RV64-NEXT:    srai a1, a0, 32
; RV64-NEXT:    snez a1, a1
; RV64-NEXT:    sext.w a0, a0
; RV64-NEXT:    sw a1, 0(a2)
; RV64-NEXT:    ret
  %4 = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %0, i32 %1)
  %5 = extractvalue { i32, i1 } %4, 1
  %6 = extractvalue { i32, i1 } %4, 0
  %7 = zext i1 %5 to i32
  store i32 %7, ptr %2, align 4
  ret i32 %6
}

define zeroext i1 @umulo.i64(i64 %v1, i64 %v2, ptr %res) {
; RV64-LABEL: umulo.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    mulhu a3, a0, a1
; RV64-NEXT:    snez a3, a3
; RV64-NEXT:    mul a0, a0, a1
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    mv a0, a3
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @umulo2.i64(i64 %v1, ptr %res) {
; RV64-LABEL: umulo2.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    li a3, 13
; RV64-NEXT:    mulhu a2, a0, a3
; RV64-NEXT:    snez a2, a2
; RV64-NEXT:    mul a0, a0, a3
; RV64-NEXT:    sd a0, 0(a1)
; RV64-NEXT:    mv a0, a2
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 13)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

; Check the use of the overflow bit in combination with a select instruction.
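; The select tests branch directly on the recomputed overflow condition; the
; .not tests invert the overflow bit with xori 1.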
define i32 @saddo.select.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: saddo.select.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addw a2, a0, a1
; RV64-NEXT:    slt a2, a2, a0
; RV64-NEXT:    slti a3, a1, 0
; RV64-NEXT:    bne a3, a2, .LBB28_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB28_2: # %entry
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = select i1 %obit, i32 %v1, i32 %v2
  ret i32 %ret
}

define i1 @saddo.not.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: saddo.not.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addw a2, a0, a1
; RV64-NEXT:    slt a0, a2, a0
; RV64-NEXT:    slti a1, a1, 0
; RV64-NEXT:    xor a0, a1, a0
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = xor i1 %obit, true
  ret i1 %ret
}

define i64 @saddo.select.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: saddo.select.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    add a2, a0, a1
; RV64-NEXT:    slt a2, a2, a0
; RV64-NEXT:    slti a3, a1, 0
; RV64-NEXT:    bne a3, a2, .LBB30_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB30_2: # %entry
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
  %obit = extractvalue {i64, i1} %t, 1
  %ret = select i1 %obit, i64 %v1, i64 %v2
  ret i64 %ret
}

define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: saddo.not.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    add a2, a0, a1
; RV64-NEXT:    slt a0, a2, a0
; RV64-NEXT:    slti a1, a1, 0
; RV64-NEXT:    xor a0, a1, a0
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
  %obit = extractvalue {i64, i1} %t, 1
  %ret = xor i1 %obit, true
  ret i1 %ret
}

define i32 @uaddo.select.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: uaddo.select.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addw a2, a0, a1
; RV64-NEXT:    bltu a2, a0, .LBB32_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB32_2: # %entry
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = select i1 %obit, i32 %v1, i32 %v2
  ret i32 %ret
}

define i1 @uaddo.not.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: uaddo.not.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addw a1, a0, a1
; RV64-NEXT:    sltu a0, a1, a0
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = xor i1 %obit, true
  ret i1 %ret
}

define i64 @uaddo.select.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: uaddo.select.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    add a2, a0, a1
; RV64-NEXT:    bltu a2, a0, .LBB34_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB34_2: # %entry
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
  %obit = extractvalue {i64, i1} %t, 1
  %ret = select i1 %obit, i64 %v1, i64 %v2
  ret i64 %ret
}

define i1 @uaddo.not.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: uaddo.not.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    add a1, a0, a1
; RV64-NEXT:    sltu a0, a1, a0
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
  %obit = extractvalue {i64, i1} %t, 1
  %ret = xor i1 %obit, true
  ret i1 %ret
}

define i32 @ssubo.select.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: ssubo.select.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    sgtz a2, a1
; RV64-NEXT:    subw a3, a0, a1
; RV64-NEXT:    slt a3, a3, a0
; RV64-NEXT:    bne a2, a3, .LBB36_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB36_2: # %entry
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = select i1 %obit, i32 %v1, i32 %v2
  ret i32 %ret
}

define i1 @ssubo.not.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: ssubo.not.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    sgtz a2, a1
; RV64-NEXT:    subw a1, a0, a1
; RV64-NEXT:    slt a0, a1, a0
; RV64-NEXT:    xor a0, a2, a0
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = xor i1 %obit, true
  ret i1 %ret
}

define i64 @ssubo.select.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: ssubo.select.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    sgtz a2, a1
; RV64-NEXT:    sub a3, a0, a1
; RV64-NEXT:    slt a3, a3, a0
; RV64-NEXT:    bne a2, a3, .LBB38_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB38_2: # %entry
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
  %obit = extractvalue {i64, i1} %t, 1
  %ret = select i1 %obit, i64 %v1, i64 %v2
  ret i64 %ret
}

define i1 @ssub.not.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: ssub.not.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    sgtz a2, a1
; RV64-NEXT:    sub a1, a0, a1
; RV64-NEXT:    slt a0, a1, a0
; RV64-NEXT:    xor a0, a2, a0
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
  %obit = extractvalue {i64, i1} %t, 1
  %ret = xor i1 %obit, true
  ret i1 %ret
}

define i32 @usubo.select.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: usubo.select.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    subw a2, a0, a1
; RV64-NEXT:    bltu a0, a2, .LBB40_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB40_2: # %entry
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = select i1 %obit, i32 %v1, i32 %v2
  ret i32 %ret
}

define i1 @usubo.not.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: usubo.not.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    subw a1, a0, a1
; RV64-NEXT:    sltu a0, a0, a1
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = xor i1 %obit, true
  ret i1 %ret
}

define i64 @usubo.select.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: usubo.select.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    sub a2, a0, a1
; RV64-NEXT:    bltu a0, a2, .LBB42_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB42_2: # %entry
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
  %obit = extractvalue {i64, i1} %t, 1
  %ret = select i1 %obit, i64 %v1, i64 %v2
  ret i64 %ret
}

define i1 @usubo.not.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: usubo.not.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    sub a1, a0, a1
; RV64-NEXT:    sltu a0, a0, a1
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
  %obit = extractvalue {i64, i1} %t, 1
  %ret = xor i1 %obit, true
  ret i1 %ret
}

define i32 @smulo.select.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: smulo.select.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    mul a2, a0, a1
; RV64-NEXT:    srai a3, a2, 32
; RV64-NEXT:    sraiw a2, a2, 31
; RV64-NEXT:    bne a3, a2, .LBB44_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB44_2: # %entry
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = select i1 %obit, i32 %v1, i32 %v2
  ret i32 %ret
}

define i1 @smulo.not.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: smulo.not.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    mul a0, a0, a1
; RV64-NEXT:    srai a1, a0, 32
; RV64-NEXT:    sraiw a0, a0, 31
; RV64-NEXT:    xor a0, a1, a0
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = xor i1 %obit, true
  ret i1 %ret
}

define i64 @smulo.select.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: smulo.select.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    mulh a2, a0, a1
; RV64-NEXT:    mul a3, a0, a1
; RV64-NEXT:    srai a3, a3, 63
; RV64-NEXT:    bne a2, a3, .LBB46_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB46_2: # %entry
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
  %obit = extractvalue {i64, i1} %t, 1
  %ret = select i1 %obit, i64 %v1, i64 %v2
  ret i64 %ret
}

define i1 @smulo.not.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: smulo.not.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    mulh a2, a0, a1
; RV64-NEXT:    mul a0, a0, a1
; RV64-NEXT:    srai a0, a0, 63
; RV64-NEXT:    xor a0, a2, a0
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
  %obit = extractvalue {i64, i1} %t, 1
  %ret = xor i1 %obit, true
  ret i1 %ret
}

define i32 @umulo.select.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: umulo.select.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    slli a2, a1, 32
; RV64-NEXT:    slli a3, a0, 32
; RV64-NEXT:    mulhu a2, a3, a2
; RV64-NEXT:    srai a2, a2, 32
; RV64-NEXT:    bnez a2, .LBB48_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB48_2: # %entry
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = select i1 %obit, i32 %v1, i32 %v2
  ret i32 %ret
}

define i1 @umulo.not.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: umulo.not.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    slli a1, a1, 32
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    mulhu a0, a0, a1
; RV64-NEXT:    srai a0, a0, 32
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = xor i1 %obit, true
  ret i1 %ret
}

define i64 @umulo.select.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: umulo.select.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    mulhu a2, a0, a1
; RV64-NEXT:    bnez a2, .LBB50_2
; RV64-NEXT:  # %bb.1: # %entry
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:  .LBB50_2: # %entry
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
  %obit = extractvalue {i64, i1} %t, 1
  %ret = select i1 %obit, i64 %v1, i64 %v2
  ret i64 %ret
}

define i1 @umulo.not.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: umulo.not.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    mulhu a0, a0, a1
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    xori a0, a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
  %obit = extractvalue {i64, i1} %t, 1
  %ret = xor i1 %obit, true
  ret i1 %ret
}

; Check the use of the overflow bit in combination with a branch instruction.
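; In the branch tests the overflow condition feeds the conditional branch
; directly (beq/bgeu/beqz/bltu), so no boolean is materialized in the
; entry block.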
define zeroext i1 @saddo.br.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: saddo.br.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addw a2, a0, a1
; RV64-NEXT:    slt a0, a2, a0
; RV64-NEXT:    slti a1, a1, 0
; RV64-NEXT:    beq a1, a0, .LBB52_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB52_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: saddo.br.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    add a2, a0, a1
; RV64-NEXT:    slt a0, a2, a0
; RV64-NEXT:    slti a1, a1, 0
; RV64-NEXT:    beq a1, a0, .LBB53_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB53_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
; RV64-LABEL: uaddo.br.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addw a1, a0, a1
; RV64-NEXT:    sext.w a0, a0
; RV64-NEXT:    bgeu a1, a0, .LBB54_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB54_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: uaddo.br.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    add a1, a0, a1
; RV64-NEXT:    bgeu a1, a0, .LBB55_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB55_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

define zeroext i1 @ssubo.br.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: ssubo.br.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    sgtz a2, a1
; RV64-NEXT:    subw a1, a0, a1
; RV64-NEXT:    slt a0, a1, a0
; RV64-NEXT:    beq a2, a0, .LBB56_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB56_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: ssubo.br.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    sgtz a2, a1
; RV64-NEXT:    sub a1, a0, a1
; RV64-NEXT:    slt a0, a1, a0
; RV64-NEXT:    beq a2, a0, .LBB57_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB57_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

define zeroext i1 @usubo.br.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: usubo.br.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    subw a1, a0, a1
; RV64-NEXT:    bgeu a0, a1, .LBB58_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB58_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: usubo.br.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    sub a1, a0, a1
; RV64-NEXT:    bgeu a0, a1, .LBB59_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB59_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

define zeroext i1 @smulo.br.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: smulo.br.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    mul a0, a0, a1
; RV64-NEXT:    srai a1, a0, 32
; RV64-NEXT:    sraiw a0, a0, 31
; RV64-NEXT:    beq a1, a0, .LBB60_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB60_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

define zeroext i1 @smulo.br.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: smulo.br.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    mulh a2, a0, a1
; RV64-NEXT:    mul a0, a0, a1
; RV64-NEXT:    srai a0, a0, 63
; RV64-NEXT:    beq a2, a0, .LBB61_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB61_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

define zeroext i1 @smulo2.br.i64(i64 %v1) {
; RV64-LABEL: smulo2.br.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    li a1, -13
; RV64-NEXT:    mulh a2, a0, a1
; RV64-NEXT:    mul a0, a0, a1
; RV64-NEXT:    srai a0, a0, 63
; RV64-NEXT:    beq a2, a0, .LBB62_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB62_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 -13)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

define zeroext i1 @umulo.br.i32(i32 signext %v1, i32 signext %v2) {
; RV64-LABEL: umulo.br.i32:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    slli a1, a1, 32
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    mulhu a0, a0, a1
; RV64-NEXT:    srai a0, a0, 32
; RV64-NEXT:    beqz a0, .LBB63_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB63_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

define zeroext i1 @umulo.br.i64(i64 %v1, i64 %v2) {
; RV64-LABEL: umulo.br.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    mulhu a0, a0, a1
; RV64-NEXT:    beqz a0, .LBB64_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB64_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

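; Multiplying by 2 is turned into an addition, so the unsigned overflow
; check below reduces to the uaddo pattern (bgeu on the doubled value).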
define zeroext i1 @umulo2.br.i64(i64 %v1) {
; RV64-LABEL: umulo2.br.i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    add a1, a0, a0
; RV64-NEXT:    bgeu a1, a0, .LBB65_2
; RV64-NEXT:  # %bb.1: # %overflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB65_2: # %continue
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  br i1 %obit, label %overflow, label %continue

overflow:
  ret i1 false

continue:
  ret i1 true
}

define zeroext i1 @uaddo.i64.constant(i64 %v1, ptr %res) {
; RV64-LABEL: uaddo.i64.constant:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi a2, a0, 2
; RV64-NEXT:    sltu a0, a2, a0
; RV64-NEXT:    sd a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

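; 2048 and 2049 lie just outside the simm12 range, so they are materialized
; as addi 2047 followed by a second small addi.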
define zeroext i1 @uaddo.i64.constant_2048(i64 %v1, ptr %res) {
; RV64-LABEL: uaddo.i64.constant_2048:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi a2, a0, 2047
; RV64-NEXT:    addi a2, a2, 1
; RV64-NEXT:    sltu a0, a2, a0
; RV64-NEXT:    sd a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2048)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

define zeroext i1 @uaddo.i64.constant_2049(i64 %v1, ptr %res) {
; RV64-LABEL: uaddo.i64.constant_2049:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    addi a2, a0, 2047
; RV64-NEXT:    addi a2, a2, 2
; RV64-NEXT:    sltu a0, a2, a0
; RV64-NEXT:    sd a2, 0(a1)
; RV64-NEXT:    ret
entry:
  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2049)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  store i64 %val, ptr %res
  ret i1 %obit
}

define i64 @uaddo.i64.constant_setcc_on_overflow_flag(ptr %p) {
; RV64-LABEL: uaddo.i64.constant_setcc_on_overflow_flag:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    ld a1, 0(a0)
; RV64-NEXT:    addi a0, a1, 2
; RV64-NEXT:    bltu a0, a1, .LBB69_2
; RV64-NEXT:  # %bb.1: # %IfOverflow
; RV64-NEXT:    li a0, 0
; RV64-NEXT:  .LBB69_2: # %IfNoOverflow
; RV64-NEXT:    ret
entry:
  %v1 = load i64, ptr %p
  %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 2)
  %val = extractvalue {i64, i1} %t, 0
  %obit = extractvalue {i64, i1} %t, 1
  br i1 %obit, label %IfNoOverflow, label %IfOverflow

IfOverflow:
  ret i64 0

IfNoOverflow:
  ret i64 %val
}

declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64) nounwind readnone
declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.uadd.with.overflow.i64(i64, i64) nounwind readnone
declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.ssub.with.overflow.i64(i64, i64) nounwind readnone
declare {i32, i1} @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.usub.with.overflow.i64(i64, i64) nounwind readnone
declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
declare {i64, i1} @llvm.umul.with.overflow.i64(i64, i64) nounwind readnone