; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s

declare { i128, i1 } @llvm.uadd.with.overflow.i128(i128, i128)
declare i128 @llvm.uadd.sat.i128(i128, i128)

declare { i128, i1 } @llvm.usub.with.overflow.i128(i128, i128)
declare i128 @llvm.usub.sat.i128(i128, i128)

declare { i128, i1 } @llvm.umul.with.overflow.i128(i128, i128)

declare { i128, i1 } @llvm.sadd.with.overflow.i128(i128, i128)
declare i128 @llvm.sadd.sat.i128(i128, i128)

declare { i128, i1 } @llvm.ssub.with.overflow.i128(i128, i128)
declare i128 @llvm.ssub.sat.i128(i128, i128)

declare { i128, i1 } @llvm.smul.with.overflow.i128(i128, i128)

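; Unsigned 128-bit addition. The plain add lowers to an adds/adc carry chain
; across the two 64-bit halves; the with.overflow variants keep the final
; carry live (adcs) and materialize it with cset hs, and uadd.sat saturates
; to all-ones via csinv when the carry is set.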
define i128 @u128_add(i128 %x, i128 %y) {
; CHECK-LABEL: u128_add:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adds x0, x0, x2
; CHECK-NEXT:    adc x1, x1, x3
; CHECK-NEXT:    ret
  %1 = add i128 %x, %y
  ret i128 %1
}

define { i128, i8 } @u128_checked_add(i128 %x, i128 %y) {
; CHECK-LABEL: u128_checked_add:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adds x0, x0, x2
; CHECK-NEXT:    adcs x1, x1, x3
; CHECK-NEXT:    cset w8, hs
; CHECK-NEXT:    eor w2, w8, #0x1
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.uadd.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = xor i1 %3, true
  %5 = zext i1 %4 to i8
  %6 = insertvalue { i128, i8 } undef, i128 %2, 0
  %7 = insertvalue { i128, i8 } %6, i8 %5, 1
  ret { i128, i8 } %7
}

define { i128, i8 } @u128_overflowing_add(i128 %x, i128 %y) {
; CHECK-LABEL: u128_overflowing_add:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adds x0, x0, x2
; CHECK-NEXT:    adcs x1, x1, x3
; CHECK-NEXT:    cset w2, hs
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.uadd.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = zext i1 %3 to i8
  %5 = insertvalue { i128, i8 } undef, i128 %2, 0
  %6 = insertvalue { i128, i8 } %5, i8 %4, 1
  ret { i128, i8 } %6
}

define i128 @u128_saturating_add(i128 %x, i128 %y) {
; CHECK-LABEL: u128_saturating_add:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adds x8, x0, x2
; CHECK-NEXT:    adcs x9, x1, x3
; CHECK-NEXT:    csinv x0, x8, xzr, lo
; CHECK-NEXT:    csinv x1, x9, xzr, lo
; CHECK-NEXT:    ret
  %1 = tail call i128 @llvm.uadd.sat.i128(i128 %x, i128 %y)
  ret i128 %1
}

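; Unsigned 128-bit subtraction. Lowers to subs/sbc(s); an unsigned borrow
; leaves the carry flag clear, so the overflow bit is cset lo, and usub.sat
; clamps to zero with csel against xzr.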
define i128 @u128_sub(i128 %x, i128 %y) {
; CHECK-LABEL: u128_sub:
; CHECK:       // %bb.0:
; CHECK-NEXT:    subs x0, x0, x2
; CHECK-NEXT:    sbc x1, x1, x3
; CHECK-NEXT:    ret
  %1 = sub i128 %x, %y
  ret i128 %1
}

define { i128, i8 } @u128_checked_sub(i128 %x, i128 %y) {
; CHECK-LABEL: u128_checked_sub:
; CHECK:       // %bb.0:
; CHECK-NEXT:    subs x0, x0, x2
; CHECK-NEXT:    sbcs x1, x1, x3
; CHECK-NEXT:    cset w8, lo
; CHECK-NEXT:    eor w2, w8, #0x1
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.usub.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = xor i1 %3, true
  %5 = zext i1 %4 to i8
  %6 = insertvalue { i128, i8 } undef, i128 %2, 0
  %7 = insertvalue { i128, i8 } %6, i8 %5, 1
  ret { i128, i8 } %7
}

define { i128, i8 } @u128_overflowing_sub(i128 %x, i128 %y) {
; CHECK-LABEL: u128_overflowing_sub:
; CHECK:       // %bb.0:
; CHECK-NEXT:    subs x0, x0, x2
; CHECK-NEXT:    sbcs x1, x1, x3
; CHECK-NEXT:    cset w2, lo
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.usub.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = zext i1 %3 to i8
  %5 = insertvalue { i128, i8 } undef, i128 %2, 0
  %6 = insertvalue { i128, i8 } %5, i8 %4, 1
  ret { i128, i8 } %6
}

define i128 @u128_saturating_sub(i128 %x, i128 %y) {
; CHECK-LABEL: u128_saturating_sub:
; CHECK:       // %bb.0:
; CHECK-NEXT:    subs x8, x0, x2
; CHECK-NEXT:    sbcs x9, x1, x3
; CHECK-NEXT:    csel x0, xzr, x8, lo
; CHECK-NEXT:    csel x1, xzr, x9, lo
; CHECK-NEXT:    ret
  %1 = tail call i128 @llvm.usub.sat.i128(i128 %x, i128 %y)
  ret i128 %1
}

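; Signed 128-bit addition. Overflow comes from the V flag after the high-half
; adcs (cset vs). For sadd.sat the saturation value is built from the sign of
; the wrapped high half: asr #63 gives the low word and an eor with
; 0x8000000000000000 gives the high word of INT128_MAX/INT128_MIN.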
define i128 @i128_add(i128 %x, i128 %y) {
; CHECK-LABEL: i128_add:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adds x0, x0, x2
; CHECK-NEXT:    adc x1, x1, x3
; CHECK-NEXT:    ret
  %1 = add i128 %x, %y
  ret i128 %1
}

define { i128, i8 } @i128_checked_add(i128 %x, i128 %y) {
; CHECK-LABEL: i128_checked_add:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adds x0, x0, x2
; CHECK-NEXT:    adcs x1, x1, x3
; CHECK-NEXT:    cset w8, vs
; CHECK-NEXT:    eor w2, w8, #0x1
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.sadd.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = xor i1 %3, true
  %5 = zext i1 %4 to i8
  %6 = insertvalue { i128, i8 } undef, i128 %2, 0
  %7 = insertvalue { i128, i8 } %6, i8 %5, 1
  ret { i128, i8 } %7
}

define { i128, i8 } @i128_overflowing_add(i128 %x, i128 %y) {
; CHECK-LABEL: i128_overflowing_add:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adds x0, x0, x2
; CHECK-NEXT:    adcs x1, x1, x3
; CHECK-NEXT:    cset w2, vs
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.sadd.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = zext i1 %3 to i8
  %5 = insertvalue { i128, i8 } undef, i128 %2, 0
  %6 = insertvalue { i128, i8 } %5, i8 %4, 1
  ret { i128, i8 } %6
}

define i128 @i128_saturating_add(i128 %x, i128 %y) {
; CHECK-LABEL: i128_saturating_add:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adds x8, x0, x2
; CHECK-NEXT:    adcs x9, x1, x3
; CHECK-NEXT:    asr x10, x9, #63
; CHECK-NEXT:    eor x11, x10, #0x8000000000000000
; CHECK-NEXT:    csel x0, x10, x8, vs
; CHECK-NEXT:    csel x1, x11, x9, vs
; CHECK-NEXT:    ret
  %1 = tail call i128 @llvm.sadd.sat.i128(i128 %x, i128 %y)
  ret i128 %1
}

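; Signed 128-bit subtraction mirrors the addition tests: subs/sbcs with the
; same vs-based overflow check and saturation selects.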
define i128 @i128_sub(i128 %x, i128 %y) {
; CHECK-LABEL: i128_sub:
; CHECK:       // %bb.0:
; CHECK-NEXT:    subs x0, x0, x2
; CHECK-NEXT:    sbc x1, x1, x3
; CHECK-NEXT:    ret
  %1 = sub i128 %x, %y
  ret i128 %1
}

define { i128, i8 } @i128_checked_sub(i128 %x, i128 %y) {
; CHECK-LABEL: i128_checked_sub:
; CHECK:       // %bb.0:
; CHECK-NEXT:    subs x0, x0, x2
; CHECK-NEXT:    sbcs x1, x1, x3
; CHECK-NEXT:    cset w8, vs
; CHECK-NEXT:    eor w2, w8, #0x1
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.ssub.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = xor i1 %3, true
  %5 = zext i1 %4 to i8
  %6 = insertvalue { i128, i8 } undef, i128 %2, 0
  %7 = insertvalue { i128, i8 } %6, i8 %5, 1
  ret { i128, i8 } %7
}

define { i128, i8 } @i128_overflowing_sub(i128 %x, i128 %y) {
; CHECK-LABEL: i128_overflowing_sub:
; CHECK:       // %bb.0:
; CHECK-NEXT:    subs x0, x0, x2
; CHECK-NEXT:    sbcs x1, x1, x3
; CHECK-NEXT:    cset w2, vs
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.ssub.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = zext i1 %3 to i8
  %5 = insertvalue { i128, i8 } undef, i128 %2, 0
  %6 = insertvalue { i128, i8 } %5, i8 %4, 1
  ret { i128, i8 } %6
}

define i128 @i128_saturating_sub(i128 %x, i128 %y) {
; CHECK-LABEL: i128_saturating_sub:
; CHECK:       // %bb.0:
; CHECK-NEXT:    subs x8, x0, x2
; CHECK-NEXT:    sbcs x9, x1, x3
; CHECK-NEXT:    asr x10, x9, #63
; CHECK-NEXT:    eor x11, x10, #0x8000000000000000
; CHECK-NEXT:    csel x0, x10, x8, vs
; CHECK-NEXT:    csel x1, x11, x9, vs
; CHECK-NEXT:    ret
  %1 = tail call i128 @llvm.ssub.sat.i128(i128 %x, i128 %y)
  ret i128 %1
}

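; Unsigned 128-bit multiplication. A 128x128->128 multiply expands to
; mul/umulh/madd over the 64-bit halves. umul.with.overflow detects overflow
; with a cmp/ccmp chain: both high halves nonzero, either cross-product umulh
; nonzero, or a carry out of the final adds.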
define i128 @u128_mul(i128 %x, i128 %y) {
; CHECK-LABEL: u128_mul:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umulh x8, x0, x2
; CHECK-NEXT:    madd x8, x0, x3, x8
; CHECK-NEXT:    mul x0, x0, x2
; CHECK-NEXT:    madd x1, x1, x2, x8
; CHECK-NEXT:    ret
  %1 = mul i128 %x, %y
  ret i128 %1
}

define { i128, i8 } @u128_checked_mul(i128 %x, i128 %y) {
; CHECK-LABEL: u128_checked_mul:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mul x9, x3, x0
; CHECK-NEXT:    cmp x1, #0
; CHECK-NEXT:    ccmp x3, #0, #4, ne
; CHECK-NEXT:    umulh x8, x1, x2
; CHECK-NEXT:    umulh x10, x3, x0
; CHECK-NEXT:    madd x9, x1, x2, x9
; CHECK-NEXT:    ccmp xzr, x8, #0, eq
; CHECK-NEXT:    umulh x11, x0, x2
; CHECK-NEXT:    ccmp xzr, x10, #0, eq
; CHECK-NEXT:    mul x0, x0, x2
; CHECK-NEXT:    cset w8, ne
; CHECK-NEXT:    adds x1, x11, x9
; CHECK-NEXT:    csinc w8, w8, wzr, lo
; CHECK-NEXT:    eor w2, w8, #0x1
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = xor i1 %3, true
  %5 = zext i1 %4 to i8
  %6 = insertvalue { i128, i8 } undef, i128 %2, 0
  %7 = insertvalue { i128, i8 } %6, i8 %5, 1
  ret { i128, i8 } %7
}

define { i128, i8 } @u128_overflowing_mul(i128 %x, i128 %y) {
; CHECK-LABEL: u128_overflowing_mul:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mul x9, x3, x0
; CHECK-NEXT:    cmp x1, #0
; CHECK-NEXT:    ccmp x3, #0, #4, ne
; CHECK-NEXT:    umulh x8, x1, x2
; CHECK-NEXT:    umulh x10, x3, x0
; CHECK-NEXT:    madd x9, x1, x2, x9
; CHECK-NEXT:    ccmp xzr, x8, #0, eq
; CHECK-NEXT:    umulh x11, x0, x2
; CHECK-NEXT:    ccmp xzr, x10, #0, eq
; CHECK-NEXT:    mul x0, x0, x2
; CHECK-NEXT:    cset w8, ne
; CHECK-NEXT:    adds x1, x11, x9
; CHECK-NEXT:    csinc w2, w8, wzr, lo
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = zext i1 %3 to i8
  %5 = insertvalue { i128, i8 } undef, i128 %2, 0
  %6 = insertvalue { i128, i8 } %5, i8 %4, 1
  ret { i128, i8 } %6
}

define i128 @u128_saturating_mul(i128 %x, i128 %y) {
; CHECK-LABEL: u128_saturating_mul:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mul x9, x3, x0
; CHECK-NEXT:    cmp x1, #0
; CHECK-NEXT:    ccmp x3, #0, #4, ne
; CHECK-NEXT:    umulh x8, x1, x2
; CHECK-NEXT:    umulh x10, x3, x0
; CHECK-NEXT:    madd x9, x1, x2, x9
; CHECK-NEXT:    ccmp xzr, x8, #0, eq
; CHECK-NEXT:    umulh x11, x0, x2
; CHECK-NEXT:    ccmp xzr, x10, #0, eq
; CHECK-NEXT:    mul x8, x0, x2
; CHECK-NEXT:    cset w10, ne
; CHECK-NEXT:    adds x9, x11, x9
; CHECK-NEXT:    csinc w10, w10, wzr, lo
; CHECK-NEXT:    cmp w10, #0
; CHECK-NEXT:    csinv x0, x8, xzr, eq
; CHECK-NEXT:    csinv x1, x9, xzr, eq
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = select i1 %3, i128 -1, i128 %2
  ret i128 %4
}

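; Signed 128-bit multiplication. smul.with.overflow computes the widened
; product and flags overflow when the top 128 bits are not the sign extension
; of the low 128 bits (the cmp/ccmp against asr #63); the saturating version
; then selects INT128_MIN or INT128_MAX based on the sign of x ^ y.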
define i128 @i128_mul(i128 %x, i128 %y) {
; CHECK-LABEL: i128_mul:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umulh x8, x0, x2
; CHECK-NEXT:    madd x8, x0, x3, x8
; CHECK-NEXT:    mul x0, x0, x2
; CHECK-NEXT:    madd x1, x1, x2, x8
; CHECK-NEXT:    ret
  %1 = mul i128 %x, %y
  ret i128 %1
}

define { i128, i8 } @i128_checked_mul(i128 %x, i128 %y) {
; CHECK-LABEL: i128_checked_mul:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr x8, x1, #63
; CHECK-NEXT:    asr x11, x3, #63
; CHECK-NEXT:    umulh x13, x0, x2
; CHECK-NEXT:    mul x9, x2, x8
; CHECK-NEXT:    umulh x10, x2, x8
; CHECK-NEXT:    umulh x12, x11, x0
; CHECK-NEXT:    mul x14, x1, x2
; CHECK-NEXT:    add x10, x10, x9
; CHECK-NEXT:    madd x8, x3, x8, x10
; CHECK-NEXT:    madd x10, x11, x1, x12
; CHECK-NEXT:    mul x11, x11, x0
; CHECK-NEXT:    umulh x12, x1, x2
; CHECK-NEXT:    mul x15, x0, x3
; CHECK-NEXT:    add x10, x10, x11
; CHECK-NEXT:    adds x9, x11, x9
; CHECK-NEXT:    umulh x16, x0, x3
; CHECK-NEXT:    adc x10, x10, x8
; CHECK-NEXT:    adds x8, x14, x13
; CHECK-NEXT:    cinc x12, x12, hs
; CHECK-NEXT:    mul x11, x1, x3
; CHECK-NEXT:    adds x8, x15, x8
; CHECK-NEXT:    umulh x13, x1, x3
; CHECK-NEXT:    mov x1, x8
; CHECK-NEXT:    cinc x14, x16, hs
; CHECK-NEXT:    adds x12, x12, x14
; CHECK-NEXT:    mul x0, x0, x2
; CHECK-NEXT:    cset w14, hs
; CHECK-NEXT:    adds x11, x11, x12
; CHECK-NEXT:    asr x12, x8, #63
; CHECK-NEXT:    adc x13, x13, x14
; CHECK-NEXT:    adds x9, x11, x9
; CHECK-NEXT:    adc x10, x13, x10
; CHECK-NEXT:    cmp x9, x12
; CHECK-NEXT:    ccmp x10, x12, #0, eq
; CHECK-NEXT:    cset w2, eq
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = xor i1 %3, true
  %5 = zext i1 %4 to i8
  %6 = insertvalue { i128, i8 } undef, i128 %2, 0
  %7 = insertvalue { i128, i8 } %6, i8 %5, 1
  ret { i128, i8 } %7
}

define { i128, i8 } @i128_overflowing_mul(i128 %x, i128 %y) {
; CHECK-LABEL: i128_overflowing_mul:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr x8, x1, #63
; CHECK-NEXT:    asr x11, x3, #63
; CHECK-NEXT:    umulh x13, x0, x2
; CHECK-NEXT:    mul x9, x2, x8
; CHECK-NEXT:    umulh x10, x2, x8
; CHECK-NEXT:    umulh x12, x11, x0
; CHECK-NEXT:    mul x14, x1, x2
; CHECK-NEXT:    add x10, x10, x9
; CHECK-NEXT:    madd x8, x3, x8, x10
; CHECK-NEXT:    madd x10, x11, x1, x12
; CHECK-NEXT:    mul x11, x11, x0
; CHECK-NEXT:    umulh x12, x1, x2
; CHECK-NEXT:    mul x15, x0, x3
; CHECK-NEXT:    add x10, x10, x11
; CHECK-NEXT:    adds x9, x11, x9
; CHECK-NEXT:    umulh x16, x0, x3
; CHECK-NEXT:    adc x10, x10, x8
; CHECK-NEXT:    adds x8, x14, x13
; CHECK-NEXT:    cinc x12, x12, hs
; CHECK-NEXT:    mul x11, x1, x3
; CHECK-NEXT:    adds x8, x15, x8
; CHECK-NEXT:    umulh x13, x1, x3
; CHECK-NEXT:    mov x1, x8
; CHECK-NEXT:    cinc x14, x16, hs
; CHECK-NEXT:    adds x12, x12, x14
; CHECK-NEXT:    mul x0, x0, x2
; CHECK-NEXT:    cset w14, hs
; CHECK-NEXT:    adds x11, x11, x12
; CHECK-NEXT:    asr x12, x8, #63
; CHECK-NEXT:    adc x13, x13, x14
; CHECK-NEXT:    adds x9, x11, x9
; CHECK-NEXT:    adc x10, x13, x10
; CHECK-NEXT:    cmp x9, x12
; CHECK-NEXT:    ccmp x10, x12, #0, eq
; CHECK-NEXT:    cset w2, ne
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = zext i1 %3 to i8
  %5 = insertvalue { i128, i8 } undef, i128 %2, 0
  %6 = insertvalue { i128, i8 } %5, i8 %4, 1
  ret { i128, i8 } %6
}

define i128 @i128_saturating_mul(i128 %x, i128 %y) {
; CHECK-LABEL: i128_saturating_mul:
; CHECK:       // %bb.0:
; CHECK-NEXT:    asr x8, x1, #63
; CHECK-NEXT:    asr x11, x3, #63
; CHECK-NEXT:    umulh x13, x0, x2
; CHECK-NEXT:    mul x9, x2, x8
; CHECK-NEXT:    umulh x10, x2, x8
; CHECK-NEXT:    umulh x12, x11, x0
; CHECK-NEXT:    mul x14, x1, x2
; CHECK-NEXT:    add x10, x10, x9
; CHECK-NEXT:    madd x8, x3, x8, x10
; CHECK-NEXT:    madd x10, x11, x1, x12
; CHECK-NEXT:    mul x11, x11, x0
; CHECK-NEXT:    umulh x12, x1, x2
; CHECK-NEXT:    mul x16, x0, x3
; CHECK-NEXT:    add x10, x10, x11
; CHECK-NEXT:    adds x9, x11, x9
; CHECK-NEXT:    umulh x15, x0, x3
; CHECK-NEXT:    adc x8, x10, x8
; CHECK-NEXT:    adds x10, x14, x13
; CHECK-NEXT:    cinc x12, x12, hs
; CHECK-NEXT:    mul x17, x1, x3
; CHECK-NEXT:    adds x10, x16, x10
; CHECK-NEXT:    umulh x11, x1, x3
; CHECK-NEXT:    cinc x13, x15, hs
; CHECK-NEXT:    adds x12, x12, x13
; CHECK-NEXT:    cset w13, hs
; CHECK-NEXT:    adds x12, x17, x12
; CHECK-NEXT:    adc x11, x11, x13
; CHECK-NEXT:    adds x9, x12, x9
; CHECK-NEXT:    asr x12, x10, #63
; CHECK-NEXT:    mul x13, x0, x2
; CHECK-NEXT:    adc x8, x11, x8
; CHECK-NEXT:    eor x11, x3, x1
; CHECK-NEXT:    eor x8, x8, x12
; CHECK-NEXT:    eor x9, x9, x12
; CHECK-NEXT:    asr x11, x11, #63
; CHECK-NEXT:    orr x8, x9, x8
; CHECK-NEXT:    eor x9, x11, #0x7fffffffffffffff
; CHECK-NEXT:    cmp x8, #0
; CHECK-NEXT:    csel x1, x9, x10, ne
; CHECK-NEXT:    csinv x0, x13, x11, eq
; CHECK-NEXT:    ret
  %1 = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %x, i128 %y)
  %2 = extractvalue { i128, i1 } %1, 0
  %3 = extractvalue { i128, i1 } %1, 1
  %4 = xor i128 %x, %y
  %5 = icmp sgt i128 %4, -1
  %6 = select i1 %5, i128 170141183460469231731687303715884105727, i128 -170141183460469231731687303715884105728
  %7 = select i1 %3, i128 %6, i128 %2
  ret i128 %7
}

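; Folding tests: sadd.with.overflow of (xor %x, -1) with a +1 constant should
; fold into a negate sequence (negs/ngcs) while still producing the overflow
; flag; the second test uses u0x10000000000000001 (2^64 + 1) so the high half
; also receives a +1.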
define { i128, i1 } @saddo_not_1(i128 %x) nounwind {
; CHECK-LABEL: saddo_not_1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    negs x0, x0
; CHECK-NEXT:    ngcs x1, x1
; CHECK-NEXT:    cset w2, vs
; CHECK-NEXT:    ret
  %not = xor i128 %x, -1
  %r = call { i128, i1 } @llvm.sadd.with.overflow.i128(i128 %not, i128 1)
  ret { i128, i1 } %r
}

define { i128, i1 } @saddo_carry_not_1(i128 %x) nounwind {
; CHECK-LABEL: saddo_carry_not_1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    negs x0, x0
; CHECK-NEXT:    sbcs x1, x8, x1
; CHECK-NEXT:    cset w2, vs
; CHECK-NEXT:    ret
  %not = xor i128 %x, -1
  %r = call { i128, i1 } @llvm.sadd.with.overflow.i128(i128 %not, i128 u0x10000000000000001)
  ret { i128, i1 } %r
}