; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mcpu=generic -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-linux | FileCheck %s --check-prefixes=X64,X64-LINUX
; RUN: llc < %s -mcpu=generic -mtriple=x86_64-win32 | FileCheck %s --check-prefixes=X64,X64-WIN32
; Overflow-reporting add intrinsics exercised by the tests below.
declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)
declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32)
; The immediate can be encoded in a smaller way if the
; instruction is a sub instead of an add.
define i32 @test1(i32 inreg %a) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: subl $-128, %eax
; X64-LINUX-LABEL: test1:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: movl %edi, %eax
; X64-LINUX-NEXT: subl $-128, %eax
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test1:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: movl %ecx, %eax
; X64-WIN32-NEXT: subl $-128, %eax
; X64-WIN32-NEXT: retq
entry:
  ; add 128 (needs imm32) should be selected as sub -128 (fits in imm8).
  %b = add i32 %a, 128
  ret i32 %b
}
; Same fold as test1, but with the operand loaded from memory.
define i32 @test1b(i32* %p) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl (%eax), %eax
; X32-NEXT: subl $-128, %eax
; X64-LINUX-LABEL: test1b:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: movl (%rdi), %eax
; X64-LINUX-NEXT: subl $-128, %eax
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test1b:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: movl (%rcx), %eax
; X64-WIN32-NEXT: subl $-128, %eax
; X64-WIN32-NEXT: retq
entry:
  %a = load i32, i32* %p
  ; add 128 folds to sub -128 for the shorter imm8 encoding.
  %b = add i32 %a, 128
  ret i32 %b
}
; add 2^31 on i64: 64-bit targets use subq $-2147483648 (imm32 fits);
; the 32-bit target must split into addl/adcl.
define i64 @test2(i64 inreg %a) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: addl $-2147483648, %eax # imm = 0x80000000
; X32-NEXT: adcl $0, %edx
; X64-LINUX-LABEL: test2:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: movq %rdi, %rax
; X64-LINUX-NEXT: subq $-2147483648, %rax # imm = 0x80000000
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test2:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: movq %rcx, %rax
; X64-WIN32-NEXT: subq $-2147483648, %rax # imm = 0x80000000
; X64-WIN32-NEXT: retq
entry:
  %b = add i64 %a, 2147483648
  ret i64 %b
}
; add 128 on i64: 64-bit targets use subq $-128 (imm8 encoding).
define i64 @test3(i64 inreg %a) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: addl $128, %eax
; X32-NEXT: adcl $0, %edx
; X64-LINUX-LABEL: test3:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: movq %rdi, %rax
; X64-LINUX-NEXT: subq $-128, %rax
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test3:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: movq %rcx, %rax
; X64-WIN32-NEXT: subq $-128, %rax
; X64-WIN32-NEXT: retq
entry:
  %b = add i64 %a, 128
  ret i64 %b
}
; Same fold as test3, but with the operand loaded from memory.
define i64 @test3b(i64* %p) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl 4(%ecx), %edx
; X32-NEXT: movl $128, %eax
; X32-NEXT: addl (%ecx), %eax
; X32-NEXT: adcl $0, %edx
; X64-LINUX-LABEL: test3b:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: movq (%rdi), %rax
; X64-LINUX-NEXT: subq $-128, %rax
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test3b:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: movq (%rcx), %rax
; X64-WIN32-NEXT: subq $-128, %rax
; X64-WIN32-NEXT: retq
entry:
  %a = load i64, i64* %p
  %b = add i64 %a, 128
  ret i64 %b
}
; Signed add-with-overflow: the overflow bit should be branched on with jo.
define i1 @test4(i32 %v1, i32 %v2, i32* %X) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: jo .LBB5_2
; X32-NEXT: # %bb.1: # %normal
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $0, (%eax)
; X32-NEXT: .LBB5_2: # %overflow
; X32-NEXT: xorl %eax, %eax
; X64-LINUX-LABEL: test4:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: addl %esi, %edi
; X64-LINUX-NEXT: jo .LBB5_2
; X64-LINUX-NEXT: # %bb.1: # %normal
; X64-LINUX-NEXT: movl $0, (%rdx)
; X64-LINUX-NEXT: .LBB5_2: # %overflow
; X64-LINUX-NEXT: xorl %eax, %eax
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test4:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: addl %edx, %ecx
; X64-WIN32-NEXT: jo .LBB5_2
; X64-WIN32-NEXT: # %bb.1: # %normal
; X64-WIN32-NEXT: movl $0, (%r8)
; X64-WIN32-NEXT: .LBB5_2: # %overflow
; X64-WIN32-NEXT: xorl %eax, %eax
; X64-WIN32-NEXT: retq
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
  %sum = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %overflow, label %normal

normal:
  ; Non-overflow path stores 0, then falls through to the shared return.
  store i32 0, i32* %X
  br label %overflow

overflow:
  ret i1 false
}
; Unsigned add-with-overflow: the carry bit should be branched on with jb.
define i1 @test5(i32 %v1, i32 %v2, i32* %X) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: jb .LBB6_2
; X32-NEXT: # %bb.1: # %normal
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl $0, (%eax)
; X32-NEXT: .LBB6_2: # %carry
; X32-NEXT: xorl %eax, %eax
; X64-LINUX-LABEL: test5:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: addl %esi, %edi
; X64-LINUX-NEXT: jb .LBB6_2
; X64-LINUX-NEXT: # %bb.1: # %normal
; X64-LINUX-NEXT: movl $0, (%rdx)
; X64-LINUX-NEXT: .LBB6_2: # %carry
; X64-LINUX-NEXT: xorl %eax, %eax
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test5:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: addl %edx, %ecx
; X64-WIN32-NEXT: jb .LBB6_2
; X64-WIN32-NEXT: # %bb.1: # %normal
; X64-WIN32-NEXT: movl $0, (%r8)
; X64-WIN32-NEXT: .LBB6_2: # %carry
; X64-WIN32-NEXT: xorl %eax, %eax
; X64-WIN32-NEXT: retq
entry:
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
  %sum = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %carry, label %normal

normal:
  ; No-carry path stores 0, then falls through to the shared return.
  store i32 0, i32* %X
  br label %carry

carry:
  ret i1 false
}
; (zext(B) << 32) + A: 64-bit targets should use shlq + leaq.
define i64 @test6(i64 %A, i32 %B) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X64-LINUX-LABEL: test6:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: # kill: def $esi killed $esi def $rsi
; X64-LINUX-NEXT: shlq $32, %rsi
; X64-LINUX-NEXT: leaq (%rsi,%rdi), %rax
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test6:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: # kill: def $edx killed $edx def $rdx
; X64-WIN32-NEXT: shlq $32, %rdx
; X64-WIN32-NEXT: leaq (%rdx,%rcx), %rax
; X64-WIN32-NEXT: retq
entry:
  %tmp12 = zext i32 %B to i64
  %tmp3 = shl i64 %tmp12, 32
  %tmp5 = add i64 %tmp3, %A
  ret i64 %tmp5
}
; Returning the uadd.with.overflow aggregate directly: carry comes back via setb.
define {i32, i1} @test7(i32 %v1, i32 %v2) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X64-LINUX-LABEL: test7:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: movl %edi, %eax
; X64-LINUX-NEXT: addl %esi, %eax
; X64-LINUX-NEXT: setb %dl
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test7:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: movl %ecx, %eax
; X64-WIN32-NEXT: addl %edx, %eax
; X64-WIN32-NEXT: setb %dl
; X64-WIN32-NEXT: retq
entry:
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
  ret {i32, i1} %t
}
; i65 wide add pattern should be recognized as i64 add-with-carry (addq + setb).
define {i64, i1} @test8(i64 %left, i64 %right) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
; X64-LINUX-LABEL: test8:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: movq %rdi, %rax
; X64-LINUX-NEXT: addq %rsi, %rax
; X64-LINUX-NEXT: setb %dl
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test8:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: movq %rcx, %rax
; X64-WIN32-NEXT: addq %rdx, %rax
; X64-WIN32-NEXT: setb %dl
; X64-WIN32-NEXT: retq
entry:
  %extleft = zext i64 %left to i65
  %extright = zext i64 %right to i65
  %sum = add i65 %extleft, %extright
  %res.0 = trunc i65 %sum to i64
  ; -18446744073709551616 = -(2^64): masks out everything but the carry bit.
  %overflow = and i65 %sum, -18446744073709551616
  %res.1 = icmp ne i65 %overflow, 0
  %final0 = insertvalue {i64, i1} undef, i64 %res.0, 0
  %final1 = insertvalue {i64, i1} %final0, i1 %res.1, 1
  ret {i64, i1} %final1
}
; sext(icmp) + y should lower to a cmp/sete/sub sequence, not a branch.
define i32 @test9(i32 %x, i32 %y) nounwind readnone {
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: cmpl $10, {{[0-9]+}}(%esp)
; X32-NEXT: subl %ecx, %eax
; X64-LINUX-LABEL: test9:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: movl %esi, %eax
; X64-LINUX-NEXT: xorl %ecx, %ecx
; X64-LINUX-NEXT: cmpl $10, %edi
; X64-LINUX-NEXT: sete %cl
; X64-LINUX-NEXT: subl %ecx, %eax
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test9:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: movl %edx, %eax
; X64-WIN32-NEXT: xorl %edx, %edx
; X64-WIN32-NEXT: cmpl $10, %ecx
; X64-WIN32-NEXT: sete %dl
; X64-WIN32-NEXT: subl %edx, %eax
; X64-WIN32-NEXT: retq
entry:
  %cmp = icmp eq i32 %x, 10
  ; sext i1 gives 0 or -1, so the add becomes a subtract of the setcc result.
  %sub = sext i1 %cmp to i32
  %cond = add i32 %sub, %y
  ret i32 %cond
}
; sadd.with.overflow by 1: should use incl + seto for the overflow bit.
define i1 @test10(i32 %x) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: incl %eax
; X64-LINUX-LABEL: test10:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: incl %edi
; X64-LINUX-NEXT: seto %al
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test10:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: incl %ecx
; X64-WIN32-NEXT: seto %al
; X64-WIN32-NEXT: retq
entry:
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 1)
  %obit = extractvalue {i32, i1} %t, 1
  ret i1 %obit
}
; Memory-destination add 128 should also become sub -128 (imm8 encoding).
define void @test11(i32* inreg %a) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: subl $-128, (%eax)
; X64-LINUX-LABEL: test11:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: subl $-128, (%rdi)
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test11:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: subl $-128, (%rcx)
; X64-WIN32-NEXT: retq
entry:
  %aa = load i32, i32* %a
  %b = add i32 %aa, 128
  store i32 %b, i32* %a
  ret void
}
; Memory-destination i64 add of 2^31: subq imm32 on 64-bit, addl/adcl pair on 32-bit.
define void @test12(i64* inreg %a) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: addl $-2147483648, (%eax) # imm = 0x80000000
; X32-NEXT: adcl $0, 4(%eax)
; X64-LINUX-LABEL: test12:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: subq $-2147483648, (%rdi) # imm = 0x80000000
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test12:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: subq $-2147483648, (%rcx) # imm = 0x80000000
; X64-WIN32-NEXT: retq
entry:
  %aa = load i64, i64* %a
  %b = add i64 %aa, 2147483648
  store i64 %b, i64* %a
  ret void
}
; Memory-destination i64 add 128: subq $-128 on 64-bit targets.
define void @test13(i64* inreg %a) nounwind {
; X32: # %bb.0: # %entry
; X32-NEXT: addl $128, (%eax)
; X32-NEXT: adcl $0, 4(%eax)
; X64-LINUX-LABEL: test13:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: subq $-128, (%rdi)
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: test13:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: subq $-128, (%rcx)
; X64-WIN32-NEXT: retq
entry:
  %aa = load i64, i64* %a
  %b = add i64 %aa, 128
  store i64 %b, i64* %a
  ret void
}
; (~a) + 1 is -a: should lower to a single negate.
define i32 @inc_not(i32 %a) {
; X32-LABEL: inc_not:
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X64-LINUX-LABEL: inc_not:
; X64-LINUX: # %bb.0:
; X64-LINUX-NEXT: movl %edi, %eax
; X64-LINUX-NEXT: negl %eax
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: inc_not:
; X64-WIN32: # %bb.0:
; X64-WIN32-NEXT: movl %ecx, %eax
; X64-WIN32-NEXT: negl %eax
; X64-WIN32-NEXT: retq
  %nota = xor i32 %a, -1
  %r = add i32 %nota, 1
  ret i32 %r
}
; Vector form of inc_not: (~a) + 1 should become 0 - a (psubd from zero).
define <4 x i32> @inc_not_vec(<4 x i32> %a) nounwind {
; X32-LABEL: inc_not_vec:
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: subl {{[0-9]+}}(%esp), %edx
; X32-NEXT: xorl %esi, %esi
; X32-NEXT: subl {{[0-9]+}}(%esp), %esi
; X32-NEXT: xorl %edi, %edi
; X32-NEXT: subl {{[0-9]+}}(%esp), %edi
; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, 12(%eax)
; X32-NEXT: movl %edi, 8(%eax)
; X32-NEXT: movl %esi, 4(%eax)
; X32-NEXT: movl %edx, (%eax)
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
; X64-LINUX-LABEL: inc_not_vec:
; X64-LINUX: # %bb.0:
; X64-LINUX-NEXT: pxor %xmm1, %xmm1
; X64-LINUX-NEXT: psubd %xmm0, %xmm1
; X64-LINUX-NEXT: movdqa %xmm1, %xmm0
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: inc_not_vec:
; X64-WIN32: # %bb.0:
; X64-WIN32-NEXT: pxor %xmm0, %xmm0
; X64-WIN32-NEXT: psubd (%rcx), %xmm0
; X64-WIN32-NEXT: retq
  %nota = xor <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
  %r = add <4 x i32> %nota, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %r
}
; uadd.with.overflow(~a, 1): the negate sets CF, so carry comes from setae.
define void @uaddo1_not(i32 %a, i32* %p0, i1* %p1) {
; X32-LABEL: uaddo1_not:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: subl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, (%ecx)
; X32-NEXT: setae (%eax)
; X64-LINUX-LABEL: uaddo1_not:
; X64-LINUX: # %bb.0:
; X64-LINUX-NEXT: negl %edi
; X64-LINUX-NEXT: movl %edi, (%rsi)
; X64-LINUX-NEXT: setae (%rdx)
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: uaddo1_not:
; X64-WIN32: # %bb.0:
; X64-WIN32-NEXT: negl %ecx
; X64-WIN32-NEXT: movl %ecx, (%rdx)
; X64-WIN32-NEXT: setae (%r8)
; X64-WIN32-NEXT: retq
  %nota = xor i32 %a, -1
  %uaddo = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %nota, i32 1)
  %r0 = extractvalue {i32, i1} %uaddo, 0
  %r1 = extractvalue {i32, i1} %uaddo, 1
  store i32 %r0, i32* %p0
  store i1 %r1, i1* %p1
  ret void
}
; (~a) + b is b - a - 1 + 1 = b - a ... no: (~a) + b = b - a - 1 + ... the
; checks show the expected lowering: a single subtract (b - a - 1 folded with
; the xor into subl). Keep the IR exactly as the autogenerated checks expect.
define i32 @add_to_sub(i32 %a, i32 %b) {
; X32-LABEL: add_to_sub:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X64-LINUX-LABEL: add_to_sub:
; X64-LINUX: # %bb.0:
; X64-LINUX-NEXT: movl %esi, %eax
; X64-LINUX-NEXT: subl %edi, %eax
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: add_to_sub:
; X64-WIN32: # %bb.0:
; X64-WIN32-NEXT: movl %edx, %eax
; X64-WIN32-NEXT: subl %ecx, %eax
; X64-WIN32-NEXT: retq
  %nota = xor i32 %a, -1
  %add = add i32 %nota, %b
  ret i32 %add
}
; External sinks used by the flag-use tests below.
declare void @bar_i32(i32)
declare void @bar_i64(i64)
; Make sure we can use sub -128 for add 128 when the flags are used.
define void @add_i32_128_flag(i32 %x) {
; X32-LABEL: add_i32_128_flag:
; X32: # %bb.0: # %entry
; X32-NEXT: movl $128, %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: je .LBB19_2
; X32-NEXT: # %bb.1: # %if.then
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_adjust_cfa_offset 4
; X32-NEXT: calll bar_i32
; X32-NEXT: addl $4, %esp
; X32-NEXT: .cfi_adjust_cfa_offset -4
; X32-NEXT: .LBB19_2: # %if.end
; X64-LINUX-LABEL: add_i32_128_flag:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: subl $-128, %edi
; X64-LINUX-NEXT: je .LBB19_1
; X64-LINUX-NEXT: # %bb.2: # %if.then
; X64-LINUX-NEXT: jmp bar_i32 # TAILCALL
; X64-LINUX-NEXT: .LBB19_1: # %if.end
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: add_i32_128_flag:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: subl $-128, %ecx
; X64-WIN32-NEXT: je .LBB19_1
; X64-WIN32-NEXT: # %bb.2: # %if.then
; X64-WIN32-NEXT: jmp bar_i32 # TAILCALL
; X64-WIN32-NEXT: .LBB19_1: # %if.end
; X64-WIN32-NEXT: retq
entry:
  %add = add i32 %x, 128
  %tobool = icmp eq i32 %add, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:
  tail call void @bar_i32(i32 %add)
  br label %if.end

if.end:
  ret void
}
; Make sure we can use sub -128 for add 128 when the flags are used.
define void @add_i64_128_flag(i64 %x) {
; X32-LABEL: add_i64_128_flag:
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl $128, %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: movl %eax, %edx
; X32-NEXT: orl %ecx, %edx
; X32-NEXT: je .LBB20_2
; X32-NEXT: # %bb.1: # %if.then
; X32-NEXT: pushl %ecx
; X32-NEXT: .cfi_adjust_cfa_offset 4
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_adjust_cfa_offset 4
; X32-NEXT: calll bar_i64
; X32-NEXT: addl $8, %esp
; X32-NEXT: .cfi_adjust_cfa_offset -8
; X32-NEXT: .LBB20_2: # %if.end
; X64-LINUX-LABEL: add_i64_128_flag:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: subq $-128, %rdi
; X64-LINUX-NEXT: je .LBB20_1
; X64-LINUX-NEXT: # %bb.2: # %if.then
; X64-LINUX-NEXT: jmp bar_i64 # TAILCALL
; X64-LINUX-NEXT: .LBB20_1: # %if.end
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: add_i64_128_flag:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: subq $-128, %rcx
; X64-WIN32-NEXT: je .LBB20_1
; X64-WIN32-NEXT: # %bb.2: # %if.then
; X64-WIN32-NEXT: jmp bar_i64 # TAILCALL
; X64-WIN32-NEXT: .LBB20_1: # %if.end
; X64-WIN32-NEXT: retq
entry:
  %add = add i64 %x, 128
  %tobool = icmp eq i64 %add, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:
  tail call void @bar_i64(i64 %add)
  br label %if.end

if.end:
  ret void
}
; Make sure we can use sub -2147483648 for add 2147483648 when the flags are used.
define void @add_i64_2147483648_flag(i64 %x) {
; X32-LABEL: add_i64_2147483648_flag:
; X32: # %bb.0: # %entry
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: movl %eax, %edx
; X32-NEXT: orl %ecx, %edx
; X32-NEXT: je .LBB21_2
; X32-NEXT: # %bb.1: # %if.then
; X32-NEXT: pushl %ecx
; X32-NEXT: .cfi_adjust_cfa_offset 4
; X32-NEXT: pushl %eax
; X32-NEXT: .cfi_adjust_cfa_offset 4
; X32-NEXT: calll bar_i64
; X32-NEXT: addl $8, %esp
; X32-NEXT: .cfi_adjust_cfa_offset -8
; X32-NEXT: .LBB21_2: # %if.end
; X64-LINUX-LABEL: add_i64_2147483648_flag:
; X64-LINUX: # %bb.0: # %entry
; X64-LINUX-NEXT: subq $-2147483648, %rdi # imm = 0x80000000
; X64-LINUX-NEXT: je .LBB21_1
; X64-LINUX-NEXT: # %bb.2: # %if.then
; X64-LINUX-NEXT: jmp bar_i64 # TAILCALL
; X64-LINUX-NEXT: .LBB21_1: # %if.end
; X64-LINUX-NEXT: retq
; X64-WIN32-LABEL: add_i64_2147483648_flag:
; X64-WIN32: # %bb.0: # %entry
; X64-WIN32-NEXT: subq $-2147483648, %rcx # imm = 0x80000000
; X64-WIN32-NEXT: je .LBB21_1
; X64-WIN32-NEXT: # %bb.2: # %if.then
; X64-WIN32-NEXT: jmp bar_i64 # TAILCALL
; X64-WIN32-NEXT: .LBB21_1: # %if.end
; X64-WIN32-NEXT: retq
entry:
  %add = add i64 %x, 2147483648
  %tobool = icmp eq i64 %add, 0
  br i1 %tobool, label %if.end, label %if.then

if.then:
  tail call void @bar_i64(i64 %add)
  br label %if.end

if.end:
  ret void
}