; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs < %s -mtriple=i686-linux -mcpu=core2 -mattr=+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc -verify-machineinstrs < %s -mtriple=x86_64-linux -mcpu=core2 -mattr=+sse2 | FileCheck %s --check-prefix=X64
; RUN: llc -verify-machineinstrs < %s -mtriple=x86_64-linux-gnux32 -mcpu=core2 -mattr=+sse2 | FileCheck %s --check-prefix=X32
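; X86 exercises 32-bit i686 codegen, X64 exercises 64-bit x86-64 codegen, and
; X32 exercises the x32 psABI (x86-64 with 32-bit pointers, -gnux32 triple).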
define void @t1(i32 %x) nounwind ssp {
; X86-NEXT: jmp foo # TAILCALL
; X64-NEXT: jmp foo # TAILCALL
; X32-NEXT: jmp foo # TAILCALL
  tail call void @foo() nounwind
define void @t2() nounwind ssp {
; X86-NEXT: jmp foo2 # TAILCALL
; X64-NEXT: jmp foo2 # TAILCALL
; X32-NEXT: jmp foo2 # TAILCALL
  %t0 = tail call i32 @foo2() nounwind
define void @t3() nounwind ssp {
; X86-NEXT: jmp foo3 # TAILCALL
; X64-NEXT: jmp foo3 # TAILCALL
; X32-NEXT: jmp foo3 # TAILCALL
  %t0 = tail call i32 @foo3() nounwind
define void @t4(void (i32)* nocapture %x) nounwind ssp {
; X86-NEXT: subl $12, %esp
; X86-NEXT: movl $0, (%esp)
; X86-NEXT: calll *{{[0-9]+}}(%esp)
; X86-NEXT: addl $12, %esp
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorl %edi, %edi
; X64-NEXT: jmpq *%rax # TAILCALL
; X32-NEXT: movl %edi, %eax
; X32-NEXT: xorl %edi, %edi
; X32-NEXT: jmpq *%rax # TAILCALL
  tail call void %x(i32 0) nounwind
; FIXME: This isn't needed since x32 psABI specifies that callers must
; zero-extend pointers passed in registers.
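; In principle the "movl %edi, %eax" in the X32 checks for t5 below exists only
; to zero-extend the incoming pointer into a 64-bit register; if the caller is
; already required to do that, a direct "jmpq *%rdi # TAILCALL" would suffice.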
define void @t5(void ()* nocapture %x) nounwind ssp {
; X86-NEXT: jmpl *{{[0-9]+}}(%esp) # TAILCALL
; X64-NEXT: jmpq *%rdi # TAILCALL
; X32-NEXT: movl %edi, %eax
; X32-NEXT: jmpq *%rax # TAILCALL
  tail call void %x() nounwind
; Basically the same test as t5, except pass the function pointer on the stack
define void @t5_x64(i32, i32, i32, i32, i32, i32, void ()* nocapture %x) nounwind ssp {
; X86-NEXT: jmpl *{{[0-9]+}}(%esp) # TAILCALL
; X64-NEXT: jmpq *{{[0-9]+}}(%rsp) # TAILCALL
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: jmpq *%rax # TAILCALL
  tail call void %x() nounwind
define i32 @t6(i32 %x) nounwind ssp {
; X86-NEXT: subl $12, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpl $9, %eax
; X86-NEXT: jg .LBB6_2
; X86-NEXT: # %bb.1: # %bb
; X86-NEXT: decl %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: addl $12, %esp
; X86-NEXT: .LBB6_2: # %bb1
; X86-NEXT: addl $12, %esp
; X86-NEXT: jmp bar # TAILCALL
; X64-NEXT: cmpl $9, %edi
; X64-NEXT: jg .LBB6_2
; X64-NEXT: # %bb.1: # %bb
; X64-NEXT: decl %edi
; X64-NEXT: jmp t6 # TAILCALL
; X64-NEXT: .LBB6_2: # %bb1
; X64-NEXT: jmp bar # TAILCALL
; X32-NEXT: cmpl $9, %edi
; X32-NEXT: jg .LBB6_2
; X32-NEXT: # %bb.1: # %bb
; X32-NEXT: decl %edi
; X32-NEXT: jmp t6 # TAILCALL
; X32-NEXT: .LBB6_2: # %bb1
; X32-NEXT: jmp bar # TAILCALL
  %t0 = icmp slt i32 %x, 10
  br i1 %t0, label %bb, label %bb1
  %t1 = add nsw i32 %x, -1
  %t2 = tail call i32 @t6(i32 %t1) nounwind ssp
  %t3 = tail call i32 @bar(i32 %x) nounwind
declare i32 @bar(i32)
define i32 @t7(i32 %a, i32 %b, i32 %c) nounwind ssp {
; X86-NEXT: jmp bar2 # TAILCALL
; X64-NEXT: jmp bar2 # TAILCALL
; X32-NEXT: jmp bar2 # TAILCALL
  %t0 = tail call i32 @bar2(i32 %a, i32 %b, i32 %c) nounwind
declare i32 @bar2(i32, i32, i32)
define signext i16 @t8() nounwind ssp {
; X86: # %bb.0: # %entry
; X86-NEXT: jmp bar3 # TAILCALL
; X64: # %bb.0: # %entry
; X64-NEXT: jmp bar3 # TAILCALL
; X32: # %bb.0: # %entry
; X32-NEXT: jmp bar3 # TAILCALL
  %0 = tail call signext i16 @bar3() nounwind ; <i16> [#uses=1]
declare signext i16 @bar3()
define signext i16 @t9(i32 (i32)* nocapture %x) nounwind ssp {
; X86: # %bb.0: # %entry
; X86-NEXT: subl $12, %esp
; X86-NEXT: movl $0, (%esp)
; X86-NEXT: calll *{{[0-9]+}}(%esp)
; X86-NEXT: addl $12, %esp
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorl %edi, %edi
; X64-NEXT: jmpq *%rax # TAILCALL
; X32: # %bb.0: # %entry
; X32-NEXT: movl %edi, %eax
; X32-NEXT: xorl %edi, %edi
; X32-NEXT: jmpq *%rax # TAILCALL
  %0 = bitcast i32 (i32)* %x to i16 (i32)*
  %1 = tail call signext i16 %0(i32 0) nounwind
define void @t10() nounwind ssp {
; X86: # %bb.0: # %entry
; X86-NEXT: subl $12, %esp
; X86-NEXT: calll foo4
; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rax
; X64-NEXT: callq foo4
; X32: # %bb.0: # %entry
; X32-NEXT: pushq %rax
; X32-NEXT: callq foo4
  %0 = tail call i32 @foo4() noreturn nounwind
; In 32-bit mode, it's emitting a bunch of dead loads that are not being
; eliminated currently.
define i32 @t11(i32 %x, i32 %y, i32 %z.0, i32 %z.1, i32 %z.2) nounwind ssp {
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: testl %eax, %eax
; X86-NEXT: je .LBB11_1
; X86-NEXT: # %bb.2: # %bb
; X86-NEXT: jmp foo5 # TAILCALL
; X86-NEXT: .LBB11_1: # %bb6
; X86-NEXT: xorl %eax, %eax
; X64: # %bb.0: # %entry
; X64-NEXT: testl %edi, %edi
; X64-NEXT: je .LBB11_1
; X64-NEXT: # %bb.2: # %bb
; X64-NEXT: jmp foo5 # TAILCALL
; X64-NEXT: .LBB11_1: # %bb6
; X64-NEXT: xorl %eax, %eax
; X32: # %bb.0: # %entry
; X32-NEXT: testl %edi, %edi
; X32-NEXT: je .LBB11_1
; X32-NEXT: # %bb.2: # %bb
; X32-NEXT: jmp foo5 # TAILCALL
; X32-NEXT: .LBB11_1: # %bb6
; X32-NEXT: xorl %eax, %eax
  %0 = icmp eq i32 %x, 0
  br i1 %0, label %bb6, label %bb
  %1 = tail call i32 @foo5(i32 %x, i32 %y, i32 %z.0, i32 %z.1, i32 %z.2) nounwind
declare i32 @foo5(i32, i32, i32, i32, i32)
%struct.t = type { i32, i32, i32, i32, i32 }
define i32 @t12(i32 %x, i32 %y, %struct.t* byval align 4 %z) nounwind ssp {
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: testl %eax, %eax
; X86-NEXT: je .LBB12_1
; X86-NEXT: # %bb.2: # %bb
; X86-NEXT: jmp foo6 # TAILCALL
; X86-NEXT: .LBB12_1: # %bb2
; X86-NEXT: xorl %eax, %eax
; X64: # %bb.0: # %entry
; X64-NEXT: testl %edi, %edi
; X64-NEXT: je .LBB12_1
; X64-NEXT: # %bb.2: # %bb
; X64-NEXT: jmp foo6 # TAILCALL
; X64-NEXT: .LBB12_1: # %bb2
; X64-NEXT: xorl %eax, %eax
; X32: # %bb.0: # %entry
; X32-NEXT: testl %edi, %edi
; X32-NEXT: je .LBB12_1
; X32-NEXT: # %bb.2: # %bb
; X32-NEXT: jmp foo6 # TAILCALL
; X32-NEXT: .LBB12_1: # %bb2
; X32-NEXT: xorl %eax, %eax
  %0 = icmp eq i32 %x, 0
  br i1 %0, label %bb2, label %bb
  %1 = tail call i32 @foo6(i32 %x, i32 %y, %struct.t* byval align 4 %z) nounwind
declare i32 @foo6(i32, i32, %struct.t* byval align 4)
%struct.ns = type { i32, i32 }
%struct.cp = type { float, float, float, float, float }
define %struct.ns* @t13(%struct.cp* %yy) nounwind ssp {
; X86: # %bb.0: # %entry
; X86-NEXT: subl $28, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl 16(%eax), %ecx
; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: movsd %xmm1, {{[0-9]+}}(%esp)
; X86-NEXT: movsd %xmm0, (%esp)
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: calll foo7
; X86-NEXT: addl $28, %esp
; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rax
; X64-NEXT: subq $8, %rsp
; X64-NEXT: movl 16(%rdi), %eax
; X64-NEXT: movq (%rdi), %rcx
; X64-NEXT: movq 8(%rdi), %rdx
; X64-NEXT: xorl %edi, %edi
; X64-NEXT: pushq %rax
; X64-NEXT: pushq %rdx
; X64-NEXT: pushq %rcx
; X64-NEXT: callq foo7
; X64-NEXT: addq $32, %rsp
; X64-NEXT: popq %rcx
; X32: # %bb.0: # %entry
; X32-NEXT: pushq %rax
; X32-NEXT: subl $8, %esp
; X32-NEXT: movl 16(%edi), %eax
; X32-NEXT: movq (%edi), %rcx
; X32-NEXT: movq 8(%edi), %rdx
; X32-NEXT: xorl %edi, %edi
; X32-NEXT: pushq %rax
; X32-NEXT: pushq %rdx
; X32-NEXT: pushq %rcx
; X32-NEXT: callq foo7
; X32-NEXT: addl $32, %esp
; X32-NEXT: popq %rcx
  %0 = tail call fastcc %struct.ns* @foo7(%struct.cp* byval align 4 %yy, i8 signext 0) nounwind
; llvm can't do sibcall for this in 32-bit mode (yet).
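; The byval %struct.cp argument has to be copied into the callee's argument
; area, which is presumably why the checks above show a plain call sequence
; rather than a tail call on every target.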
declare fastcc %struct.ns* @foo7(%struct.cp* byval align 4, i8 signext) nounwind ssp
%struct.__block_descriptor = type { i64, i64 }
%struct.__block_descriptor_withcopydispose = type { i64, i64, i8*, i8* }
%struct.__block_literal_1 = type { i8*, i32, i32, i8*, %struct.__block_descriptor* }
%struct.__block_literal_2 = type { i8*, i32, i32, i8*, %struct.__block_descriptor_withcopydispose*, void ()* }
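; These struct types model clang's block-literal layout; t14 below loads the
; invoke function pointer out of a nested block literal and performs an
; indirect tail call through it.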
define void @t14(%struct.__block_literal_2* nocapture %.block_descriptor) nounwind ssp {
; X86: # %bb.0: # %entry
; X86-NEXT: subl $12, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl 20(%eax), %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: calll *12(%eax)
; X86-NEXT: addl $12, %esp
; X64: # %bb.0: # %entry
; X64-NEXT: movq 32(%rdi), %rdi
; X64-NEXT: jmpq *16(%rdi) # TAILCALL
; X32: # %bb.0: # %entry
; X32-NEXT: movl 20(%edi), %edi
; X32-NEXT: movl 12(%edi), %eax
; X32-NEXT: jmpq *%rax # TAILCALL
  %0 = getelementptr inbounds %struct.__block_literal_2, %struct.__block_literal_2* %.block_descriptor, i64 0, i32 5 ; <void ()**> [#uses=1]
  %1 = load void ()*, void ()** %0, align 8 ; <void ()*> [#uses=2]
  %2 = bitcast void ()* %1 to %struct.__block_literal_1* ; <%struct.__block_literal_1*> [#uses=1]
  %3 = getelementptr inbounds %struct.__block_literal_1, %struct.__block_literal_1* %2, i64 0, i32 3 ; <i8**> [#uses=1]
  %4 = load i8*, i8** %3, align 8 ; <i8*> [#uses=1]
  %5 = bitcast i8* %4 to void (i8*)* ; <void (i8*)*> [#uses=1]
  %6 = bitcast void ()* %1 to i8* ; <i8*> [#uses=1]
  tail call void %5(i8* %6) nounwind
%struct.foo = type { [4 x i32] }
define void @t15(%struct.foo* noalias sret %agg.result) nounwind {
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, %ecx
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: popq %rbx
; X32-NEXT: pushq %rbx
; X32-NEXT: movl %edi, %ebx
; X32-NEXT: movl %ebx, %eax
; X32-NEXT: popq %rbx
  tail call fastcc void @f(%struct.foo* noalias sret %agg.result) nounwind
declare void @f(%struct.foo* noalias sret) nounwind
define void @t16() nounwind ssp {
; X86: # %bb.0: # %entry
; X86-NEXT: subl $12, %esp
; X86-NEXT: calll bar4
; X86-NEXT: fstp %st(0)
; X86-NEXT: addl $12, %esp
; X64: # %bb.0: # %entry
; X64-NEXT: jmp bar4 # TAILCALL
; X32: # %bb.0: # %entry
; X32-NEXT: jmp bar4 # TAILCALL
  %0 = tail call double @bar4() nounwind
declare double @bar4()
define void @t17() nounwind ssp {
; X86: # %bb.0: # %entry
; X86-NEXT: jmp bar5 # TAILCALL
; X64: # %bb.0: # %entry
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: jmp bar5 # TAILCALL
; X32: # %bb.0: # %entry
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: jmp bar5 # TAILCALL
  tail call void (...) @bar5() nounwind
declare void @bar5(...)
define void @t18() nounwind ssp {
; X86: # %bb.0: # %entry
; X86-NEXT: subl $12, %esp
; X86-NEXT: calll bar6
; X86-NEXT: fstp %st(0)
; X86-NEXT: addl $12, %esp
; X64: # %bb.0: # %entry
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: jmp bar6 # TAILCALL
; X32: # %bb.0: # %entry
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: jmp bar6 # TAILCALL
  %0 = tail call double (...) @bar6() nounwind
declare double @bar6(...)
define void @t19() alignstack(32) nounwind {
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-32, %esp
; X86-NEXT: subl $32, %esp
; X86-NEXT: calll foo
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rbp
; X64-NEXT: movq %rsp, %rbp
; X64-NEXT: andq $-32, %rsp
; X64-NEXT: subq $32, %rsp
; X64-NEXT: callq foo
; X64-NEXT: movq %rbp, %rsp
; X64-NEXT: popq %rbp
; X32: # %bb.0: # %entry
; X32-NEXT: pushq %rbp
; X32-NEXT: movl %esp, %ebp
; X32-NEXT: andl $-32, %esp
; X32-NEXT: subl $32, %esp
; X32-NEXT: callq foo
; X32-NEXT: movl %ebp, %esp
; X32-NEXT: popq %rbp
  tail call void @foo() nounwind
; If the caller and callee calling conventions mismatch, check whether the
; return values are returned in the same registers.
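; In t20 the X86 checks below show the fastcc return value coming back in
; %xmm0 while the C-convention caller must return the double on the x87 stack,
; hence the calll plus movsd/fldl sequence; on X64 and X32 both conventions
; return the double in %xmm0, so a plain tail call is still possible.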
define double @t20(double %x) nounwind {
; X86: # %bb.0: # %entry
; X86-NEXT: subl $12, %esp
; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-NEXT: calll foo20
; X86-NEXT: movsd %xmm0, (%esp)
; X86-NEXT: fldl (%esp)
; X86-NEXT: addl $12, %esp
; X64: # %bb.0: # %entry
; X64-NEXT: jmp foo20 # TAILCALL
; X32: # %bb.0: # %entry
; X32-NEXT: jmp foo20 # TAILCALL
  %0 = tail call fastcc double @foo20(double %x) nounwind
declare fastcc double @foo20(double) nounwind
define fastcc void @t21_sret_to_sret(%struct.foo* noalias sret %agg.result) nounwind {
; X86-LABEL: t21_sret_to_sret:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl %ecx, %esi
; X86-NEXT: calll t21_f_sret
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X64-LABEL: t21_sret_to_sret:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq t21_f_sret
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: popq %rbx
; X32-LABEL: t21_sret_to_sret:
; X32-NEXT: pushq %rbx
; X32-NEXT: movl %edi, %ebx
; X32-NEXT: callq t21_f_sret
; X32-NEXT: movl %ebx, %eax
; X32-NEXT: popq %rbx
  tail call fastcc void @t21_f_sret(%struct.foo* noalias sret %agg.result) nounwind
define fastcc void @t21_sret_to_non_sret(%struct.foo* noalias sret %agg.result) nounwind {
; X86-LABEL: t21_sret_to_non_sret:
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl %ecx, %esi
; X86-NEXT: calll t21_f_non_sret
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $8, %esp
; X86-NEXT: popl %esi
; X64-LABEL: t21_sret_to_non_sret:
; X64-NEXT: pushq %rbx
; X64-NEXT: movq %rdi, %rbx
; X64-NEXT: callq t21_f_non_sret
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: popq %rbx
; X32-LABEL: t21_sret_to_non_sret:
; X32-NEXT: pushq %rbx
; X32-NEXT: movl %edi, %ebx
; X32-NEXT: callq t21_f_non_sret
; X32-NEXT: movl %ebx, %eax
; X32-NEXT: popq %rbx
  tail call fastcc void @t21_f_non_sret(%struct.foo* %agg.result) nounwind
define fastcc void @t21_non_sret_to_sret(%struct.foo* %agg.result) nounwind {
; X86-LABEL: t21_non_sret_to_sret:
; X86-NEXT: subl $12, %esp
; X86-NEXT: calll t21_f_sret
; X86-NEXT: addl $12, %esp
; X64-LABEL: t21_non_sret_to_sret:
; X64-NEXT: pushq %rax
; X64-NEXT: callq t21_f_sret
; X64-NEXT: popq %rax
; X32-LABEL: t21_non_sret_to_sret:
; X32-NEXT: pushq %rax
; X32-NEXT: callq t21_f_sret
; X32-NEXT: popq %rax
  tail call fastcc void @t21_f_sret(%struct.foo* noalias sret %agg.result) nounwind
declare fastcc void @t21_f_sret(%struct.foo* noalias sret) nounwind
declare fastcc void @t21_f_non_sret(%struct.foo*) nounwind
define ccc void @t22_non_sret_to_sret(%struct.foo* %agg.result) nounwind {
; X86-LABEL: t22_non_sret_to_sret:
; X86-NEXT: subl $12, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: calll t22_f_sret
; X86-NEXT: addl $8, %esp
; X64-LABEL: t22_non_sret_to_sret:
; X64-NEXT: pushq %rax
; X64-NEXT: callq t22_f_sret
; X64-NEXT: popq %rax
; X32-LABEL: t22_non_sret_to_sret:
; X32-NEXT: pushq %rax
; X32-NEXT: callq t22_f_sret
; X32-NEXT: popq %rax
  tail call ccc void @t22_f_sret(%struct.foo* noalias sret %agg.result) nounwind
declare ccc void @t22_f_sret(%struct.foo* noalias sret) nounwind