; RUN: sed -e "s/RETTYPE/void/;s/RETVAL//" %s | llc -mtriple=x86_64-apple-darwin -mcpu=corei7 | FileCheck --check-prefixes=ALL,SSE,VOID %s
; RUN: sed -e "s/RETTYPE/i32/;s/RETVAL/undef/" %s | llc -mtriple=x86_64-apple-darwin -mcpu=corei7 | FileCheck --check-prefixes=ALL,SSE,INT %s
; RUN: sed -e "s/RETTYPE/\{i64\,i64\}/;s/RETVAL/undef/" %s | llc -mtriple=x86_64-apple-darwin -mcpu=corei7 | FileCheck --check-prefixes=ALL,SSE,INT128 %s
; RUN: sed -e "s/RETTYPE/void/;s/RETVAL//" %s | llc -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck --check-prefixes=ALL,AVX,VOID %s
; RUN: sed -e "s/RETTYPE/i32/;s/RETVAL/undef/" %s | llc -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck --check-prefixes=ALL,AVX,INT %s
; RUN: sed -e "s/RETTYPE/\{i64\,i64\}/;s/RETVAL/undef/" %s | llc -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck --check-prefixes=ALL,AVX,INT128 %s
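
; Each RUN line rewrites RETTYPE/RETVAL via sed so the functions below return
; void, i32, or {i64,i64}. The checks verify that a preserve_allcc callee saves
; and restores every register except those used to return the value (RAX for
; i32, RAX and RDX for {i64,i64}).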
define preserve_allcc RETTYPE @preserve_allcc1(i64, i64, double, double) nounwind {
;ALL-LABEL: preserve_allcc1
;VOID-NEXT: pushq %rdx
;INT128-NOT: pushq %rdx
;VOID-NEXT: pushq %rax
;INT128-NOT: pushq %rax
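; The registers that carry the return value must not be spilled by the callee,
; so the rax/rdx pushes (and the matching pops below) are expected only when
; those registers are not live-out.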
;SSE-NEXT: movaps %xmm14
;SSE-NEXT: movaps %xmm13
;SSE-NEXT: movaps %xmm12
;SSE-NEXT: movaps %xmm11
;SSE-NEXT: movaps %xmm10
;SSE-NEXT: movaps %xmm9
;SSE-NEXT: movaps %xmm8
;SSE-NEXT: movaps %xmm7
;SSE-NEXT: movaps %xmm6
;SSE-NEXT: movaps %xmm5
;SSE-NEXT: movaps %xmm4
;SSE-NEXT: movaps %xmm3
;SSE-NEXT: movaps %xmm2
;SSE-NEXT: movaps %xmm1
;SSE-NEXT: movaps %xmm0
;AVX-NEXT: vmovups %ymm14
;AVX-NEXT: vmovups %ymm13
;AVX-NEXT: vmovups %ymm12
;AVX-NEXT: vmovups %ymm11
;AVX-NEXT: vmovups %ymm10
;AVX-NEXT: vmovups %ymm9
;AVX-NEXT: vmovups %ymm8
;AVX-NEXT: vmovups %ymm7
;AVX-NEXT: vmovups %ymm6
;AVX-NEXT: vmovups %ymm5
;AVX-NEXT: vmovups %ymm4
;AVX-NEXT: vmovups %ymm3
;AVX-NEXT: vmovups %ymm2
;AVX-NEXT: vmovups %ymm1
;AVX-NEXT: vmovups %ymm0
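; The spilled vector registers are reloaded in the epilogue, in reverse order.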
;SSE: movaps {{.*}} %xmm0
;SSE-NEXT: movaps {{.*}} %xmm1
;SSE-NEXT: movaps {{.*}} %xmm2
;SSE-NEXT: movaps {{.*}} %xmm3
;SSE-NEXT: movaps {{.*}} %xmm4
;SSE-NEXT: movaps {{.*}} %xmm5
;SSE-NEXT: movaps {{.*}} %xmm6
;SSE-NEXT: movaps {{.*}} %xmm7
;SSE-NEXT: movaps {{.*}} %xmm8
;SSE-NEXT: movaps {{.*}} %xmm9
;SSE-NEXT: movaps {{.*}} %xmm10
;SSE-NEXT: movaps {{.*}} %xmm11
;SSE-NEXT: movaps {{.*}} %xmm12
;SSE-NEXT: movaps {{.*}} %xmm13
;SSE-NEXT: movaps {{.*}} %xmm14
;SSE-NEXT: movaps {{.*}} %xmm15
;AVX: vmovups {{.*}} %ymm0
;AVX-NEXT: vmovups {{.*}} %ymm1
;AVX-NEXT: vmovups {{.*}} %ymm2
;AVX-NEXT: vmovups {{.*}} %ymm3
;AVX-NEXT: vmovups {{.*}} %ymm4
;AVX-NEXT: vmovups {{.*}} %ymm5
;AVX-NEXT: vmovups {{.*}} %ymm6
;AVX-NEXT: vmovups {{.*}} %ymm7
;AVX-NEXT: vmovups {{.*}} %ymm8
;AVX-NEXT: vmovups {{.*}} %ymm9
;AVX-NEXT: vmovups {{.*}} %ymm10
;AVX-NEXT: vmovups {{.*}} %ymm11
;AVX-NEXT: vmovups {{.*}} %ymm12
;AVX-NEXT: vmovups {{.*}} %ymm13
;AVX-NEXT: vmovups {{.*}} %ymm14
;AVX-NEXT: vmovups {{.*}} %ymm15
;VOID-NEXT: popq %rax
;INT128-NOT: popq %rax
;VOID-NEXT: popq %rdx
;INT128-NOT: popq %rdx
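; The inline asm below clobbers every GPR and all 16 XMM registers, forcing
; the preserve_allcc prologue/epilogue to spill and reload them.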
  call void asm sideeffect "", "~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{rbp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15}"()
  ret RETTYPE RETVAL
}
; Make sure R11 and the return registers are saved before the call
declare preserve_allcc RETTYPE @bar(i64, i64, double, double)
define void @preserve_allcc2() nounwind {
;ALL-LABEL: preserve_allcc2
;VOID-NOT: movq %rax, [[REG1:%[a-z0-9]+]]
;INT: movq %rax, [[REG1:%[a-z0-9]+]]
;INT128: movq %rax, [[REG1:%[a-z0-9]+]]
;VOID-NOT: movq %rdx, [[REG2:%[a-z0-9]+]]
;INT-NOT: movq %rdx, [[REG2:%[a-z0-9]+]]
;INT128: movq %rdx, [[REG2:%[a-z0-9]+]]
;ALL: movq %r11, [[REG3:%[a-z0-9]+]]
;ALL-NOT: movaps %xmm
;VOID-NOT: movq {{.*}}, %rax
;INT: movq [[REG1]], %rax
;INT128: movq [[REG1]], %rax
;VOID-NOT: movq {{.*}}, %rdx
;INT-NOT: movq {{.*}}, %rdx
;INT128: movq [[REG2]], %rdx
;ALL: movq [[REG3]], %r11
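; Populate a set of normally caller-saved registers, call @bar, and use the
; values again afterwards; only R11 and the registers holding @bar's return
; value should need to be copied around the call.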
  %a0 = call i64 asm sideeffect "", "={rax}"() nounwind
  %a1 = call i64 asm sideeffect "", "={rcx}"() nounwind
  %a2 = call i64 asm sideeffect "", "={rdx}"() nounwind
  %a3 = call i64 asm sideeffect "", "={r8}"() nounwind
  %a4 = call i64 asm sideeffect "", "={r9}"() nounwind
  %a5 = call i64 asm sideeffect "", "={r10}"() nounwind
  %a6 = call i64 asm sideeffect "", "={r11}"() nounwind
  %a10 = call <2 x double> asm sideeffect "", "={xmm2}"() nounwind
  %a11 = call <2 x double> asm sideeffect "", "={xmm3}"() nounwind
  %a12 = call <2 x double> asm sideeffect "", "={xmm4}"() nounwind
  %a13 = call <2 x double> asm sideeffect "", "={xmm5}"() nounwind
  %a14 = call <2 x double> asm sideeffect "", "={xmm6}"() nounwind
  %a15 = call <2 x double> asm sideeffect "", "={xmm7}"() nounwind
  %a16 = call <2 x double> asm sideeffect "", "={xmm8}"() nounwind
  %a17 = call <2 x double> asm sideeffect "", "={xmm9}"() nounwind
  %a18 = call <2 x double> asm sideeffect "", "={xmm10}"() nounwind
  %a19 = call <2 x double> asm sideeffect "", "={xmm11}"() nounwind
  %a20 = call <2 x double> asm sideeffect "", "={xmm12}"() nounwind
  %a21 = call <2 x double> asm sideeffect "", "={xmm13}"() nounwind
  %a22 = call <2 x double> asm sideeffect "", "={xmm14}"() nounwind
  %a23 = call <2 x double> asm sideeffect "", "={xmm15}"() nounwind
  call preserve_allcc RETTYPE @bar(i64 1, i64 2, double 3.0, double 4.0)
  call void asm sideeffect "", "{rax},{rcx},{rdx},{r8},{r9},{r10},{r11},{xmm2},{xmm3},{xmm4},{xmm5},{xmm6},{xmm7},{xmm8},{xmm9},{xmm10},{xmm11},{xmm12},{xmm13},{xmm14},{xmm15}"(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64 %a6, <2 x double> %a10, <2 x double> %a11, <2 x double> %a12, <2 x double> %a13, <2 x double> %a14, <2 x double> %a15, <2 x double> %a16, <2 x double> %a17, <2 x double> %a18, <2 x double> %a19, <2 x double> %a20, <2 x double> %a21, <2 x double> %a22, <2 x double> %a23)
  ret void
}