; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -O0 -mtriple=i386-linux-gnu -mattr=+sse2 -global-isel -stop-after=irtranslator < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
; RUN: llc -O0 -mtriple=x86_64-linux-gnu -global-isel -stop-after=irtranslator < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
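
; This file checks that the IRTranslator lowers arguments, return values, and
; calls for the i386 (stack-based) and x86-64 (System V) calling conventions.
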
@a1_8bit = external global i8
@a7_8bit = external global i8
@a8_8bit = external global i8
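
; Eight i8 arguments: on x86-64 the first six arrive in registers as s32
; copies that are truncated back to s8, and the last two are loaded from
; fixed stack slots; on x86-32 all eight come off the stack. %arg1 doubles
; as the return value.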
define i8 @test_i8_args_8(i8 %arg1, i8 %arg2, i8 %arg3, i8 %arg4,
; X32-LABEL: name: test_i8_args_8
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.7
; X32: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.7)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
; X32: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.6)
; X32: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
; X32: [[LOAD2:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 1 from %fixed-stack.5)
; X32: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
; X32: [[LOAD3:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX3]](p0) :: (invariant load 1 from %fixed-stack.4)
; X32: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
; X32: [[LOAD4:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX4]](p0) :: (invariant load 1 from %fixed-stack.3)
; X32: [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
; X32: [[LOAD5:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX5]](p0) :: (invariant load 1 from %fixed-stack.2)
; X32: [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X32: [[LOAD6:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX6]](p0) :: (invariant load 1 from %fixed-stack.1)
; X32: [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD7:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX7]](p0) :: (invariant load 1 from %fixed-stack.0)
; X32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_8bit
; X32: [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_8bit
; X32: [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_8bit
; X32: G_STORE [[LOAD]](s8), [[GV]](p0) :: (store 1 into @a1_8bit)
; X32: G_STORE [[LOAD6]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
; X32: G_STORE [[LOAD7]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
; X32: $al = COPY [[LOAD]](s8)
; X32: RET 0, implicit $al
; X64-LABEL: name: test_i8_args_8
; X64: liveins: $ecx, $edi, $edx, $esi, $r8d, $r9d
; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; X64: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; X64: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY $ecx
; X64: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[COPY3]](s32)
; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY $r8d
; X64: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[COPY4]](s32)
; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY $r9d
; X64: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[COPY5]](s32)
; X64: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X64: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.1)
; X64: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X64: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.0)
; X64: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_8bit
; X64: [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_8bit
; X64: [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_8bit
; X64: G_STORE [[TRUNC]](s8), [[GV]](p0) :: (store 1 into @a1_8bit)
; X64: G_STORE [[LOAD]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
; X64: G_STORE [[LOAD1]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
; X64: $al = COPY [[TRUNC]](s8)
; X64: RET 0, implicit $al
                          i8 %arg5, i8 %arg6, i8 %arg7, i8 %arg8) {
  store i8 %arg1, i8* @a1_8bit
  store i8 %arg7, i8* @a7_8bit
  store i8 %arg8, i8* @a8_8bit
  ret i8 %arg1
}

@a1_32bit = external global i32
@a7_32bit = external global i32
@a8_32bit = external global i32
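
; The same test with i32 arguments; no truncation is needed since the values
; already match the register and stack-slot width.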
define i32 @test_i32_args_8(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4,
; X32-LABEL: name: test_i32_args_8
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.7
; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.7, align 1)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.6, align 1)
; X32: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
; X32: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 4 from %fixed-stack.5, align 1)
; X32: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
; X32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (invariant load 4 from %fixed-stack.4, align 1)
; X32: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
; X32: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p0) :: (invariant load 4 from %fixed-stack.3, align 1)
; X32: [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
; X32: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p0) :: (invariant load 4 from %fixed-stack.2, align 1)
; X32: [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X32: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p0) :: (invariant load 4 from %fixed-stack.1, align 1)
; X32: [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; X32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_32bit
; X32: [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_32bit
; X32: [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_32bit
; X32: G_STORE [[LOAD]](s32), [[GV]](p0) :: (store 4 into @a1_32bit)
; X32: G_STORE [[LOAD6]](s32), [[GV1]](p0) :: (store 4 into @a7_32bit)
; X32: G_STORE [[LOAD7]](s32), [[GV2]](p0) :: (store 4 into @a8_32bit)
; X32: $eax = COPY [[LOAD]](s32)
; X32: RET 0, implicit $eax
; X64-LABEL: name: test_i32_args_8
; X64: liveins: $ecx, $edi, $edx, $esi, $r8d, $r9d
; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY $ecx
; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY $r8d
; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY $r9d
; X64: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X64: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 1)
; X64: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X64: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; X64: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_32bit
; X64: [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_32bit
; X64: [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_32bit
; X64: G_STORE [[COPY]](s32), [[GV]](p0) :: (store 4 into @a1_32bit)
; X64: G_STORE [[LOAD]](s32), [[GV1]](p0) :: (store 4 into @a7_32bit)
; X64: G_STORE [[LOAD1]](s32), [[GV2]](p0) :: (store 4 into @a8_32bit)
; X64: $eax = COPY [[COPY]](s32)
; X64: RET 0, implicit $eax
                            i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8) {
  store i32 %arg1, i32* @a1_32bit
  store i32 %arg7, i32* @a7_32bit
  store i32 %arg8, i32* @a8_32bit
  ret i32 %arg1
}

@a1_64bit = external global i64
@a7_64bit = external global i64
@a8_64bit = external global i64
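
; For i64 arguments, x86-32 loads each value as two s32 halves and rebuilds
; it with G_MERGE_VALUES; the s64 return value is split back into $eax/$edx
; with G_UNMERGE_VALUES. x86-64 handles s64 values directly.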
define i64 @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4,
; X32-LABEL: name: test_i64_args_8
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.15
; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.15, align 1)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.14
; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.14, align 1)
; X32: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.13
; X32: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 4 from %fixed-stack.13, align 1)
; X32: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.12
; X32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (invariant load 4 from %fixed-stack.12, align 1)
; X32: [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.11
; X32: [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p0) :: (invariant load 4 from %fixed-stack.11, align 1)
; X32: [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.10
; X32: [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p0) :: (invariant load 4 from %fixed-stack.10, align 1)
; X32: [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.9
; X32: [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p0) :: (invariant load 4 from %fixed-stack.9, align 1)
; X32: [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.8
; X32: [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p0) :: (invariant load 4 from %fixed-stack.8, align 1)
; X32: [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.7
; X32: [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX8]](p0) :: (invariant load 4 from %fixed-stack.7, align 1)
; X32: [[FRAME_INDEX9:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
; X32: [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX9]](p0) :: (invariant load 4 from %fixed-stack.6, align 1)
; X32: [[FRAME_INDEX10:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
; X32: [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX10]](p0) :: (invariant load 4 from %fixed-stack.5, align 1)
; X32: [[FRAME_INDEX11:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
; X32: [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX11]](p0) :: (invariant load 4 from %fixed-stack.4, align 1)
; X32: [[FRAME_INDEX12:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
; X32: [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX12]](p0) :: (invariant load 4 from %fixed-stack.3, align 1)
; X32: [[FRAME_INDEX13:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
; X32: [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX13]](p0) :: (invariant load 4 from %fixed-stack.2, align 1)
; X32: [[FRAME_INDEX14:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X32: [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX14]](p0) :: (invariant load 4 from %fixed-stack.1, align 1)
; X32: [[FRAME_INDEX15:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX15]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; X32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
; X32: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
; X32: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
; X32: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
; X32: [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD8]](s32), [[LOAD9]](s32)
; X32: [[MV5:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD10]](s32), [[LOAD11]](s32)
; X32: [[MV6:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD12]](s32), [[LOAD13]](s32)
; X32: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD14]](s32), [[LOAD15]](s32)
; X32: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_64bit
; X32: [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_64bit
; X32: [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_64bit
; X32: G_STORE [[MV]](s64), [[GV]](p0) :: (store 8 into @a1_64bit, align 4)
; X32: G_STORE [[MV6]](s64), [[GV1]](p0) :: (store 8 into @a7_64bit, align 4)
; X32: G_STORE [[MV7]](s64), [[GV2]](p0) :: (store 8 into @a8_64bit, align 4)
; X32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
; X32: $eax = COPY [[UV]](s32)
; X32: $edx = COPY [[UV1]](s32)
; X32: RET 0, implicit $eax, implicit $edx
; X64-LABEL: name: test_i64_args_8
; X64: liveins: $rcx, $rdi, $rdx, $rsi, $r8, $r9
; X64: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
; X64: [[COPY2:%[0-9]+]]:_(s64) = COPY $rdx
; X64: [[COPY3:%[0-9]+]]:_(s64) = COPY $rcx
; X64: [[COPY4:%[0-9]+]]:_(s64) = COPY $r8
; X64: [[COPY5:%[0-9]+]]:_(s64) = COPY $r9
; X64: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X64: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 1)
; X64: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X64: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 8 from %fixed-stack.0, align 1)
; X64: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_64bit
; X64: [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_64bit
; X64: [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_64bit
; X64: G_STORE [[COPY]](s64), [[GV]](p0) :: (store 8 into @a1_64bit)
; X64: G_STORE [[LOAD]](s64), [[GV1]](p0) :: (store 8 into @a7_64bit)
; X64: G_STORE [[LOAD1]](s64), [[GV2]](p0) :: (store 8 into @a8_64bit)
; X64: $rax = COPY [[COPY]](s64)
; X64: RET 0, implicit $rax
                            i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8) {
; ... a bunch more that we don't track ...
  store i64 %arg1, i64* @a1_64bit
  store i64 %arg7, i64* @a7_64bit
  store i64 %arg8, i64* @a8_64bit
  ret i64 %arg1
}
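
; Scalar float: x86-32 returns it on the x87 stack as an s80 value in $fp0;
; x86-64 passes and returns it in the low 32 bits of an XMM register.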
define float @test_float_args(float %arg1, float %arg2) {
; X32-LABEL: name: test_float_args
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 1)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; X32: [[ANYEXT:%[0-9]+]]:_(s80) = G_ANYEXT [[LOAD1]](s32)
; X32: $fp0 = COPY [[ANYEXT]](s80)
; X32: RET 0, implicit $fp0
; X64-LABEL: name: test_float_args
; X64: bb.1 (%ir-block.0):
; X64: liveins: $xmm0, $xmm1
; X64: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
; X64: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s128)
; X64: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
; X64: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128)
; X64: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[TRUNC1]](s32)
; X64: $xmm0 = COPY [[ANYEXT]](s128)
; X64: RET 0, implicit $xmm0
  ret float %arg2
}
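
; Same checks for double (s64 instead of s32).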
define double @test_double_args(double %arg1, double %arg2) {
; X32-LABEL: name: test_double_args
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X32: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 1)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 8 from %fixed-stack.0, align 1)
; X32: [[ANYEXT:%[0-9]+]]:_(s80) = G_ANYEXT [[LOAD1]](s64)
; X32: $fp0 = COPY [[ANYEXT]](s80)
; X32: RET 0, implicit $fp0
; X64-LABEL: name: test_double_args
; X64: bb.1 (%ir-block.0):
; X64: liveins: $xmm0, $xmm1
; X64: [[COPY:%[0-9]+]]:_(s128) = COPY $xmm0
; X64: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY]](s128)
; X64: [[COPY1:%[0-9]+]]:_(s128) = COPY $xmm1
; X64: [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
; X64: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[TRUNC1]](s64)
; X64: $xmm0 = COPY [[ANYEXT]](s128)
; X64: RET 0, implicit $xmm0
  ret double %arg2
}
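
; A <4 x i32> argument fits in a single XMM register on both targets.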
define <4 x i32> @test_v4i32_args(<4 x i32> %arg1, <4 x i32> %arg2) {
; X32-LABEL: name: test_v4i32_args
; X32: bb.1 (%ir-block.0):
; X32: liveins: $xmm0, $xmm1
; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X32: $xmm0 = COPY [[COPY1]](<4 x s32>)
; X32: RET 0, implicit $xmm0
; X64-LABEL: name: test_v4i32_args
; X64: bb.1 (%ir-block.0):
; X64: liveins: $xmm0, $xmm1
; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X64: $xmm0 = COPY [[COPY1]](<4 x s32>)
; X64: RET 0, implicit $xmm0
  ret <4 x i32> %arg2
}
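
; Without AVX a <8 x i32> argument is split across $xmm0/$xmm1; the halves
; are concatenated on entry and unmerged again to return the value.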
define <8 x i32> @test_v8i32_args(<8 x i32> %arg1) {
; X32-LABEL: name: test_v8i32_args
; X32: bb.1 (%ir-block.0):
; X32: liveins: $xmm0, $xmm1
; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X32: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
; X32: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x s32>)
; X32: $xmm0 = COPY [[UV]](<4 x s32>)
; X32: $xmm1 = COPY [[UV1]](<4 x s32>)
; X32: RET 0, implicit $xmm0, implicit $xmm1
; X64-LABEL: name: test_v8i32_args
; X64: bb.1 (%ir-block.0):
; X64: liveins: $xmm0, $xmm1
; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X64: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
; X64: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x s32>)
; X64: $xmm0 = COPY [[UV]](<4 x s32>)
; X64: $xmm1 = COPY [[UV1]](<4 x s32>)
; X64: RET 0, implicit $xmm0, implicit $xmm1
  ret <8 x i32> %arg1
}
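
; The degenerate case: no arguments and no return value.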
define void @test_void_return() {
; X32-LABEL: name: test_void_return
; X64-LABEL: name: test_void_return
  ret void
}
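
; A pointer argument is a plain p0 value: loaded from the stack on x86-32,
; copied from $rdi on x86-64, and returned unchanged.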
define i32* @test_memop_i32(i32* %p1) {
; X32-LABEL: name: test_memop_i32
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; X32: $eax = COPY [[LOAD]](p0)
; X32: RET 0, implicit $eax
; X64-LABEL: name: test_memop_i32
; X64: bb.1 (%ir-block.0):
; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
; X64: $rax = COPY [[COPY]](p0)
; X64: RET 0, implicit $rax
  ret i32* %p1
}
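
; A direct call with no arguments only needs the call-frame setup/teardown
; pseudos around the CALL instruction.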
declare void @trivial_callee()
define void @test_trivial_call() {
; X32-LABEL: name: test_trivial_call
; X32: bb.1 (%ir-block.0):
; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: CALLpcrel32 @trivial_callee, csr_32, implicit $esp, implicit $ssp
; X32: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X64-LABEL: name: test_trivial_call
; X64: bb.1 (%ir-block.0):
; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: CALL64pcrel32 @trivial_callee, csr_64, implicit $rsp, implicit $ssp
; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
  call void @trivial_callee()
  ret void
}
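
; Outgoing arguments are stored relative to $esp on x86-32 and copied into
; argument registers on x86-64; %in1 and %in0 are swapped at the call site,
; so the copies must cross.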
declare void @simple_arg_callee(i32 %in0, i32 %in1)
define void @test_simple_arg(i32 %in0, i32 %in1) {
; X32-LABEL: name: test_simple_arg
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 1)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: G_STORE [[LOAD1]](s32), [[GEP]](p0) :: (store 4 into stack, align 1)
; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 1)
; X32: CALLpcrel32 @simple_arg_callee, csr_32, implicit $esp, implicit $ssp
; X32: ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X64-LABEL: name: test_simple_arg
; X64: bb.1 (%ir-block.0):
; X64: liveins: $edi, $esi
; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: $edi = COPY [[COPY1]](s32)
; X64: $esi = COPY [[COPY]](s32)
; X64: CALL64pcrel32 @simple_arg_callee, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit $esi
; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
  call void @simple_arg_callee(i32 %in1, i32 %in0)
  ret void
}
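
; With eight i32 arguments, x86-64 also needs outgoing stack slots for the
; last two (16 bytes, at 8-byte offsets); x86-32 stores all eight (32 bytes).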
declare void @simple_arg8_callee(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8)
define void @test_simple_arg8_call(i32 %in0) {
; X32-LABEL: name: test_simple_arg8_call
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; X32: ADJCALLSTACKDOWN32 32, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP]](p0) :: (store 4 into stack, align 1)
; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 1)
; X32: [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; X32: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY2]], [[C2]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP2]](p0) :: (store 4 into stack + 8, align 1)
; X32: [[COPY3:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; X32: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[COPY3]], [[C3]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP3]](p0) :: (store 4 into stack + 12, align 1)
; X32: [[COPY4:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; X32: [[GEP4:%[0-9]+]]:_(p0) = G_GEP [[COPY4]], [[C4]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP4]](p0) :: (store 4 into stack + 16, align 1)
; X32: [[COPY5:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
; X32: [[GEP5:%[0-9]+]]:_(p0) = G_GEP [[COPY5]], [[C5]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP5]](p0) :: (store 4 into stack + 20, align 1)
; X32: [[COPY6:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; X32: [[GEP6:%[0-9]+]]:_(p0) = G_GEP [[COPY6]], [[C6]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP6]](p0) :: (store 4 into stack + 24, align 1)
; X32: [[COPY7:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
; X32: [[GEP7:%[0-9]+]]:_(p0) = G_GEP [[COPY7]], [[C7]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP7]](p0) :: (store 4 into stack + 28, align 1)
; X32: CALLpcrel32 @simple_arg8_callee, csr_32, implicit $esp, implicit $ssp
; X32: ADJCALLSTACKUP32 32, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X64-LABEL: name: test_simple_arg8_call
; X64: bb.1 (%ir-block.0):
; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: ADJCALLSTACKDOWN64 16, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: $edi = COPY [[COPY]](s32)
; X64: $esi = COPY [[COPY]](s32)
; X64: $edx = COPY [[COPY]](s32)
; X64: $ecx = COPY [[COPY]](s32)
; X64: $r8d = COPY [[COPY]](s32)
; X64: $r9d = COPY [[COPY]](s32)
; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsp
; X64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; X64: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C]](s64)
; X64: G_STORE [[COPY]](s32), [[GEP]](p0) :: (store 4 into stack, align 1)
; X64: [[COPY2:%[0-9]+]]:_(p0) = COPY $rsp
; X64: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; X64: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY2]], [[C1]](s64)
; X64: G_STORE [[COPY]](s32), [[GEP1]](p0) :: (store 4 into stack + 8, align 1)
; X64: CALL64pcrel32 @simple_arg8_callee, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit $esi, implicit $edx, implicit $ecx, implicit $r8d, implicit $r9d
; X64: ADJCALLSTACKUP64 16, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
  call void @simple_arg8_callee(i32 %in0, i32 %in0, i32 %in0, i32 %in0, i32 %in0, i32 %in0, i32 %in0, i32 %in0)
  ret void
}
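
; A call that produces a value: the result comes back in $eax and feeds a
; G_ADD before being returned.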
declare i32 @simple_return_callee(i32 %in0)
define i32 @test_simple_return_callee() {
; X32-LABEL: name: test_simple_return_callee
; X32: bb.1 (%ir-block.0):
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s32)
; X32: G_STORE [[C]](s32), [[GEP]](p0) :: (store 4 into stack, align 1)
; X32: CALLpcrel32 @simple_return_callee, csr_32, implicit $esp, implicit $ssp, implicit-def $eax
; X32: [[COPY1:%[0-9]+]]:_(s32) = COPY $eax
; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY1]]
; X32: $eax = COPY [[ADD]](s32)
; X32: RET 0, implicit $eax
; X64-LABEL: name: test_simple_return_callee
; X64: bb.1 (%ir-block.0):
; X64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: $edi = COPY [[C]](s32)
; X64: CALL64pcrel32 @simple_return_callee, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit-def $eax
; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $eax
; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
; X64: $eax = COPY [[ADD]](s32)
; X64: RET 0, implicit $eax
  %call = call i32 @simple_return_callee(i32 5)
  %r = add i32 %call, %call
  ret i32 %r
}
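
; A <8 x i32> value is split across $xmm0/$xmm1 both when passed to and when
; returned from the callee.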
declare <8 x i32> @split_return_callee(<8 x i32> %in0)
define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
; X32-LABEL: name: test_split_return_callee
; X32: bb.1 (%ir-block.0):
; X32: liveins: $xmm0, $xmm1, $xmm2
; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X32: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $xmm2
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 16 from %fixed-stack.0, align 1)
; X32: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
; X32: [[CONCAT_VECTORS1:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY2]](<4 x s32>), [[LOAD]](<4 x s32>)
; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[CONCAT_VECTORS1]](<8 x s32>)
; X32: $xmm0 = COPY [[UV]](<4 x s32>)
; X32: $xmm1 = COPY [[UV1]](<4 x s32>)
; X32: CALLpcrel32 @split_return_callee, csr_32, implicit $esp, implicit $ssp, implicit $xmm0, implicit $xmm1, implicit-def $xmm0, implicit-def $xmm1
; X32: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
; X32: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X32: [[CONCAT_VECTORS2:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY3]](<4 x s32>), [[COPY4]](<4 x s32>)
; X32: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[CONCAT_VECTORS]], [[CONCAT_VECTORS2]]
; X32: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[ADD]](<8 x s32>)
; X32: $xmm0 = COPY [[UV2]](<4 x s32>)
; X32: $xmm1 = COPY [[UV3]](<4 x s32>)
; X32: RET 0, implicit $xmm0, implicit $xmm1
; X64-LABEL: name: test_split_return_callee
; X64: bb.1 (%ir-block.0):
; X64: liveins: $xmm0, $xmm1, $xmm2, $xmm3
; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X64: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $xmm2
; X64: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $xmm3
; X64: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
; X64: [[CONCAT_VECTORS1:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY2]](<4 x s32>), [[COPY3]](<4 x s32>)
; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[CONCAT_VECTORS1]](<8 x s32>)
; X64: $xmm0 = COPY [[UV]](<4 x s32>)
; X64: $xmm1 = COPY [[UV1]](<4 x s32>)
; X64: CALL64pcrel32 @split_return_callee, csr_64, implicit $rsp, implicit $ssp, implicit $xmm0, implicit $xmm1, implicit-def $xmm0, implicit-def $xmm1
; X64: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
; X64: [[COPY5:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X64: [[CONCAT_VECTORS2:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY4]](<4 x s32>), [[COPY5]](<4 x s32>)
; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[CONCAT_VECTORS]], [[CONCAT_VECTORS2]]
; X64: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[ADD]](<8 x s32>)
; X64: $xmm0 = COPY [[UV2]](<4 x s32>)
; X64: $xmm1 = COPY [[UV3]](<4 x s32>)
; X64: RET 0, implicit $xmm0, implicit $xmm1
  %call = call <8 x i32> @split_return_callee(<8 x i32> %arg2)
  %r = add <8 x i32> %arg1, %call
  ret <8 x i32> %r
}
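
; An indirect call: the pointer is constrained to a GPR class (gr32/gr64)
; so that it can be used by CALL32r/CALL64r.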
define void @test_indirect_call(void()* %func) {
; X32-LABEL: name: test_indirect_call
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:gr32(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: CALL32r [[LOAD]](p0), csr_32, implicit $esp, implicit $ssp
; X32: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X64-LABEL: name: test_indirect_call
; X64: bb.1 (%ir-block.0):
; X64: [[COPY:%[0-9]+]]:gr64(p0) = COPY $rdi
; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: CALL64r [[COPY]](p0), csr_64, implicit $rsp, implicit $ssp
; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
  call void %func()
  ret void
}
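
; ABI extension attributes: a plain i8 argument is any-extended to 32 bits,
; signext sign-extends it, and zeroext zero-extends it before the call.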
declare void @take_char(i8)
define void @test_abi_exts_call(i8* %addr) {
; X32-LABEL: name: test_abi_exts_call
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; X32: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p0) :: (load 1 from %ir.addr)
; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD1]](s8)
; X32: G_STORE [[ANYEXT]](s32), [[GEP]](p0) :: (store 4 into stack, align 1)
; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[COPY2]](s32)
; X32: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s8)
; X32: G_STORE [[SEXT]](s32), [[GEP1]](p0) :: (store 4 into stack, align 1)
; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[COPY3:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; X32: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY3]], [[COPY4]](s32)
; X32: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
; X32: G_STORE [[ZEXT]](s32), [[GEP2]](p0) :: (store 4 into stack, align 1)
; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X64-LABEL: name: test_abi_exts_call
; X64: bb.1 (%ir-block.0):
; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
; X64: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.addr)
; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
; X64: $edi = COPY [[ANYEXT]](s32)
; X64: CALL64pcrel32 @take_char, csr_64, implicit $rsp, implicit $ssp, implicit $edi
; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s8)
; X64: $edi = COPY [[SEXT]](s32)
; X64: CALL64pcrel32 @take_char, csr_64, implicit $rsp, implicit $ssp, implicit $edi
; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s8)
; X64: $edi = COPY [[ZEXT]](s32)
; X64: CALL64pcrel32 @take_char, csr_64, implicit $rsp, implicit $ssp, implicit $edi
; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
  %val = load i8, i8* %addr
  call void @take_char(i8 %val)
  call void @take_char(i8 signext %val)
  call void @take_char(i8 zeroext %val)
  ret void
}
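
; Variadic call with an integer variadic argument: on x86-64, $al must hold
; the number of vector registers used, 0 here.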
declare void @variadic_callee(i8*, ...)
define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) {
; X32-LABEL: name: test_variadic_call_1
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 1)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; X32: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.addr_ptr)
; X32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[LOAD1]](p0) :: (load 4 from %ir.val_ptr)
; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: G_STORE [[LOAD2]](p0), [[GEP]](p0) :: (store 4 into stack, align 1)
; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: G_STORE [[LOAD3]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 1)
; X32: CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
; X32: ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X64-LABEL: name: test_variadic_call_1
; X64: bb.1 (%ir-block.0):
; X64: liveins: $rdi, $rsi
; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsi
; X64: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.addr_ptr)
; X64: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[COPY1]](p0) :: (load 4 from %ir.val_ptr)
; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: $rdi = COPY [[LOAD]](p0)
; X64: $esi = COPY [[LOAD1]](s32)
; X64: $al = MOV8ri 0
; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $esi, implicit $al
; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
  %addr = load i8*, i8** %addr_ptr
  %val = load i32, i32* %val_ptr
  call void (i8*, ...) @variadic_callee(i8* %addr, i32 %val)
  ret void
}
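
; The same call with a double variadic argument: it is passed in $xmm0, so
; $al is set to 1.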
define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) {
; X32-LABEL: name: test_variadic_call_2
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 1)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 1)
; X32: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.addr_ptr)
; X32: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[LOAD1]](p0) :: (load 8 from %ir.val_ptr, align 4)
; X32: ADJCALLSTACKDOWN32 12, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: G_STORE [[LOAD2]](p0), [[GEP]](p0) :: (store 4 into stack, align 1)
; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: G_STORE [[LOAD3]](s64), [[GEP1]](p0) :: (store 8 into stack + 4, align 1)
; X32: CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
; X32: ADJCALLSTACKUP32 12, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X64-LABEL: name: test_variadic_call_2
; X64: bb.1 (%ir-block.0):
; X64: liveins: $rdi, $rsi
; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsi
; X64: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.addr_ptr)
; X64: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[COPY1]](p0) :: (load 8 from %ir.val_ptr)
; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: $rdi = COPY [[LOAD]](p0)
; X64: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD1]](s64)
; X64: $xmm0 = COPY [[ANYEXT]](s128)
; X64: $al = MOV8ri 1
; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $xmm0, implicit $al
; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
  %addr = load i8*, i8** %addr_ptr
  %val = load double, double* %val_ptr
  call void (i8*, ...) @variadic_callee(i8* %addr, double %val)
  ret void
}