1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel -verify-machineinstrs \
3 ; RUN: | FileCheck -check-prefixes=RV32,ILP32 %s
4 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel -mattr=+d -verify-machineinstrs \
5 ; RUN: | FileCheck -check-prefixes=RV32,RV32D-ILP32 %s
6 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel -mattr=+d -target-abi ilp32f \
7 ; RUN: -verify-machineinstrs \
8 ; RUN: | FileCheck -check-prefixes=RV32,RV32D-ILP32F %s
9 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel -mattr=+d -target-abi ilp32d \
10 ; RUN: -verify-machineinstrs \
11 ; RUN: | FileCheck -check-prefixes=RV32,RV32D-ILP32D %s
12 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -verify-machineinstrs \
13 ; RUN: | FileCheck -check-prefixes=RV64,LP64 %s
14 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -mattr=+d -target-abi lp64f \
15 ; RUN: -verify-machineinstrs \
16 ; RUN: | FileCheck -check-prefixes=RV64,LP64F %s
17 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -mattr=+d -target-abi lp64d \
18 ; RUN: -verify-machineinstrs \
19 ; RUN: | FileCheck -check-prefixes=RV64,LP64D %s
20 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv32 -global-isel \
21 ; RUN: -frame-pointer=all -target-abi ilp32 -verify-machineinstrs \
22 ; RUN: | FileCheck -check-prefixes=RV32-WITHFP %s
23 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel \
24 ; RUN: -frame-pointer=all -target-abi lp64 -verify-machineinstrs \
25 ; RUN: | FileCheck -check-prefixes=RV64-WITHFP %s
27 ; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
28 ; lp64/lp64f/lp64d. Different CHECK lines are required due to slight
29 ; codegen differences due to the way the f64 load operations are lowered and
30 ; because the PseudoCALL specifies the calling convention.
31 ; The nounwind attribute is omitted for some of the tests, to check that CFI
32 ; directives are correctly generated.
34 declare void @llvm.va_start(ptr)
35 declare void @llvm.va_end(ptr)
37 declare void @notdead(ptr)
39 ; Although frontends are recommended to not generate va_arg due to the lack of
40 ; support for aggregate types, we test simple cases here to ensure they are
43 define i32 @va1(ptr %fmt, ...) {
46 ; RV32-NEXT: addi sp, sp, -48
47 ; RV32-NEXT: .cfi_def_cfa_offset 48
48 ; RV32-NEXT: sw a1, 20(sp)
49 ; RV32-NEXT: sw a2, 24(sp)
50 ; RV32-NEXT: sw a3, 28(sp)
51 ; RV32-NEXT: sw a4, 32(sp)
52 ; RV32-NEXT: addi a0, sp, 20
53 ; RV32-NEXT: sw a0, 12(sp)
54 ; RV32-NEXT: lw a0, 12(sp)
55 ; RV32-NEXT: sw a5, 36(sp)
56 ; RV32-NEXT: sw a6, 40(sp)
57 ; RV32-NEXT: sw a7, 44(sp)
58 ; RV32-NEXT: addi a1, a0, 4
59 ; RV32-NEXT: sw a1, 12(sp)
60 ; RV32-NEXT: lw a0, 0(a0)
61 ; RV32-NEXT: addi sp, sp, 48
66 ; RV64-NEXT: addi sp, sp, -80
67 ; RV64-NEXT: .cfi_def_cfa_offset 80
68 ; RV64-NEXT: sd a1, 24(sp)
69 ; RV64-NEXT: sd a2, 32(sp)
70 ; RV64-NEXT: sd a3, 40(sp)
71 ; RV64-NEXT: sd a4, 48(sp)
72 ; RV64-NEXT: sd a5, 56(sp)
73 ; RV64-NEXT: addi a0, sp, 24
74 ; RV64-NEXT: sd a0, 8(sp)
75 ; RV64-NEXT: lw a0, 12(sp)
76 ; RV64-NEXT: lwu a1, 8(sp)
77 ; RV64-NEXT: sd a6, 64(sp)
78 ; RV64-NEXT: sd a7, 72(sp)
79 ; RV64-NEXT: slli a0, a0, 32
80 ; RV64-NEXT: or a0, a0, a1
81 ; RV64-NEXT: addi a1, a0, 4
82 ; RV64-NEXT: srli a2, a1, 32
83 ; RV64-NEXT: sw a1, 8(sp)
84 ; RV64-NEXT: sw a2, 12(sp)
85 ; RV64-NEXT: lw a0, 0(a0)
86 ; RV64-NEXT: addi sp, sp, 80
89 ; RV32-WITHFP-LABEL: va1:
90 ; RV32-WITHFP: # %bb.0:
91 ; RV32-WITHFP-NEXT: addi sp, sp, -48
92 ; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 48
93 ; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
94 ; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
95 ; RV32-WITHFP-NEXT: .cfi_offset ra, -36
96 ; RV32-WITHFP-NEXT: .cfi_offset s0, -40
97 ; RV32-WITHFP-NEXT: addi s0, sp, 16
98 ; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 32
99 ; RV32-WITHFP-NEXT: sw a1, 4(s0)
100 ; RV32-WITHFP-NEXT: sw a2, 8(s0)
101 ; RV32-WITHFP-NEXT: sw a3, 12(s0)
102 ; RV32-WITHFP-NEXT: sw a4, 16(s0)
103 ; RV32-WITHFP-NEXT: addi a0, s0, 4
104 ; RV32-WITHFP-NEXT: sw a0, -12(s0)
105 ; RV32-WITHFP-NEXT: lw a0, -12(s0)
106 ; RV32-WITHFP-NEXT: sw a5, 20(s0)
107 ; RV32-WITHFP-NEXT: sw a6, 24(s0)
108 ; RV32-WITHFP-NEXT: sw a7, 28(s0)
109 ; RV32-WITHFP-NEXT: addi a1, a0, 4
110 ; RV32-WITHFP-NEXT: sw a1, -12(s0)
111 ; RV32-WITHFP-NEXT: lw a0, 0(a0)
112 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
113 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
114 ; RV32-WITHFP-NEXT: addi sp, sp, 48
115 ; RV32-WITHFP-NEXT: ret
117 ; RV64-WITHFP-LABEL: va1:
118 ; RV64-WITHFP: # %bb.0:
119 ; RV64-WITHFP-NEXT: addi sp, sp, -96
120 ; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 96
121 ; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
122 ; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
123 ; RV64-WITHFP-NEXT: .cfi_offset ra, -72
124 ; RV64-WITHFP-NEXT: .cfi_offset s0, -80
125 ; RV64-WITHFP-NEXT: addi s0, sp, 32
126 ; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 64
127 ; RV64-WITHFP-NEXT: sd a1, 8(s0)
128 ; RV64-WITHFP-NEXT: sd a2, 16(s0)
129 ; RV64-WITHFP-NEXT: sd a3, 24(s0)
130 ; RV64-WITHFP-NEXT: sd a4, 32(s0)
131 ; RV64-WITHFP-NEXT: sd a5, 40(s0)
132 ; RV64-WITHFP-NEXT: addi a0, s0, 8
133 ; RV64-WITHFP-NEXT: sd a0, -24(s0)
134 ; RV64-WITHFP-NEXT: lw a0, -20(s0)
135 ; RV64-WITHFP-NEXT: lwu a1, -24(s0)
136 ; RV64-WITHFP-NEXT: sd a6, 48(s0)
137 ; RV64-WITHFP-NEXT: sd a7, 56(s0)
138 ; RV64-WITHFP-NEXT: slli a0, a0, 32
139 ; RV64-WITHFP-NEXT: or a0, a0, a1
140 ; RV64-WITHFP-NEXT: addi a1, a0, 4
141 ; RV64-WITHFP-NEXT: srli a2, a1, 32
142 ; RV64-WITHFP-NEXT: sw a1, -24(s0)
143 ; RV64-WITHFP-NEXT: sw a2, -20(s0)
144 ; RV64-WITHFP-NEXT: lw a0, 0(a0)
145 ; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
146 ; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
147 ; RV64-WITHFP-NEXT: addi sp, sp, 96
148 ; RV64-WITHFP-NEXT: ret
150 call void @llvm.va_start(ptr %va)
151 %argp.cur = load ptr, ptr %va, align 4
152 %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
153 store ptr %argp.next, ptr %va, align 4
154 %1 = load i32, ptr %argp.cur, align 4
155 call void @llvm.va_end(ptr %va)
159 define i32 @va1_va_arg(ptr %fmt, ...) nounwind {
160 ; RV32-LABEL: va1_va_arg:
162 ; RV32-NEXT: addi sp, sp, -48
163 ; RV32-NEXT: sw a1, 20(sp)
164 ; RV32-NEXT: sw a2, 24(sp)
165 ; RV32-NEXT: sw a3, 28(sp)
166 ; RV32-NEXT: sw a4, 32(sp)
167 ; RV32-NEXT: sw a5, 36(sp)
168 ; RV32-NEXT: sw a6, 40(sp)
169 ; RV32-NEXT: sw a7, 44(sp)
170 ; RV32-NEXT: addi a0, sp, 20
171 ; RV32-NEXT: sw a0, 12(sp)
172 ; RV32-NEXT: lw a0, 12(sp)
173 ; RV32-NEXT: addi a0, a0, 3
174 ; RV32-NEXT: andi a0, a0, -4
175 ; RV32-NEXT: addi a1, a0, 4
176 ; RV32-NEXT: sw a1, 12(sp)
177 ; RV32-NEXT: lw a0, 0(a0)
178 ; RV32-NEXT: addi sp, sp, 48
181 ; RV64-LABEL: va1_va_arg:
183 ; RV64-NEXT: addi sp, sp, -80
184 ; RV64-NEXT: sd a1, 24(sp)
185 ; RV64-NEXT: sd a2, 32(sp)
186 ; RV64-NEXT: sd a3, 40(sp)
187 ; RV64-NEXT: sd a4, 48(sp)
188 ; RV64-NEXT: sd a5, 56(sp)
189 ; RV64-NEXT: sd a6, 64(sp)
190 ; RV64-NEXT: sd a7, 72(sp)
191 ; RV64-NEXT: addi a0, sp, 24
192 ; RV64-NEXT: sd a0, 8(sp)
193 ; RV64-NEXT: ld a0, 8(sp)
194 ; RV64-NEXT: addi a0, a0, 3
195 ; RV64-NEXT: andi a0, a0, -4
196 ; RV64-NEXT: addi a1, a0, 4
197 ; RV64-NEXT: sd a1, 8(sp)
198 ; RV64-NEXT: lw a0, 0(a0)
199 ; RV64-NEXT: addi sp, sp, 80
202 ; RV32-WITHFP-LABEL: va1_va_arg:
203 ; RV32-WITHFP: # %bb.0:
204 ; RV32-WITHFP-NEXT: addi sp, sp, -48
205 ; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
206 ; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
207 ; RV32-WITHFP-NEXT: addi s0, sp, 16
208 ; RV32-WITHFP-NEXT: sw a1, 4(s0)
209 ; RV32-WITHFP-NEXT: sw a2, 8(s0)
210 ; RV32-WITHFP-NEXT: sw a3, 12(s0)
211 ; RV32-WITHFP-NEXT: sw a4, 16(s0)
212 ; RV32-WITHFP-NEXT: sw a5, 20(s0)
213 ; RV32-WITHFP-NEXT: sw a6, 24(s0)
214 ; RV32-WITHFP-NEXT: sw a7, 28(s0)
215 ; RV32-WITHFP-NEXT: addi a0, s0, 4
216 ; RV32-WITHFP-NEXT: sw a0, -12(s0)
217 ; RV32-WITHFP-NEXT: lw a0, -12(s0)
218 ; RV32-WITHFP-NEXT: addi a0, a0, 3
219 ; RV32-WITHFP-NEXT: andi a0, a0, -4
220 ; RV32-WITHFP-NEXT: addi a1, a0, 4
221 ; RV32-WITHFP-NEXT: sw a1, -12(s0)
222 ; RV32-WITHFP-NEXT: lw a0, 0(a0)
223 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
224 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
225 ; RV32-WITHFP-NEXT: addi sp, sp, 48
226 ; RV32-WITHFP-NEXT: ret
228 ; RV64-WITHFP-LABEL: va1_va_arg:
229 ; RV64-WITHFP: # %bb.0:
230 ; RV64-WITHFP-NEXT: addi sp, sp, -96
231 ; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
232 ; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
233 ; RV64-WITHFP-NEXT: addi s0, sp, 32
234 ; RV64-WITHFP-NEXT: sd a1, 8(s0)
235 ; RV64-WITHFP-NEXT: sd a2, 16(s0)
236 ; RV64-WITHFP-NEXT: sd a3, 24(s0)
237 ; RV64-WITHFP-NEXT: sd a4, 32(s0)
238 ; RV64-WITHFP-NEXT: sd a5, 40(s0)
239 ; RV64-WITHFP-NEXT: sd a6, 48(s0)
240 ; RV64-WITHFP-NEXT: sd a7, 56(s0)
241 ; RV64-WITHFP-NEXT: addi a0, s0, 8
242 ; RV64-WITHFP-NEXT: sd a0, -24(s0)
243 ; RV64-WITHFP-NEXT: ld a0, -24(s0)
244 ; RV64-WITHFP-NEXT: addi a0, a0, 3
245 ; RV64-WITHFP-NEXT: andi a0, a0, -4
246 ; RV64-WITHFP-NEXT: addi a1, a0, 4
247 ; RV64-WITHFP-NEXT: sd a1, -24(s0)
248 ; RV64-WITHFP-NEXT: lw a0, 0(a0)
249 ; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
250 ; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
251 ; RV64-WITHFP-NEXT: addi sp, sp, 96
252 ; RV64-WITHFP-NEXT: ret
254 call void @llvm.va_start(ptr %va)
255 %1 = va_arg ptr %va, i32
256 call void @llvm.va_end(ptr %va)
260 ; Ensure the adjustment when restoring the stack pointer using the frame
262 define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
263 ; RV32-LABEL: va1_va_arg_alloca:
265 ; RV32-NEXT: addi sp, sp, -48
266 ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
267 ; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
268 ; RV32-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
269 ; RV32-NEXT: addi s0, sp, 16
270 ; RV32-NEXT: sw a1, 4(s0)
271 ; RV32-NEXT: sw a2, 8(s0)
272 ; RV32-NEXT: sw a3, 12(s0)
273 ; RV32-NEXT: sw a4, 16(s0)
274 ; RV32-NEXT: sw a5, 20(s0)
275 ; RV32-NEXT: sw a6, 24(s0)
276 ; RV32-NEXT: sw a7, 28(s0)
277 ; RV32-NEXT: addi a0, s0, 4
278 ; RV32-NEXT: sw a0, -16(s0)
279 ; RV32-NEXT: lw a0, -16(s0)
280 ; RV32-NEXT: addi a0, a0, 3
281 ; RV32-NEXT: andi a0, a0, -4
282 ; RV32-NEXT: addi a1, a0, 4
283 ; RV32-NEXT: sw a1, -16(s0)
284 ; RV32-NEXT: lw s1, 0(a0)
285 ; RV32-NEXT: addi a0, s1, 15
286 ; RV32-NEXT: andi a0, a0, -16
287 ; RV32-NEXT: sub a0, sp, a0
288 ; RV32-NEXT: mv sp, a0
289 ; RV32-NEXT: call notdead
290 ; RV32-NEXT: mv a0, s1
291 ; RV32-NEXT: addi sp, s0, -16
292 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
293 ; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
294 ; RV32-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
295 ; RV32-NEXT: addi sp, sp, 48
298 ; RV64-LABEL: va1_va_arg_alloca:
300 ; RV64-NEXT: addi sp, sp, -96
301 ; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
302 ; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
303 ; RV64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
304 ; RV64-NEXT: addi s0, sp, 32
305 ; RV64-NEXT: sd a1, 8(s0)
306 ; RV64-NEXT: sd a2, 16(s0)
307 ; RV64-NEXT: sd a3, 24(s0)
308 ; RV64-NEXT: sd a4, 32(s0)
309 ; RV64-NEXT: sd a5, 40(s0)
310 ; RV64-NEXT: sd a6, 48(s0)
311 ; RV64-NEXT: sd a7, 56(s0)
312 ; RV64-NEXT: addi a0, s0, 8
313 ; RV64-NEXT: sd a0, -32(s0)
314 ; RV64-NEXT: ld a0, -32(s0)
315 ; RV64-NEXT: addi a0, a0, 3
316 ; RV64-NEXT: andi a0, a0, -4
317 ; RV64-NEXT: addi a1, a0, 4
318 ; RV64-NEXT: sd a1, -32(s0)
319 ; RV64-NEXT: lw s1, 0(a0)
320 ; RV64-NEXT: slli a0, s1, 32
321 ; RV64-NEXT: srli a0, a0, 32
322 ; RV64-NEXT: addi a0, a0, 15
323 ; RV64-NEXT: andi a0, a0, -16
324 ; RV64-NEXT: sub a0, sp, a0
325 ; RV64-NEXT: mv sp, a0
326 ; RV64-NEXT: call notdead
327 ; RV64-NEXT: mv a0, s1
328 ; RV64-NEXT: addi sp, s0, -32
329 ; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
330 ; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
331 ; RV64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
332 ; RV64-NEXT: addi sp, sp, 96
335 ; RV32-WITHFP-LABEL: va1_va_arg_alloca:
336 ; RV32-WITHFP: # %bb.0:
337 ; RV32-WITHFP-NEXT: addi sp, sp, -48
338 ; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
339 ; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
340 ; RV32-WITHFP-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
341 ; RV32-WITHFP-NEXT: addi s0, sp, 16
342 ; RV32-WITHFP-NEXT: sw a1, 4(s0)
343 ; RV32-WITHFP-NEXT: sw a2, 8(s0)
344 ; RV32-WITHFP-NEXT: sw a3, 12(s0)
345 ; RV32-WITHFP-NEXT: sw a4, 16(s0)
346 ; RV32-WITHFP-NEXT: sw a5, 20(s0)
347 ; RV32-WITHFP-NEXT: sw a6, 24(s0)
348 ; RV32-WITHFP-NEXT: sw a7, 28(s0)
349 ; RV32-WITHFP-NEXT: addi a0, s0, 4
350 ; RV32-WITHFP-NEXT: sw a0, -16(s0)
351 ; RV32-WITHFP-NEXT: lw a0, -16(s0)
352 ; RV32-WITHFP-NEXT: addi a0, a0, 3
353 ; RV32-WITHFP-NEXT: andi a0, a0, -4
354 ; RV32-WITHFP-NEXT: addi a1, a0, 4
355 ; RV32-WITHFP-NEXT: sw a1, -16(s0)
356 ; RV32-WITHFP-NEXT: lw s1, 0(a0)
357 ; RV32-WITHFP-NEXT: addi a0, s1, 15
358 ; RV32-WITHFP-NEXT: andi a0, a0, -16
359 ; RV32-WITHFP-NEXT: sub a0, sp, a0
360 ; RV32-WITHFP-NEXT: mv sp, a0
361 ; RV32-WITHFP-NEXT: call notdead
362 ; RV32-WITHFP-NEXT: mv a0, s1
363 ; RV32-WITHFP-NEXT: addi sp, s0, -16
364 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
365 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
366 ; RV32-WITHFP-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
367 ; RV32-WITHFP-NEXT: addi sp, sp, 48
368 ; RV32-WITHFP-NEXT: ret
370 ; RV64-WITHFP-LABEL: va1_va_arg_alloca:
371 ; RV64-WITHFP: # %bb.0:
372 ; RV64-WITHFP-NEXT: addi sp, sp, -96
373 ; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
374 ; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
375 ; RV64-WITHFP-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
376 ; RV64-WITHFP-NEXT: addi s0, sp, 32
377 ; RV64-WITHFP-NEXT: sd a1, 8(s0)
378 ; RV64-WITHFP-NEXT: sd a2, 16(s0)
379 ; RV64-WITHFP-NEXT: sd a3, 24(s0)
380 ; RV64-WITHFP-NEXT: sd a4, 32(s0)
381 ; RV64-WITHFP-NEXT: sd a5, 40(s0)
382 ; RV64-WITHFP-NEXT: sd a6, 48(s0)
383 ; RV64-WITHFP-NEXT: sd a7, 56(s0)
384 ; RV64-WITHFP-NEXT: addi a0, s0, 8
385 ; RV64-WITHFP-NEXT: sd a0, -32(s0)
386 ; RV64-WITHFP-NEXT: ld a0, -32(s0)
387 ; RV64-WITHFP-NEXT: addi a0, a0, 3
388 ; RV64-WITHFP-NEXT: andi a0, a0, -4
389 ; RV64-WITHFP-NEXT: addi a1, a0, 4
390 ; RV64-WITHFP-NEXT: sd a1, -32(s0)
391 ; RV64-WITHFP-NEXT: lw s1, 0(a0)
392 ; RV64-WITHFP-NEXT: slli a0, s1, 32
393 ; RV64-WITHFP-NEXT: srli a0, a0, 32
394 ; RV64-WITHFP-NEXT: addi a0, a0, 15
395 ; RV64-WITHFP-NEXT: andi a0, a0, -16
396 ; RV64-WITHFP-NEXT: sub a0, sp, a0
397 ; RV64-WITHFP-NEXT: mv sp, a0
398 ; RV64-WITHFP-NEXT: call notdead
399 ; RV64-WITHFP-NEXT: mv a0, s1
400 ; RV64-WITHFP-NEXT: addi sp, s0, -32
401 ; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
402 ; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
403 ; RV64-WITHFP-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
404 ; RV64-WITHFP-NEXT: addi sp, sp, 96
405 ; RV64-WITHFP-NEXT: ret
407 call void @llvm.va_start(ptr %va)
408 %1 = va_arg ptr %va, i32
409 %2 = alloca i8, i32 %1
410 call void @notdead(ptr %2)
411 call void @llvm.va_end(ptr %va)
415 define void @va1_caller() nounwind {
416 ; RV32-LABEL: va1_caller:
418 ; RV32-NEXT: addi sp, sp, -16
419 ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
420 ; RV32-NEXT: lui a3, 261888
421 ; RV32-NEXT: li a4, 2
422 ; RV32-NEXT: li a2, 0
423 ; RV32-NEXT: call va1
424 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
425 ; RV32-NEXT: addi sp, sp, 16
428 ; LP64-LABEL: va1_caller:
430 ; LP64-NEXT: addi sp, sp, -16
431 ; LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
432 ; LP64-NEXT: lui a0, %hi(.LCPI3_0)
433 ; LP64-NEXT: ld a1, %lo(.LCPI3_0)(a0)
434 ; LP64-NEXT: li a2, 2
435 ; LP64-NEXT: call va1
436 ; LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
437 ; LP64-NEXT: addi sp, sp, 16
440 ; LP64F-LABEL: va1_caller:
442 ; LP64F-NEXT: addi sp, sp, -16
443 ; LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
444 ; LP64F-NEXT: li a0, 1023
445 ; LP64F-NEXT: slli a0, a0, 52
446 ; LP64F-NEXT: fmv.d.x fa5, a0
447 ; LP64F-NEXT: li a2, 2
448 ; LP64F-NEXT: fmv.x.d a1, fa5
449 ; LP64F-NEXT: call va1
450 ; LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
451 ; LP64F-NEXT: addi sp, sp, 16
454 ; LP64D-LABEL: va1_caller:
456 ; LP64D-NEXT: addi sp, sp, -16
457 ; LP64D-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
458 ; LP64D-NEXT: li a0, 1023
459 ; LP64D-NEXT: slli a0, a0, 52
460 ; LP64D-NEXT: fmv.d.x fa5, a0
461 ; LP64D-NEXT: li a2, 2
462 ; LP64D-NEXT: fmv.x.d a1, fa5
463 ; LP64D-NEXT: call va1
464 ; LP64D-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
465 ; LP64D-NEXT: addi sp, sp, 16
468 ; RV32-WITHFP-LABEL: va1_caller:
469 ; RV32-WITHFP: # %bb.0:
470 ; RV32-WITHFP-NEXT: addi sp, sp, -16
471 ; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
472 ; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
473 ; RV32-WITHFP-NEXT: addi s0, sp, 16
474 ; RV32-WITHFP-NEXT: lui a3, 261888
475 ; RV32-WITHFP-NEXT: li a4, 2
476 ; RV32-WITHFP-NEXT: li a2, 0
477 ; RV32-WITHFP-NEXT: call va1
478 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
479 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
480 ; RV32-WITHFP-NEXT: addi sp, sp, 16
481 ; RV32-WITHFP-NEXT: ret
483 ; RV64-WITHFP-LABEL: va1_caller:
484 ; RV64-WITHFP: # %bb.0:
485 ; RV64-WITHFP-NEXT: addi sp, sp, -16
486 ; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
487 ; RV64-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
488 ; RV64-WITHFP-NEXT: addi s0, sp, 16
489 ; RV64-WITHFP-NEXT: lui a0, %hi(.LCPI3_0)
490 ; RV64-WITHFP-NEXT: ld a1, %lo(.LCPI3_0)(a0)
491 ; RV64-WITHFP-NEXT: li a2, 2
492 ; RV64-WITHFP-NEXT: call va1
493 ; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
494 ; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
495 ; RV64-WITHFP-NEXT: addi sp, sp, 16
496 ; RV64-WITHFP-NEXT: ret
497 %1 = call i32 (ptr, ...) @va1(ptr undef, double 1.0, i32 2)
501 ; Ensure that 2x xlen size+alignment varargs are accessed via an "aligned"
502 ; register pair (where the first register is even-numbered).
504 define i64 @va2(ptr %fmt, ...) nounwind {
507 ; ILP32-NEXT: addi sp, sp, -48
508 ; ILP32-NEXT: sw a1, 20(sp)
509 ; ILP32-NEXT: sw a2, 24(sp)
510 ; ILP32-NEXT: sw a3, 28(sp)
511 ; ILP32-NEXT: sw a4, 32(sp)
512 ; ILP32-NEXT: addi a0, sp, 20
513 ; ILP32-NEXT: sw a0, 12(sp)
514 ; ILP32-NEXT: lw a0, 12(sp)
515 ; ILP32-NEXT: sw a5, 36(sp)
516 ; ILP32-NEXT: sw a6, 40(sp)
517 ; ILP32-NEXT: sw a7, 44(sp)
518 ; ILP32-NEXT: addi a1, a0, 7
519 ; ILP32-NEXT: andi a1, a1, -8
520 ; ILP32-NEXT: addi a0, a0, 15
521 ; ILP32-NEXT: sw a0, 12(sp)
522 ; ILP32-NEXT: lw a0, 0(a1)
523 ; ILP32-NEXT: lw a1, 4(a1)
524 ; ILP32-NEXT: addi sp, sp, 48
527 ; RV32D-ILP32-LABEL: va2:
528 ; RV32D-ILP32: # %bb.0:
529 ; RV32D-ILP32-NEXT: addi sp, sp, -48
530 ; RV32D-ILP32-NEXT: sw a1, 20(sp)
531 ; RV32D-ILP32-NEXT: sw a2, 24(sp)
532 ; RV32D-ILP32-NEXT: sw a3, 28(sp)
533 ; RV32D-ILP32-NEXT: sw a4, 32(sp)
534 ; RV32D-ILP32-NEXT: addi a0, sp, 20
535 ; RV32D-ILP32-NEXT: sw a0, 12(sp)
536 ; RV32D-ILP32-NEXT: lw a0, 12(sp)
537 ; RV32D-ILP32-NEXT: sw a5, 36(sp)
538 ; RV32D-ILP32-NEXT: sw a6, 40(sp)
539 ; RV32D-ILP32-NEXT: sw a7, 44(sp)
540 ; RV32D-ILP32-NEXT: addi a1, a0, 7
541 ; RV32D-ILP32-NEXT: andi a1, a1, -8
542 ; RV32D-ILP32-NEXT: fld fa5, 0(a1)
543 ; RV32D-ILP32-NEXT: addi a0, a0, 15
544 ; RV32D-ILP32-NEXT: sw a0, 12(sp)
545 ; RV32D-ILP32-NEXT: fsd fa5, 0(sp)
546 ; RV32D-ILP32-NEXT: lw a0, 0(sp)
547 ; RV32D-ILP32-NEXT: lw a1, 4(sp)
548 ; RV32D-ILP32-NEXT: addi sp, sp, 48
549 ; RV32D-ILP32-NEXT: ret
551 ; RV32D-ILP32F-LABEL: va2:
552 ; RV32D-ILP32F: # %bb.0:
553 ; RV32D-ILP32F-NEXT: addi sp, sp, -48
554 ; RV32D-ILP32F-NEXT: sw a1, 20(sp)
555 ; RV32D-ILP32F-NEXT: sw a2, 24(sp)
556 ; RV32D-ILP32F-NEXT: sw a3, 28(sp)
557 ; RV32D-ILP32F-NEXT: sw a4, 32(sp)
558 ; RV32D-ILP32F-NEXT: addi a0, sp, 20
559 ; RV32D-ILP32F-NEXT: sw a0, 12(sp)
560 ; RV32D-ILP32F-NEXT: lw a0, 12(sp)
561 ; RV32D-ILP32F-NEXT: sw a5, 36(sp)
562 ; RV32D-ILP32F-NEXT: sw a6, 40(sp)
563 ; RV32D-ILP32F-NEXT: sw a7, 44(sp)
564 ; RV32D-ILP32F-NEXT: addi a1, a0, 7
565 ; RV32D-ILP32F-NEXT: andi a1, a1, -8
566 ; RV32D-ILP32F-NEXT: fld fa5, 0(a1)
567 ; RV32D-ILP32F-NEXT: addi a0, a0, 15
568 ; RV32D-ILP32F-NEXT: sw a0, 12(sp)
569 ; RV32D-ILP32F-NEXT: fsd fa5, 0(sp)
570 ; RV32D-ILP32F-NEXT: lw a0, 0(sp)
571 ; RV32D-ILP32F-NEXT: lw a1, 4(sp)
572 ; RV32D-ILP32F-NEXT: addi sp, sp, 48
573 ; RV32D-ILP32F-NEXT: ret
575 ; RV32D-ILP32D-LABEL: va2:
576 ; RV32D-ILP32D: # %bb.0:
577 ; RV32D-ILP32D-NEXT: addi sp, sp, -48
578 ; RV32D-ILP32D-NEXT: sw a1, 20(sp)
579 ; RV32D-ILP32D-NEXT: sw a2, 24(sp)
580 ; RV32D-ILP32D-NEXT: sw a3, 28(sp)
581 ; RV32D-ILP32D-NEXT: sw a4, 32(sp)
582 ; RV32D-ILP32D-NEXT: addi a0, sp, 20
583 ; RV32D-ILP32D-NEXT: sw a0, 12(sp)
584 ; RV32D-ILP32D-NEXT: lw a0, 12(sp)
585 ; RV32D-ILP32D-NEXT: sw a5, 36(sp)
586 ; RV32D-ILP32D-NEXT: sw a6, 40(sp)
587 ; RV32D-ILP32D-NEXT: sw a7, 44(sp)
588 ; RV32D-ILP32D-NEXT: addi a1, a0, 7
589 ; RV32D-ILP32D-NEXT: andi a1, a1, -8
590 ; RV32D-ILP32D-NEXT: fld fa5, 0(a1)
591 ; RV32D-ILP32D-NEXT: addi a0, a0, 15
592 ; RV32D-ILP32D-NEXT: sw a0, 12(sp)
593 ; RV32D-ILP32D-NEXT: fsd fa5, 0(sp)
594 ; RV32D-ILP32D-NEXT: lw a0, 0(sp)
595 ; RV32D-ILP32D-NEXT: lw a1, 4(sp)
596 ; RV32D-ILP32D-NEXT: addi sp, sp, 48
597 ; RV32D-ILP32D-NEXT: ret
601 ; RV64-NEXT: addi sp, sp, -80
602 ; RV64-NEXT: sd a1, 24(sp)
603 ; RV64-NEXT: sd a2, 32(sp)
604 ; RV64-NEXT: sd a3, 40(sp)
605 ; RV64-NEXT: sd a4, 48(sp)
606 ; RV64-NEXT: addi a0, sp, 24
607 ; RV64-NEXT: sd a0, 8(sp)
608 ; RV64-NEXT: ld a0, 8(sp)
609 ; RV64-NEXT: sd a5, 56(sp)
610 ; RV64-NEXT: sd a6, 64(sp)
611 ; RV64-NEXT: sd a7, 72(sp)
612 ; RV64-NEXT: addi a1, a0, 7
613 ; RV64-NEXT: andi a1, a1, -8
614 ; RV64-NEXT: addi a0, a0, 15
615 ; RV64-NEXT: sd a0, 8(sp)
616 ; RV64-NEXT: ld a0, 0(a1)
617 ; RV64-NEXT: addi sp, sp, 80
620 ; RV32-WITHFP-LABEL: va2:
621 ; RV32-WITHFP: # %bb.0:
622 ; RV32-WITHFP-NEXT: addi sp, sp, -48
623 ; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
624 ; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
625 ; RV32-WITHFP-NEXT: addi s0, sp, 16
626 ; RV32-WITHFP-NEXT: sw a1, 4(s0)
627 ; RV32-WITHFP-NEXT: sw a2, 8(s0)
628 ; RV32-WITHFP-NEXT: sw a3, 12(s0)
629 ; RV32-WITHFP-NEXT: sw a4, 16(s0)
630 ; RV32-WITHFP-NEXT: addi a0, s0, 4
631 ; RV32-WITHFP-NEXT: sw a0, -12(s0)
632 ; RV32-WITHFP-NEXT: lw a0, -12(s0)
633 ; RV32-WITHFP-NEXT: sw a5, 20(s0)
634 ; RV32-WITHFP-NEXT: sw a6, 24(s0)
635 ; RV32-WITHFP-NEXT: sw a7, 28(s0)
636 ; RV32-WITHFP-NEXT: addi a0, a0, 7
637 ; RV32-WITHFP-NEXT: andi a1, a0, -8
638 ; RV32-WITHFP-NEXT: addi a0, a0, 8
639 ; RV32-WITHFP-NEXT: sw a0, -12(s0)
640 ; RV32-WITHFP-NEXT: lw a0, 0(a1)
641 ; RV32-WITHFP-NEXT: lw a1, 4(a1)
642 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
643 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
644 ; RV32-WITHFP-NEXT: addi sp, sp, 48
645 ; RV32-WITHFP-NEXT: ret
647 ; RV64-WITHFP-LABEL: va2:
648 ; RV64-WITHFP: # %bb.0:
649 ; RV64-WITHFP-NEXT: addi sp, sp, -96
650 ; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
651 ; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
652 ; RV64-WITHFP-NEXT: addi s0, sp, 32
653 ; RV64-WITHFP-NEXT: sd a1, 8(s0)
654 ; RV64-WITHFP-NEXT: sd a2, 16(s0)
655 ; RV64-WITHFP-NEXT: sd a3, 24(s0)
656 ; RV64-WITHFP-NEXT: sd a4, 32(s0)
657 ; RV64-WITHFP-NEXT: addi a0, s0, 8
658 ; RV64-WITHFP-NEXT: sd a0, -24(s0)
659 ; RV64-WITHFP-NEXT: ld a0, -24(s0)
660 ; RV64-WITHFP-NEXT: sd a5, 40(s0)
661 ; RV64-WITHFP-NEXT: sd a6, 48(s0)
662 ; RV64-WITHFP-NEXT: sd a7, 56(s0)
663 ; RV64-WITHFP-NEXT: addi a1, a0, 7
664 ; RV64-WITHFP-NEXT: andi a1, a1, -8
665 ; RV64-WITHFP-NEXT: addi a0, a0, 15
666 ; RV64-WITHFP-NEXT: sd a0, -24(s0)
667 ; RV64-WITHFP-NEXT: ld a0, 0(a1)
668 ; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
669 ; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
670 ; RV64-WITHFP-NEXT: addi sp, sp, 96
671 ; RV64-WITHFP-NEXT: ret
673 call void @llvm.va_start(ptr %va)
674 %argp.cur = load ptr, ptr %va
675 %ptrint = ptrtoint ptr %argp.cur to iXLen
676 %1 = add iXLen %ptrint, 7
677 %2 = and iXLen %1, -8
678 %argp.cur.aligned = inttoptr iXLen %1 to ptr
679 %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
680 store ptr %argp.next, ptr %va
681 %3 = inttoptr iXLen %2 to ptr
682 %4 = load double, ptr %3, align 8
683 %5 = bitcast double %4 to i64
684 call void @llvm.va_end(ptr %va)
688 ; This test is slightly different than the SelectionDAG counterpart because
689 ; narrowScalar and widenScalar for G_VAARG on types outside of [s32, sXLen]
690 ; are not implemented yet.
691 define i64 @va2_va_arg(ptr %fmt, ...) nounwind {
692 ; RV32-LABEL: va2_va_arg:
694 ; RV32-NEXT: addi sp, sp, -48
695 ; RV32-NEXT: sw a1, 20(sp)
696 ; RV32-NEXT: sw a2, 24(sp)
697 ; RV32-NEXT: sw a3, 28(sp)
698 ; RV32-NEXT: sw a4, 32(sp)
699 ; RV32-NEXT: sw a5, 36(sp)
700 ; RV32-NEXT: sw a6, 40(sp)
701 ; RV32-NEXT: sw a7, 44(sp)
702 ; RV32-NEXT: addi a0, sp, 20
703 ; RV32-NEXT: sw a0, 12(sp)
704 ; RV32-NEXT: lw a0, 12(sp)
705 ; RV32-NEXT: addi a0, a0, 3
706 ; RV32-NEXT: andi a0, a0, -4
707 ; RV32-NEXT: addi a1, a0, 4
708 ; RV32-NEXT: sw a1, 12(sp)
709 ; RV32-NEXT: lw a0, 0(a0)
710 ; RV32-NEXT: li a1, 0
711 ; RV32-NEXT: addi sp, sp, 48
714 ; RV64-LABEL: va2_va_arg:
716 ; RV64-NEXT: addi sp, sp, -80
717 ; RV64-NEXT: sd a1, 24(sp)
718 ; RV64-NEXT: sd a2, 32(sp)
719 ; RV64-NEXT: sd a3, 40(sp)
720 ; RV64-NEXT: sd a4, 48(sp)
721 ; RV64-NEXT: sd a5, 56(sp)
722 ; RV64-NEXT: sd a6, 64(sp)
723 ; RV64-NEXT: sd a7, 72(sp)
724 ; RV64-NEXT: addi a0, sp, 24
725 ; RV64-NEXT: sd a0, 8(sp)
726 ; RV64-NEXT: ld a0, 8(sp)
727 ; RV64-NEXT: addi a0, a0, 3
728 ; RV64-NEXT: andi a0, a0, -4
729 ; RV64-NEXT: addi a1, a0, 4
730 ; RV64-NEXT: sd a1, 8(sp)
731 ; RV64-NEXT: lw a0, 0(a0)
732 ; RV64-NEXT: slli a0, a0, 32
733 ; RV64-NEXT: srli a0, a0, 32
734 ; RV64-NEXT: addi sp, sp, 80
737 ; RV32-WITHFP-LABEL: va2_va_arg:
738 ; RV32-WITHFP: # %bb.0:
739 ; RV32-WITHFP-NEXT: addi sp, sp, -48
740 ; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
741 ; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
742 ; RV32-WITHFP-NEXT: addi s0, sp, 16
743 ; RV32-WITHFP-NEXT: sw a1, 4(s0)
744 ; RV32-WITHFP-NEXT: sw a2, 8(s0)
745 ; RV32-WITHFP-NEXT: sw a3, 12(s0)
746 ; RV32-WITHFP-NEXT: sw a4, 16(s0)
747 ; RV32-WITHFP-NEXT: sw a5, 20(s0)
748 ; RV32-WITHFP-NEXT: sw a6, 24(s0)
749 ; RV32-WITHFP-NEXT: sw a7, 28(s0)
750 ; RV32-WITHFP-NEXT: addi a0, s0, 4
751 ; RV32-WITHFP-NEXT: sw a0, -12(s0)
752 ; RV32-WITHFP-NEXT: lw a0, -12(s0)
753 ; RV32-WITHFP-NEXT: addi a0, a0, 3
754 ; RV32-WITHFP-NEXT: andi a0, a0, -4
755 ; RV32-WITHFP-NEXT: addi a1, a0, 4
756 ; RV32-WITHFP-NEXT: sw a1, -12(s0)
757 ; RV32-WITHFP-NEXT: lw a0, 0(a0)
758 ; RV32-WITHFP-NEXT: li a1, 0
759 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
760 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
761 ; RV32-WITHFP-NEXT: addi sp, sp, 48
762 ; RV32-WITHFP-NEXT: ret
764 ; RV64-WITHFP-LABEL: va2_va_arg:
765 ; RV64-WITHFP: # %bb.0:
766 ; RV64-WITHFP-NEXT: addi sp, sp, -96
767 ; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
768 ; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
769 ; RV64-WITHFP-NEXT: addi s0, sp, 32
770 ; RV64-WITHFP-NEXT: sd a1, 8(s0)
771 ; RV64-WITHFP-NEXT: sd a2, 16(s0)
772 ; RV64-WITHFP-NEXT: sd a3, 24(s0)
773 ; RV64-WITHFP-NEXT: sd a4, 32(s0)
774 ; RV64-WITHFP-NEXT: sd a5, 40(s0)
775 ; RV64-WITHFP-NEXT: sd a6, 48(s0)
776 ; RV64-WITHFP-NEXT: sd a7, 56(s0)
777 ; RV64-WITHFP-NEXT: addi a0, s0, 8
778 ; RV64-WITHFP-NEXT: sd a0, -24(s0)
779 ; RV64-WITHFP-NEXT: ld a0, -24(s0)
780 ; RV64-WITHFP-NEXT: addi a0, a0, 3
781 ; RV64-WITHFP-NEXT: andi a0, a0, -4
782 ; RV64-WITHFP-NEXT: addi a1, a0, 4
783 ; RV64-WITHFP-NEXT: sd a1, -24(s0)
784 ; RV64-WITHFP-NEXT: lw a0, 0(a0)
785 ; RV64-WITHFP-NEXT: slli a0, a0, 32
786 ; RV64-WITHFP-NEXT: srli a0, a0, 32
787 ; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
788 ; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
789 ; RV64-WITHFP-NEXT: addi sp, sp, 96
790 ; RV64-WITHFP-NEXT: ret
792 call void @llvm.va_start(ptr %va)
793 %1 = va_arg ptr %va, i32
794 call void @llvm.va_end(ptr %va)
795 %2 = zext i32 %1 to i64
799 define void @va2_caller() nounwind {
800 ; RV32-LABEL: va2_caller:
802 ; RV32-NEXT: addi sp, sp, -16
803 ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
804 ; RV32-NEXT: li a1, 1
805 ; RV32-NEXT: call va2
806 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
807 ; RV32-NEXT: addi sp, sp, 16
810 ; RV64-LABEL: va2_caller:
812 ; RV64-NEXT: addi sp, sp, -16
813 ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
814 ; RV64-NEXT: li a1, 1
815 ; RV64-NEXT: call va2
816 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
817 ; RV64-NEXT: addi sp, sp, 16
820 ; RV32-WITHFP-LABEL: va2_caller:
821 ; RV32-WITHFP: # %bb.0:
822 ; RV32-WITHFP-NEXT: addi sp, sp, -16
823 ; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
824 ; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
825 ; RV32-WITHFP-NEXT: addi s0, sp, 16
826 ; RV32-WITHFP-NEXT: li a1, 1
827 ; RV32-WITHFP-NEXT: call va2
828 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
829 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
830 ; RV32-WITHFP-NEXT: addi sp, sp, 16
831 ; RV32-WITHFP-NEXT: ret
833 ; RV64-WITHFP-LABEL: va2_caller:
834 ; RV64-WITHFP: # %bb.0:
835 ; RV64-WITHFP-NEXT: addi sp, sp, -16
836 ; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
837 ; RV64-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
838 ; RV64-WITHFP-NEXT: addi s0, sp, 16
839 ; RV64-WITHFP-NEXT: li a1, 1
840 ; RV64-WITHFP-NEXT: call va2
841 ; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
842 ; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
843 ; RV64-WITHFP-NEXT: addi sp, sp, 16
844 ; RV64-WITHFP-NEXT: ret
845 %1 = call i64 (ptr, ...) @va2(ptr undef, i32 1)
849 ; On RV32, Ensure a named 2*xlen argument is passed in a1 and a2, while the
850 ; vararg double is passed in a4 and a5 (rather than a3 and a4)
852 define i64 @va3(i32 %a, i64 %b, ...) nounwind {
855 ; ILP32-NEXT: addi sp, sp, -32
856 ; ILP32-NEXT: sw a3, 12(sp)
857 ; ILP32-NEXT: sw a4, 16(sp)
858 ; ILP32-NEXT: addi a0, sp, 12
859 ; ILP32-NEXT: sw a0, 4(sp)
860 ; ILP32-NEXT: lw a0, 4(sp)
861 ; ILP32-NEXT: sw a5, 20(sp)
862 ; ILP32-NEXT: sw a6, 24(sp)
863 ; ILP32-NEXT: sw a7, 28(sp)
864 ; ILP32-NEXT: addi a3, a0, 7
865 ; ILP32-NEXT: andi a3, a3, -8
866 ; ILP32-NEXT: addi a0, a0, 15
867 ; ILP32-NEXT: sw a0, 4(sp)
868 ; ILP32-NEXT: lw a4, 0(a3)
869 ; ILP32-NEXT: lw a3, 4(a3)
870 ; ILP32-NEXT: add a0, a1, a4
871 ; ILP32-NEXT: sltu a1, a0, a4
872 ; ILP32-NEXT: add a2, a2, a3
873 ; ILP32-NEXT: add a1, a2, a1
874 ; ILP32-NEXT: addi sp, sp, 32
877 ; RV32D-ILP32-LABEL: va3:
878 ; RV32D-ILP32: # %bb.0:
879 ; RV32D-ILP32-NEXT: addi sp, sp, -48
880 ; RV32D-ILP32-NEXT: sw a3, 28(sp)
881 ; RV32D-ILP32-NEXT: sw a4, 32(sp)
882 ; RV32D-ILP32-NEXT: addi a0, sp, 28
883 ; RV32D-ILP32-NEXT: sw a0, 20(sp)
884 ; RV32D-ILP32-NEXT: lw a0, 20(sp)
885 ; RV32D-ILP32-NEXT: sw a5, 36(sp)
886 ; RV32D-ILP32-NEXT: sw a6, 40(sp)
887 ; RV32D-ILP32-NEXT: sw a7, 44(sp)
888 ; RV32D-ILP32-NEXT: addi a3, a0, 7
889 ; RV32D-ILP32-NEXT: andi a3, a3, -8
890 ; RV32D-ILP32-NEXT: fld fa5, 0(a3)
891 ; RV32D-ILP32-NEXT: addi a0, a0, 15
892 ; RV32D-ILP32-NEXT: sw a0, 20(sp)
893 ; RV32D-ILP32-NEXT: fsd fa5, 8(sp)
894 ; RV32D-ILP32-NEXT: lw a3, 8(sp)
895 ; RV32D-ILP32-NEXT: lw a4, 12(sp)
896 ; RV32D-ILP32-NEXT: add a0, a1, a3
897 ; RV32D-ILP32-NEXT: sltu a1, a0, a3
898 ; RV32D-ILP32-NEXT: add a2, a2, a4
899 ; RV32D-ILP32-NEXT: add a1, a2, a1
900 ; RV32D-ILP32-NEXT: addi sp, sp, 48
901 ; RV32D-ILP32-NEXT: ret
903 ; RV32D-ILP32F-LABEL: va3:
904 ; RV32D-ILP32F: # %bb.0:
905 ; RV32D-ILP32F-NEXT: addi sp, sp, -48
906 ; RV32D-ILP32F-NEXT: sw a3, 28(sp)
907 ; RV32D-ILP32F-NEXT: sw a4, 32(sp)
908 ; RV32D-ILP32F-NEXT: addi a0, sp, 28
909 ; RV32D-ILP32F-NEXT: sw a0, 20(sp)
910 ; RV32D-ILP32F-NEXT: lw a0, 20(sp)
911 ; RV32D-ILP32F-NEXT: sw a5, 36(sp)
912 ; RV32D-ILP32F-NEXT: sw a6, 40(sp)
913 ; RV32D-ILP32F-NEXT: sw a7, 44(sp)
914 ; RV32D-ILP32F-NEXT: addi a3, a0, 7
915 ; RV32D-ILP32F-NEXT: andi a3, a3, -8
916 ; RV32D-ILP32F-NEXT: fld fa5, 0(a3)
917 ; RV32D-ILP32F-NEXT: addi a0, a0, 15
918 ; RV32D-ILP32F-NEXT: sw a0, 20(sp)
919 ; RV32D-ILP32F-NEXT: fsd fa5, 8(sp)
920 ; RV32D-ILP32F-NEXT: lw a3, 8(sp)
921 ; RV32D-ILP32F-NEXT: lw a4, 12(sp)
922 ; RV32D-ILP32F-NEXT: add a0, a1, a3
923 ; RV32D-ILP32F-NEXT: sltu a1, a0, a3
924 ; RV32D-ILP32F-NEXT: add a2, a2, a4
925 ; RV32D-ILP32F-NEXT: add a1, a2, a1
926 ; RV32D-ILP32F-NEXT: addi sp, sp, 48
927 ; RV32D-ILP32F-NEXT: ret
929 ; RV32D-ILP32D-LABEL: va3:
930 ; RV32D-ILP32D: # %bb.0:
931 ; RV32D-ILP32D-NEXT: addi sp, sp, -48
932 ; RV32D-ILP32D-NEXT: sw a3, 28(sp)
933 ; RV32D-ILP32D-NEXT: sw a4, 32(sp)
934 ; RV32D-ILP32D-NEXT: addi a0, sp, 28
935 ; RV32D-ILP32D-NEXT: sw a0, 20(sp)
936 ; RV32D-ILP32D-NEXT: lw a0, 20(sp)
937 ; RV32D-ILP32D-NEXT: sw a5, 36(sp)
938 ; RV32D-ILP32D-NEXT: sw a6, 40(sp)
939 ; RV32D-ILP32D-NEXT: sw a7, 44(sp)
940 ; RV32D-ILP32D-NEXT: addi a3, a0, 7
941 ; RV32D-ILP32D-NEXT: andi a3, a3, -8
942 ; RV32D-ILP32D-NEXT: fld fa5, 0(a3)
943 ; RV32D-ILP32D-NEXT: addi a0, a0, 15
944 ; RV32D-ILP32D-NEXT: sw a0, 20(sp)
945 ; RV32D-ILP32D-NEXT: fsd fa5, 8(sp)
946 ; RV32D-ILP32D-NEXT: lw a3, 8(sp)
947 ; RV32D-ILP32D-NEXT: lw a4, 12(sp)
948 ; RV32D-ILP32D-NEXT: add a0, a1, a3
949 ; RV32D-ILP32D-NEXT: sltu a1, a0, a3
950 ; RV32D-ILP32D-NEXT: add a2, a2, a4
951 ; RV32D-ILP32D-NEXT: add a1, a2, a1
952 ; RV32D-ILP32D-NEXT: addi sp, sp, 48
953 ; RV32D-ILP32D-NEXT: ret
957 ; RV64-NEXT: addi sp, sp, -64
958 ; RV64-NEXT: sd a2, 16(sp)
959 ; RV64-NEXT: sd a3, 24(sp)
960 ; RV64-NEXT: sd a4, 32(sp)
961 ; RV64-NEXT: addi a0, sp, 16
962 ; RV64-NEXT: sd a0, 8(sp)
963 ; RV64-NEXT: ld a0, 8(sp)
964 ; RV64-NEXT: sd a5, 40(sp)
965 ; RV64-NEXT: sd a6, 48(sp)
966 ; RV64-NEXT: sd a7, 56(sp)
967 ; RV64-NEXT: addi a2, a0, 7
968 ; RV64-NEXT: andi a2, a2, -8
969 ; RV64-NEXT: addi a0, a0, 15
970 ; RV64-NEXT: sd a0, 8(sp)
971 ; RV64-NEXT: ld a0, 0(a2)
972 ; RV64-NEXT: add a0, a1, a0
973 ; RV64-NEXT: addi sp, sp, 64
976 ; RV32-WITHFP-LABEL: va3:
977 ; RV32-WITHFP: # %bb.0:
978 ; RV32-WITHFP-NEXT: addi sp, sp, -48
979 ; RV32-WITHFP-NEXT: sw ra, 20(sp) # 4-byte Folded Spill
980 ; RV32-WITHFP-NEXT: sw s0, 16(sp) # 4-byte Folded Spill
981 ; RV32-WITHFP-NEXT: addi s0, sp, 24
982 ; RV32-WITHFP-NEXT: sw a3, 4(s0)
983 ; RV32-WITHFP-NEXT: sw a4, 8(s0)
984 ; RV32-WITHFP-NEXT: addi a0, s0, 4
985 ; RV32-WITHFP-NEXT: sw a0, -12(s0)
986 ; RV32-WITHFP-NEXT: lw a0, -12(s0)
987 ; RV32-WITHFP-NEXT: sw a5, 12(s0)
988 ; RV32-WITHFP-NEXT: sw a6, 16(s0)
989 ; RV32-WITHFP-NEXT: sw a7, 20(s0)
990 ; RV32-WITHFP-NEXT: addi a0, a0, 7
991 ; RV32-WITHFP-NEXT: andi a3, a0, -8
992 ; RV32-WITHFP-NEXT: addi a0, a0, 8
993 ; RV32-WITHFP-NEXT: sw a0, -12(s0)
994 ; RV32-WITHFP-NEXT: lw a4, 0(a3)
995 ; RV32-WITHFP-NEXT: lw a3, 4(a3)
996 ; RV32-WITHFP-NEXT: add a0, a1, a4
997 ; RV32-WITHFP-NEXT: sltu a1, a0, a4
998 ; RV32-WITHFP-NEXT: add a2, a2, a3
999 ; RV32-WITHFP-NEXT: add a1, a2, a1
1000 ; RV32-WITHFP-NEXT: lw ra, 20(sp) # 4-byte Folded Reload
1001 ; RV32-WITHFP-NEXT: lw s0, 16(sp) # 4-byte Folded Reload
1002 ; RV32-WITHFP-NEXT: addi sp, sp, 48
1003 ; RV32-WITHFP-NEXT: ret
1005 ; RV64-WITHFP-LABEL: va3:
1006 ; RV64-WITHFP: # %bb.0:
1007 ; RV64-WITHFP-NEXT: addi sp, sp, -80
1008 ; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1009 ; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1010 ; RV64-WITHFP-NEXT: addi s0, sp, 32
1011 ; RV64-WITHFP-NEXT: sd a2, 0(s0)
1012 ; RV64-WITHFP-NEXT: sd a3, 8(s0)
1013 ; RV64-WITHFP-NEXT: sd a4, 16(s0)
1014 ; RV64-WITHFP-NEXT: mv a0, s0
1015 ; RV64-WITHFP-NEXT: sd a0, -24(s0)
1016 ; RV64-WITHFP-NEXT: ld a0, -24(s0)
1017 ; RV64-WITHFP-NEXT: sd a5, 24(s0)
1018 ; RV64-WITHFP-NEXT: sd a6, 32(s0)
1019 ; RV64-WITHFP-NEXT: sd a7, 40(s0)
1020 ; RV64-WITHFP-NEXT: addi a2, a0, 7
1021 ; RV64-WITHFP-NEXT: andi a2, a2, -8
1022 ; RV64-WITHFP-NEXT: addi a0, a0, 15
1023 ; RV64-WITHFP-NEXT: sd a0, -24(s0)
1024 ; RV64-WITHFP-NEXT: ld a0, 0(a2)
1025 ; RV64-WITHFP-NEXT: add a0, a1, a0
1026 ; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1027 ; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1028 ; RV64-WITHFP-NEXT: addi sp, sp, 80
1029 ; RV64-WITHFP-NEXT: ret
1031 call void @llvm.va_start(ptr %va)
1032 %argp.cur = load ptr, ptr %va
1033 %ptrint = ptrtoint ptr %argp.cur to iXLen
1034 %1 = add iXLen %ptrint, 7
1035 %2 = and iXLen %1, -8
1036 %argp.cur.aligned = inttoptr iXLen %1 to ptr
1037 %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
1038 store ptr %argp.next, ptr %va
1039 %3 = inttoptr iXLen %2 to ptr
1040 %4 = load double, ptr %3, align 8
1041 call void @llvm.va_end(ptr %va)
1042 %5 = bitcast double %4 to i64
1047 ; This test is slightly different than the SelectionDAG counterpart because
1048 ; narrowScalar and widenScalar for G_VAARG on types outside of [s32, sXLen]
1049 ; are not implemented yet.
1050 define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
1051 ; RV32-LABEL: va3_va_arg:
1053 ; RV32-NEXT: addi sp, sp, -32
1054 ; RV32-NEXT: sw a3, 12(sp)
1055 ; RV32-NEXT: sw a4, 16(sp)
1056 ; RV32-NEXT: sw a5, 20(sp)
1057 ; RV32-NEXT: sw a6, 24(sp)
1058 ; RV32-NEXT: sw a7, 28(sp)
1059 ; RV32-NEXT: addi a0, sp, 12
1060 ; RV32-NEXT: sw a0, 4(sp)
1061 ; RV32-NEXT: lw a0, 4(sp)
1062 ; RV32-NEXT: addi a0, a0, 3
1063 ; RV32-NEXT: andi a0, a0, -4
1064 ; RV32-NEXT: addi a3, a0, 4
1065 ; RV32-NEXT: sw a3, 4(sp)
1066 ; RV32-NEXT: lw a3, 0(a0)
1067 ; RV32-NEXT: add a0, a1, a3
1068 ; RV32-NEXT: sltu a1, a0, a3
1069 ; RV32-NEXT: add a1, a2, a1
1070 ; RV32-NEXT: addi sp, sp, 32
1073 ; RV64-LABEL: va3_va_arg:
1075 ; RV64-NEXT: addi sp, sp, -64
1076 ; RV64-NEXT: sd a2, 16(sp)
1077 ; RV64-NEXT: sd a3, 24(sp)
1078 ; RV64-NEXT: sd a4, 32(sp)
1079 ; RV64-NEXT: sd a5, 40(sp)
1080 ; RV64-NEXT: sd a6, 48(sp)
1081 ; RV64-NEXT: sd a7, 56(sp)
1082 ; RV64-NEXT: addi a0, sp, 16
1083 ; RV64-NEXT: sd a0, 8(sp)
1084 ; RV64-NEXT: ld a0, 8(sp)
1085 ; RV64-NEXT: addi a0, a0, 3
1086 ; RV64-NEXT: andi a0, a0, -4
1087 ; RV64-NEXT: addi a2, a0, 4
1088 ; RV64-NEXT: sd a2, 8(sp)
1089 ; RV64-NEXT: lw a0, 0(a0)
1090 ; RV64-NEXT: slli a0, a0, 32
1091 ; RV64-NEXT: srli a0, a0, 32
1092 ; RV64-NEXT: add a0, a1, a0
1093 ; RV64-NEXT: addi sp, sp, 64
1096 ; RV32-WITHFP-LABEL: va3_va_arg:
1097 ; RV32-WITHFP: # %bb.0:
1098 ; RV32-WITHFP-NEXT: addi sp, sp, -48
1099 ; RV32-WITHFP-NEXT: sw ra, 20(sp) # 4-byte Folded Spill
1100 ; RV32-WITHFP-NEXT: sw s0, 16(sp) # 4-byte Folded Spill
1101 ; RV32-WITHFP-NEXT: addi s0, sp, 24
1102 ; RV32-WITHFP-NEXT: sw a3, 4(s0)
1103 ; RV32-WITHFP-NEXT: sw a4, 8(s0)
1104 ; RV32-WITHFP-NEXT: sw a5, 12(s0)
1105 ; RV32-WITHFP-NEXT: sw a6, 16(s0)
1106 ; RV32-WITHFP-NEXT: sw a7, 20(s0)
1107 ; RV32-WITHFP-NEXT: addi a0, s0, 4
1108 ; RV32-WITHFP-NEXT: sw a0, -12(s0)
1109 ; RV32-WITHFP-NEXT: lw a0, -12(s0)
1110 ; RV32-WITHFP-NEXT: addi a0, a0, 3
1111 ; RV32-WITHFP-NEXT: andi a0, a0, -4
1112 ; RV32-WITHFP-NEXT: addi a3, a0, 4
1113 ; RV32-WITHFP-NEXT: sw a3, -12(s0)
1114 ; RV32-WITHFP-NEXT: lw a3, 0(a0)
1115 ; RV32-WITHFP-NEXT: add a0, a1, a3
1116 ; RV32-WITHFP-NEXT: sltu a1, a0, a3
1117 ; RV32-WITHFP-NEXT: add a1, a2, a1
1118 ; RV32-WITHFP-NEXT: lw ra, 20(sp) # 4-byte Folded Reload
1119 ; RV32-WITHFP-NEXT: lw s0, 16(sp) # 4-byte Folded Reload
1120 ; RV32-WITHFP-NEXT: addi sp, sp, 48
1121 ; RV32-WITHFP-NEXT: ret
1123 ; RV64-WITHFP-LABEL: va3_va_arg:
1124 ; RV64-WITHFP: # %bb.0:
1125 ; RV64-WITHFP-NEXT: addi sp, sp, -80
1126 ; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1127 ; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1128 ; RV64-WITHFP-NEXT: addi s0, sp, 32
1129 ; RV64-WITHFP-NEXT: sd a2, 0(s0)
1130 ; RV64-WITHFP-NEXT: sd a3, 8(s0)
1131 ; RV64-WITHFP-NEXT: sd a4, 16(s0)
1132 ; RV64-WITHFP-NEXT: sd a5, 24(s0)
1133 ; RV64-WITHFP-NEXT: sd a6, 32(s0)
1134 ; RV64-WITHFP-NEXT: sd a7, 40(s0)
1135 ; RV64-WITHFP-NEXT: mv a0, s0
1136 ; RV64-WITHFP-NEXT: sd a0, -24(s0)
1137 ; RV64-WITHFP-NEXT: ld a0, -24(s0)
1138 ; RV64-WITHFP-NEXT: addi a0, a0, 3
1139 ; RV64-WITHFP-NEXT: andi a0, a0, -4
1140 ; RV64-WITHFP-NEXT: addi a2, a0, 4
1141 ; RV64-WITHFP-NEXT: sd a2, -24(s0)
1142 ; RV64-WITHFP-NEXT: lw a0, 0(a0)
1143 ; RV64-WITHFP-NEXT: slli a0, a0, 32
1144 ; RV64-WITHFP-NEXT: srli a0, a0, 32
1145 ; RV64-WITHFP-NEXT: add a0, a1, a0
1146 ; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1147 ; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1148 ; RV64-WITHFP-NEXT: addi sp, sp, 80
1149 ; RV64-WITHFP-NEXT: ret
1151 call void @llvm.va_start(ptr %va)
1152 %1 = va_arg ptr %va, i32
1153 call void @llvm.va_end(ptr %va)
1154 %2 = zext i32 %1 to i64
1159 define void @va3_caller() nounwind {
1160 ; RV32-LABEL: va3_caller:
1162 ; RV32-NEXT: addi sp, sp, -16
1163 ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1164 ; RV32-NEXT: lui a0, 5
1165 ; RV32-NEXT: addi a3, a0, -480
1166 ; RV32-NEXT: li a0, 2
1167 ; RV32-NEXT: li a1, 1111
1168 ; RV32-NEXT: li a2, 0
1169 ; RV32-NEXT: call va3
1170 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1171 ; RV32-NEXT: addi sp, sp, 16
1174 ; RV64-LABEL: va3_caller:
1176 ; RV64-NEXT: addi sp, sp, -16
1177 ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1178 ; RV64-NEXT: lui a0, 5
1179 ; RV64-NEXT: addiw a2, a0, -480
1180 ; RV64-NEXT: li a0, 2
1181 ; RV64-NEXT: li a1, 1111
1182 ; RV64-NEXT: call va3
1183 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1184 ; RV64-NEXT: addi sp, sp, 16
1187 ; RV32-WITHFP-LABEL: va3_caller:
1188 ; RV32-WITHFP: # %bb.0:
1189 ; RV32-WITHFP-NEXT: addi sp, sp, -16
1190 ; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1191 ; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1192 ; RV32-WITHFP-NEXT: addi s0, sp, 16
1193 ; RV32-WITHFP-NEXT: lui a0, 5
1194 ; RV32-WITHFP-NEXT: addi a3, a0, -480
1195 ; RV32-WITHFP-NEXT: li a0, 2
1196 ; RV32-WITHFP-NEXT: li a1, 1111
1197 ; RV32-WITHFP-NEXT: li a2, 0
1198 ; RV32-WITHFP-NEXT: call va3
1199 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1200 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1201 ; RV32-WITHFP-NEXT: addi sp, sp, 16
1202 ; RV32-WITHFP-NEXT: ret
1204 ; RV64-WITHFP-LABEL: va3_caller:
1205 ; RV64-WITHFP: # %bb.0:
1206 ; RV64-WITHFP-NEXT: addi sp, sp, -16
1207 ; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1208 ; RV64-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
1209 ; RV64-WITHFP-NEXT: addi s0, sp, 16
1210 ; RV64-WITHFP-NEXT: lui a0, 5
1211 ; RV64-WITHFP-NEXT: addiw a2, a0, -480
1212 ; RV64-WITHFP-NEXT: li a0, 2
1213 ; RV64-WITHFP-NEXT: li a1, 1111
1214 ; RV64-WITHFP-NEXT: call va3
1215 ; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1216 ; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
1217 ; RV64-WITHFP-NEXT: addi sp, sp, 16
1218 ; RV64-WITHFP-NEXT: ret
1219 %1 = call i64 (i32, i64, ...) @va3(i32 2, i64 1111, i32 20000)
1223 declare void @llvm.va_copy(ptr, ptr)
1225 define i32 @va4_va_copy(i32 %argno, ...) nounwind {
1226 ; RV32-LABEL: va4_va_copy:
1228 ; RV32-NEXT: addi sp, sp, -48
1229 ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1230 ; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1231 ; RV32-NEXT: sw a1, 20(sp)
1232 ; RV32-NEXT: sw a2, 24(sp)
1233 ; RV32-NEXT: sw a3, 28(sp)
1234 ; RV32-NEXT: sw a4, 32(sp)
1235 ; RV32-NEXT: sw a5, 36(sp)
1236 ; RV32-NEXT: sw a6, 40(sp)
1237 ; RV32-NEXT: sw a7, 44(sp)
1238 ; RV32-NEXT: addi a0, sp, 20
1239 ; RV32-NEXT: sw a0, 4(sp)
1240 ; RV32-NEXT: lw a0, 4(sp)
1241 ; RV32-NEXT: addi a0, a0, 3
1242 ; RV32-NEXT: andi a0, a0, -4
1243 ; RV32-NEXT: addi a1, a0, 4
1244 ; RV32-NEXT: sw a1, 4(sp)
1245 ; RV32-NEXT: lw a1, 4(sp)
1246 ; RV32-NEXT: lw s0, 0(a0)
1247 ; RV32-NEXT: sw a1, 0(sp)
1248 ; RV32-NEXT: lw a0, 0(sp)
1249 ; RV32-NEXT: call notdead
1250 ; RV32-NEXT: lw a0, 4(sp)
1251 ; RV32-NEXT: addi a0, a0, 3
1252 ; RV32-NEXT: andi a0, a0, -4
1253 ; RV32-NEXT: addi a1, a0, 4
1254 ; RV32-NEXT: sw a1, 4(sp)
1255 ; RV32-NEXT: lw a1, 4(sp)
1256 ; RV32-NEXT: lw a0, 0(a0)
1257 ; RV32-NEXT: addi a1, a1, 3
1258 ; RV32-NEXT: andi a1, a1, -4
1259 ; RV32-NEXT: addi a2, a1, 4
1260 ; RV32-NEXT: sw a2, 4(sp)
1261 ; RV32-NEXT: lw a2, 4(sp)
1262 ; RV32-NEXT: lw a1, 0(a1)
1263 ; RV32-NEXT: addi a2, a2, 3
1264 ; RV32-NEXT: andi a2, a2, -4
1265 ; RV32-NEXT: addi a3, a2, 4
1266 ; RV32-NEXT: sw a3, 4(sp)
1267 ; RV32-NEXT: lw a2, 0(a2)
1268 ; RV32-NEXT: add a0, a0, s0
1269 ; RV32-NEXT: add a1, a1, a2
1270 ; RV32-NEXT: add a0, a0, a1
1271 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1272 ; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1273 ; RV32-NEXT: addi sp, sp, 48
1276 ; RV64-LABEL: va4_va_copy:
1278 ; RV64-NEXT: addi sp, sp, -96
1279 ; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1280 ; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1281 ; RV64-NEXT: sd a1, 40(sp)
1282 ; RV64-NEXT: sd a2, 48(sp)
1283 ; RV64-NEXT: sd a3, 56(sp)
1284 ; RV64-NEXT: sd a4, 64(sp)
1285 ; RV64-NEXT: sd a5, 72(sp)
1286 ; RV64-NEXT: sd a6, 80(sp)
1287 ; RV64-NEXT: sd a7, 88(sp)
1288 ; RV64-NEXT: addi a0, sp, 40
1289 ; RV64-NEXT: sd a0, 8(sp)
1290 ; RV64-NEXT: ld a0, 8(sp)
1291 ; RV64-NEXT: addi a0, a0, 3
1292 ; RV64-NEXT: andi a0, a0, -4
1293 ; RV64-NEXT: addi a1, a0, 4
1294 ; RV64-NEXT: sd a1, 8(sp)
1295 ; RV64-NEXT: ld a1, 8(sp)
1296 ; RV64-NEXT: lw s0, 0(a0)
1297 ; RV64-NEXT: sd a1, 0(sp)
1298 ; RV64-NEXT: lw a0, 4(sp)
1299 ; RV64-NEXT: lwu a1, 0(sp)
1300 ; RV64-NEXT: slli a0, a0, 32
1301 ; RV64-NEXT: or a0, a0, a1
1302 ; RV64-NEXT: call notdead
1303 ; RV64-NEXT: ld a0, 8(sp)
1304 ; RV64-NEXT: addi a0, a0, 3
1305 ; RV64-NEXT: andi a0, a0, -4
1306 ; RV64-NEXT: addi a1, a0, 4
1307 ; RV64-NEXT: sd a1, 8(sp)
1308 ; RV64-NEXT: ld a1, 8(sp)
1309 ; RV64-NEXT: lw a0, 0(a0)
1310 ; RV64-NEXT: addi a1, a1, 3
1311 ; RV64-NEXT: andi a1, a1, -4
1312 ; RV64-NEXT: addi a2, a1, 4
1313 ; RV64-NEXT: sd a2, 8(sp)
1314 ; RV64-NEXT: ld a2, 8(sp)
1315 ; RV64-NEXT: lw a1, 0(a1)
1316 ; RV64-NEXT: addi a2, a2, 3
1317 ; RV64-NEXT: andi a2, a2, -4
1318 ; RV64-NEXT: addi a3, a2, 4
1319 ; RV64-NEXT: sd a3, 8(sp)
1320 ; RV64-NEXT: lw a2, 0(a2)
1321 ; RV64-NEXT: add a0, a0, s0
1322 ; RV64-NEXT: add a1, a1, a2
1323 ; RV64-NEXT: addw a0, a0, a1
1324 ; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1325 ; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1326 ; RV64-NEXT: addi sp, sp, 96
1329 ; RV32-WITHFP-LABEL: va4_va_copy:
1330 ; RV32-WITHFP: # %bb.0:
1331 ; RV32-WITHFP-NEXT: addi sp, sp, -64
1332 ; RV32-WITHFP-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
1333 ; RV32-WITHFP-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
1334 ; RV32-WITHFP-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
1335 ; RV32-WITHFP-NEXT: addi s0, sp, 32
1336 ; RV32-WITHFP-NEXT: sw a1, 4(s0)
1337 ; RV32-WITHFP-NEXT: sw a2, 8(s0)
1338 ; RV32-WITHFP-NEXT: sw a3, 12(s0)
1339 ; RV32-WITHFP-NEXT: sw a4, 16(s0)
1340 ; RV32-WITHFP-NEXT: sw a5, 20(s0)
1341 ; RV32-WITHFP-NEXT: sw a6, 24(s0)
1342 ; RV32-WITHFP-NEXT: sw a7, 28(s0)
1343 ; RV32-WITHFP-NEXT: addi a0, s0, 4
1344 ; RV32-WITHFP-NEXT: sw a0, -16(s0)
1345 ; RV32-WITHFP-NEXT: lw a0, -16(s0)
1346 ; RV32-WITHFP-NEXT: addi a0, a0, 3
1347 ; RV32-WITHFP-NEXT: andi a0, a0, -4
1348 ; RV32-WITHFP-NEXT: addi a1, a0, 4
1349 ; RV32-WITHFP-NEXT: sw a1, -16(s0)
1350 ; RV32-WITHFP-NEXT: lw a1, -16(s0)
1351 ; RV32-WITHFP-NEXT: lw s1, 0(a0)
1352 ; RV32-WITHFP-NEXT: sw a1, -20(s0)
1353 ; RV32-WITHFP-NEXT: lw a0, -20(s0)
1354 ; RV32-WITHFP-NEXT: call notdead
1355 ; RV32-WITHFP-NEXT: lw a0, -16(s0)
1356 ; RV32-WITHFP-NEXT: addi a0, a0, 3
1357 ; RV32-WITHFP-NEXT: andi a0, a0, -4
1358 ; RV32-WITHFP-NEXT: addi a1, a0, 4
1359 ; RV32-WITHFP-NEXT: sw a1, -16(s0)
1360 ; RV32-WITHFP-NEXT: lw a1, -16(s0)
1361 ; RV32-WITHFP-NEXT: lw a0, 0(a0)
1362 ; RV32-WITHFP-NEXT: addi a1, a1, 3
1363 ; RV32-WITHFP-NEXT: andi a1, a1, -4
1364 ; RV32-WITHFP-NEXT: addi a2, a1, 4
1365 ; RV32-WITHFP-NEXT: sw a2, -16(s0)
1366 ; RV32-WITHFP-NEXT: lw a2, -16(s0)
1367 ; RV32-WITHFP-NEXT: lw a1, 0(a1)
1368 ; RV32-WITHFP-NEXT: addi a2, a2, 3
1369 ; RV32-WITHFP-NEXT: andi a2, a2, -4
1370 ; RV32-WITHFP-NEXT: addi a3, a2, 4
1371 ; RV32-WITHFP-NEXT: sw a3, -16(s0)
1372 ; RV32-WITHFP-NEXT: lw a2, 0(a2)
1373 ; RV32-WITHFP-NEXT: add a0, a0, s1
1374 ; RV32-WITHFP-NEXT: add a1, a1, a2
1375 ; RV32-WITHFP-NEXT: add a0, a0, a1
1376 ; RV32-WITHFP-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
1377 ; RV32-WITHFP-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
1378 ; RV32-WITHFP-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
1379 ; RV32-WITHFP-NEXT: addi sp, sp, 64
1380 ; RV32-WITHFP-NEXT: ret
1382 ; RV64-WITHFP-LABEL: va4_va_copy:
1383 ; RV64-WITHFP: # %bb.0:
1384 ; RV64-WITHFP-NEXT: addi sp, sp, -112
1385 ; RV64-WITHFP-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
1386 ; RV64-WITHFP-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
1387 ; RV64-WITHFP-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
1388 ; RV64-WITHFP-NEXT: addi s0, sp, 48
1389 ; RV64-WITHFP-NEXT: sd a1, 8(s0)
1390 ; RV64-WITHFP-NEXT: sd a2, 16(s0)
1391 ; RV64-WITHFP-NEXT: sd a3, 24(s0)
1392 ; RV64-WITHFP-NEXT: sd a4, 32(s0)
1393 ; RV64-WITHFP-NEXT: sd a5, 40(s0)
1394 ; RV64-WITHFP-NEXT: sd a6, 48(s0)
1395 ; RV64-WITHFP-NEXT: sd a7, 56(s0)
1396 ; RV64-WITHFP-NEXT: addi a0, s0, 8
1397 ; RV64-WITHFP-NEXT: sd a0, -32(s0)
1398 ; RV64-WITHFP-NEXT: ld a0, -32(s0)
1399 ; RV64-WITHFP-NEXT: addi a0, a0, 3
1400 ; RV64-WITHFP-NEXT: andi a0, a0, -4
1401 ; RV64-WITHFP-NEXT: addi a1, a0, 4
1402 ; RV64-WITHFP-NEXT: sd a1, -32(s0)
1403 ; RV64-WITHFP-NEXT: ld a1, -32(s0)
1404 ; RV64-WITHFP-NEXT: lw s1, 0(a0)
1405 ; RV64-WITHFP-NEXT: sd a1, -40(s0)
1406 ; RV64-WITHFP-NEXT: lw a0, -36(s0)
1407 ; RV64-WITHFP-NEXT: lwu a1, -40(s0)
1408 ; RV64-WITHFP-NEXT: slli a0, a0, 32
1409 ; RV64-WITHFP-NEXT: or a0, a0, a1
1410 ; RV64-WITHFP-NEXT: call notdead
1411 ; RV64-WITHFP-NEXT: ld a0, -32(s0)
1412 ; RV64-WITHFP-NEXT: addi a0, a0, 3
1413 ; RV64-WITHFP-NEXT: andi a0, a0, -4
1414 ; RV64-WITHFP-NEXT: addi a1, a0, 4
1415 ; RV64-WITHFP-NEXT: sd a1, -32(s0)
1416 ; RV64-WITHFP-NEXT: ld a1, -32(s0)
1417 ; RV64-WITHFP-NEXT: lw a0, 0(a0)
1418 ; RV64-WITHFP-NEXT: addi a1, a1, 3
1419 ; RV64-WITHFP-NEXT: andi a1, a1, -4
1420 ; RV64-WITHFP-NEXT: addi a2, a1, 4
1421 ; RV64-WITHFP-NEXT: sd a2, -32(s0)
1422 ; RV64-WITHFP-NEXT: ld a2, -32(s0)
1423 ; RV64-WITHFP-NEXT: lw a1, 0(a1)
1424 ; RV64-WITHFP-NEXT: addi a2, a2, 3
1425 ; RV64-WITHFP-NEXT: andi a2, a2, -4
1426 ; RV64-WITHFP-NEXT: addi a3, a2, 4
1427 ; RV64-WITHFP-NEXT: sd a3, -32(s0)
1428 ; RV64-WITHFP-NEXT: lw a2, 0(a2)
1429 ; RV64-WITHFP-NEXT: add a0, a0, s1
1430 ; RV64-WITHFP-NEXT: add a1, a1, a2
1431 ; RV64-WITHFP-NEXT: addw a0, a0, a1
1432 ; RV64-WITHFP-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
1433 ; RV64-WITHFP-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
1434 ; RV64-WITHFP-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
1435 ; RV64-WITHFP-NEXT: addi sp, sp, 112
1436 ; RV64-WITHFP-NEXT: ret
1439 call void @llvm.va_start(ptr %vargs)
1440 %1 = va_arg ptr %vargs, i32
1441 call void @llvm.va_copy(ptr %wargs, ptr %vargs)
1442 %2 = load ptr, ptr %wargs, align 4
1443 call void @notdead(ptr %2)
1444 %3 = va_arg ptr %vargs, i32
1445 %4 = va_arg ptr %vargs, i32
1446 %5 = va_arg ptr %vargs, i32
1447 call void @llvm.va_end(ptr %vargs)
1448 call void @llvm.va_end(ptr %wargs)
1449 %add1 = add i32 %3, %1
1450 %add2 = add i32 %add1, %4
1451 %add3 = add i32 %add2, %5
1455 ; The va5_aligned_stack_callee and caller function are ommitted from this file
1456 ; since they were not included in the IR lowering test when vararg calls were
1459 ; A function with no fixed arguments is not valid C, but can be
1460 ; specified in LLVM IR. We must ensure the vararg save area is
1461 ; still set up correctly.
1463 define i32 @va6_no_fixed_args(...) nounwind {
1464 ; RV32-LABEL: va6_no_fixed_args:
1466 ; RV32-NEXT: addi sp, sp, -48
1467 ; RV32-NEXT: sw a0, 16(sp)
1468 ; RV32-NEXT: sw a1, 20(sp)
1469 ; RV32-NEXT: sw a2, 24(sp)
1470 ; RV32-NEXT: sw a3, 28(sp)
1471 ; RV32-NEXT: sw a4, 32(sp)
1472 ; RV32-NEXT: sw a5, 36(sp)
1473 ; RV32-NEXT: sw a6, 40(sp)
1474 ; RV32-NEXT: sw a7, 44(sp)
1475 ; RV32-NEXT: addi a0, sp, 16
1476 ; RV32-NEXT: sw a0, 12(sp)
1477 ; RV32-NEXT: lw a0, 12(sp)
1478 ; RV32-NEXT: addi a0, a0, 3
1479 ; RV32-NEXT: andi a0, a0, -4
1480 ; RV32-NEXT: addi a1, a0, 4
1481 ; RV32-NEXT: sw a1, 12(sp)
1482 ; RV32-NEXT: lw a0, 0(a0)
1483 ; RV32-NEXT: addi sp, sp, 48
1486 ; RV64-LABEL: va6_no_fixed_args:
1488 ; RV64-NEXT: addi sp, sp, -80
1489 ; RV64-NEXT: sd a0, 16(sp)
1490 ; RV64-NEXT: sd a1, 24(sp)
1491 ; RV64-NEXT: sd a2, 32(sp)
1492 ; RV64-NEXT: sd a3, 40(sp)
1493 ; RV64-NEXT: sd a4, 48(sp)
1494 ; RV64-NEXT: sd a5, 56(sp)
1495 ; RV64-NEXT: sd a6, 64(sp)
1496 ; RV64-NEXT: sd a7, 72(sp)
1497 ; RV64-NEXT: addi a0, sp, 16
1498 ; RV64-NEXT: sd a0, 8(sp)
1499 ; RV64-NEXT: ld a0, 8(sp)
1500 ; RV64-NEXT: addi a0, a0, 3
1501 ; RV64-NEXT: andi a0, a0, -4
1502 ; RV64-NEXT: addi a1, a0, 4
1503 ; RV64-NEXT: sd a1, 8(sp)
1504 ; RV64-NEXT: lw a0, 0(a0)
1505 ; RV64-NEXT: addi sp, sp, 80
1508 ; RV32-WITHFP-LABEL: va6_no_fixed_args:
1509 ; RV32-WITHFP: # %bb.0:
1510 ; RV32-WITHFP-NEXT: addi sp, sp, -48
1511 ; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1512 ; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1513 ; RV32-WITHFP-NEXT: addi s0, sp, 16
1514 ; RV32-WITHFP-NEXT: sw a0, 0(s0)
1515 ; RV32-WITHFP-NEXT: sw a1, 4(s0)
1516 ; RV32-WITHFP-NEXT: sw a2, 8(s0)
1517 ; RV32-WITHFP-NEXT: sw a3, 12(s0)
1518 ; RV32-WITHFP-NEXT: sw a4, 16(s0)
1519 ; RV32-WITHFP-NEXT: sw a5, 20(s0)
1520 ; RV32-WITHFP-NEXT: sw a6, 24(s0)
1521 ; RV32-WITHFP-NEXT: sw a7, 28(s0)
1522 ; RV32-WITHFP-NEXT: mv a0, s0
1523 ; RV32-WITHFP-NEXT: sw a0, -12(s0)
1524 ; RV32-WITHFP-NEXT: lw a0, -12(s0)
1525 ; RV32-WITHFP-NEXT: addi a0, a0, 3
1526 ; RV32-WITHFP-NEXT: andi a0, a0, -4
1527 ; RV32-WITHFP-NEXT: addi a1, a0, 4
1528 ; RV32-WITHFP-NEXT: sw a1, -12(s0)
1529 ; RV32-WITHFP-NEXT: lw a0, 0(a0)
1530 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1531 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1532 ; RV32-WITHFP-NEXT: addi sp, sp, 48
1533 ; RV32-WITHFP-NEXT: ret
1535 ; RV64-WITHFP-LABEL: va6_no_fixed_args:
1536 ; RV64-WITHFP: # %bb.0:
1537 ; RV64-WITHFP-NEXT: addi sp, sp, -96
1538 ; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1539 ; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1540 ; RV64-WITHFP-NEXT: addi s0, sp, 32
1541 ; RV64-WITHFP-NEXT: sd a0, 0(s0)
1542 ; RV64-WITHFP-NEXT: sd a1, 8(s0)
1543 ; RV64-WITHFP-NEXT: sd a2, 16(s0)
1544 ; RV64-WITHFP-NEXT: sd a3, 24(s0)
1545 ; RV64-WITHFP-NEXT: sd a4, 32(s0)
1546 ; RV64-WITHFP-NEXT: sd a5, 40(s0)
1547 ; RV64-WITHFP-NEXT: sd a6, 48(s0)
1548 ; RV64-WITHFP-NEXT: sd a7, 56(s0)
1549 ; RV64-WITHFP-NEXT: mv a0, s0
1550 ; RV64-WITHFP-NEXT: sd a0, -24(s0)
1551 ; RV64-WITHFP-NEXT: ld a0, -24(s0)
1552 ; RV64-WITHFP-NEXT: addi a0, a0, 3
1553 ; RV64-WITHFP-NEXT: andi a0, a0, -4
1554 ; RV64-WITHFP-NEXT: addi a1, a0, 4
1555 ; RV64-WITHFP-NEXT: sd a1, -24(s0)
1556 ; RV64-WITHFP-NEXT: lw a0, 0(a0)
1557 ; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1558 ; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1559 ; RV64-WITHFP-NEXT: addi sp, sp, 96
1560 ; RV64-WITHFP-NEXT: ret
1562 call void @llvm.va_start(ptr %va)
1563 %1 = va_arg ptr %va, i32
1564 call void @llvm.va_end(ptr %va)
1568 ; TODO: improve constant materialization of stack addresses
1570 define i32 @va_large_stack(ptr %fmt, ...) {
1571 ; RV32-LABEL: va_large_stack:
1573 ; RV32-NEXT: lui a0, 24414
1574 ; RV32-NEXT: addi a0, a0, 304
1575 ; RV32-NEXT: sub sp, sp, a0
1576 ; RV32-NEXT: .cfi_def_cfa_offset 100000048
1577 ; RV32-NEXT: lui a0, 24414
1578 ; RV32-NEXT: add a0, sp, a0
1579 ; RV32-NEXT: sw a1, 276(a0)
1580 ; RV32-NEXT: lui a0, 24414
1581 ; RV32-NEXT: add a0, sp, a0
1582 ; RV32-NEXT: sw a2, 280(a0)
1583 ; RV32-NEXT: lui a0, 24414
1584 ; RV32-NEXT: add a0, sp, a0
1585 ; RV32-NEXT: sw a3, 284(a0)
1586 ; RV32-NEXT: lui a0, 24414
1587 ; RV32-NEXT: add a0, sp, a0
1588 ; RV32-NEXT: sw a4, 288(a0)
1589 ; RV32-NEXT: lui a0, 24414
1590 ; RV32-NEXT: addi a0, a0, 276
1591 ; RV32-NEXT: add a0, sp, a0
1592 ; RV32-NEXT: sw a0, 12(sp)
1593 ; RV32-NEXT: lw a0, 12(sp)
1594 ; RV32-NEXT: lui a1, 24414
1595 ; RV32-NEXT: add a1, sp, a1
1596 ; RV32-NEXT: sw a5, 292(a1)
1597 ; RV32-NEXT: lui a1, 24414
1598 ; RV32-NEXT: add a1, sp, a1
1599 ; RV32-NEXT: sw a6, 296(a1)
1600 ; RV32-NEXT: lui a1, 24414
1601 ; RV32-NEXT: add a1, sp, a1
1602 ; RV32-NEXT: sw a7, 300(a1)
1603 ; RV32-NEXT: addi a1, a0, 4
1604 ; RV32-NEXT: sw a1, 12(sp)
1605 ; RV32-NEXT: lw a0, 0(a0)
1606 ; RV32-NEXT: lui a1, 24414
1607 ; RV32-NEXT: addi a1, a1, 304
1608 ; RV32-NEXT: add sp, sp, a1
1611 ; RV64-LABEL: va_large_stack:
1613 ; RV64-NEXT: lui a0, 24414
1614 ; RV64-NEXT: addiw a0, a0, 336
1615 ; RV64-NEXT: sub sp, sp, a0
1616 ; RV64-NEXT: .cfi_def_cfa_offset 100000080
1617 ; RV64-NEXT: lui a0, 24414
1618 ; RV64-NEXT: add a0, sp, a0
1619 ; RV64-NEXT: sd a1, 280(a0)
1620 ; RV64-NEXT: lui a0, 24414
1621 ; RV64-NEXT: add a0, sp, a0
1622 ; RV64-NEXT: sd a2, 288(a0)
1623 ; RV64-NEXT: lui a0, 24414
1624 ; RV64-NEXT: add a0, sp, a0
1625 ; RV64-NEXT: sd a3, 296(a0)
1626 ; RV64-NEXT: lui a0, 24414
1627 ; RV64-NEXT: add a0, sp, a0
1628 ; RV64-NEXT: sd a4, 304(a0)
1629 ; RV64-NEXT: lui a0, 24414
1630 ; RV64-NEXT: add a0, sp, a0
1631 ; RV64-NEXT: sd a5, 312(a0)
1632 ; RV64-NEXT: lui a0, 24414
1633 ; RV64-NEXT: addiw a0, a0, 280
1634 ; RV64-NEXT: add a0, sp, a0
1635 ; RV64-NEXT: sd a0, 8(sp)
1636 ; RV64-NEXT: lw a0, 12(sp)
1637 ; RV64-NEXT: lwu a1, 8(sp)
1638 ; RV64-NEXT: lui a2, 24414
1639 ; RV64-NEXT: add a2, sp, a2
1640 ; RV64-NEXT: sd a6, 320(a2)
1641 ; RV64-NEXT: lui a2, 24414
1642 ; RV64-NEXT: add a2, sp, a2
1643 ; RV64-NEXT: sd a7, 328(a2)
1644 ; RV64-NEXT: slli a0, a0, 32
1645 ; RV64-NEXT: or a0, a0, a1
1646 ; RV64-NEXT: addi a1, a0, 4
1647 ; RV64-NEXT: srli a2, a1, 32
1648 ; RV64-NEXT: sw a1, 8(sp)
1649 ; RV64-NEXT: sw a2, 12(sp)
1650 ; RV64-NEXT: lw a0, 0(a0)
1651 ; RV64-NEXT: lui a1, 24414
1652 ; RV64-NEXT: addiw a1, a1, 336
1653 ; RV64-NEXT: add sp, sp, a1
1654 ; RV64-NEXT: ret
1656 ; RV32-WITHFP-LABEL: va_large_stack:
1657 ; RV32-WITHFP: # %bb.0:
1658 ; RV32-WITHFP-NEXT: addi sp, sp, -2032
1659 ; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 2032
1660 ; RV32-WITHFP-NEXT: sw ra, 1996(sp) # 4-byte Folded Spill
1661 ; RV32-WITHFP-NEXT: sw s0, 1992(sp) # 4-byte Folded Spill
1662 ; RV32-WITHFP-NEXT: .cfi_offset ra, -36
1663 ; RV32-WITHFP-NEXT: .cfi_offset s0, -40
1664 ; RV32-WITHFP-NEXT: addi s0, sp, 2000
1665 ; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 32
1666 ; RV32-WITHFP-NEXT: lui a0, 24414
1667 ; RV32-WITHFP-NEXT: addi a0, a0, -1728
1668 ; RV32-WITHFP-NEXT: sub sp, sp, a0
1669 ; RV32-WITHFP-NEXT: lui a0, 24414
1670 ; RV32-WITHFP-NEXT: addi a0, a0, 272
1671 ; RV32-WITHFP-NEXT: sub a0, s0, a0
1672 ; RV32-WITHFP-NEXT: sw a1, 4(s0)
1673 ; RV32-WITHFP-NEXT: sw a2, 8(s0)
1674 ; RV32-WITHFP-NEXT: sw a3, 12(s0)
1675 ; RV32-WITHFP-NEXT: sw a4, 16(s0)
1676 ; RV32-WITHFP-NEXT: addi a1, s0, 4
1677 ; RV32-WITHFP-NEXT: sw a1, 0(a0)
1678 ; RV32-WITHFP-NEXT: lw a1, 0(a0)
1679 ; RV32-WITHFP-NEXT: sw a5, 20(s0)
1680 ; RV32-WITHFP-NEXT: sw a6, 24(s0)
1681 ; RV32-WITHFP-NEXT: sw a7, 28(s0)
1682 ; RV32-WITHFP-NEXT: addi a2, a1, 4
1683 ; RV32-WITHFP-NEXT: sw a2, 0(a0)
1684 ; RV32-WITHFP-NEXT: lw a0, 0(a1)
1685 ; RV32-WITHFP-NEXT: lui a1, 24414
1686 ; RV32-WITHFP-NEXT: addi a1, a1, -1728
1687 ; RV32-WITHFP-NEXT: add sp, sp, a1
1688 ; RV32-WITHFP-NEXT: lw ra, 1996(sp) # 4-byte Folded Reload
1689 ; RV32-WITHFP-NEXT: lw s0, 1992(sp) # 4-byte Folded Reload
1690 ; RV32-WITHFP-NEXT: addi sp, sp, 2032
1691 ; RV32-WITHFP-NEXT: ret
1693 ; RV64-WITHFP-LABEL: va_large_stack:
1694 ; RV64-WITHFP: # %bb.0:
1695 ; RV64-WITHFP-NEXT: addi sp, sp, -2032
1696 ; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 2032
1697 ; RV64-WITHFP-NEXT: sd ra, 1960(sp) # 8-byte Folded Spill
1698 ; RV64-WITHFP-NEXT: sd s0, 1952(sp) # 8-byte Folded Spill
1699 ; RV64-WITHFP-NEXT: .cfi_offset ra, -72
1700 ; RV64-WITHFP-NEXT: .cfi_offset s0, -80
1701 ; RV64-WITHFP-NEXT: addi s0, sp, 1968
1702 ; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 64
1703 ; RV64-WITHFP-NEXT: lui a0, 24414
1704 ; RV64-WITHFP-NEXT: addiw a0, a0, -1680
1705 ; RV64-WITHFP-NEXT: sub sp, sp, a0
1706 ; RV64-WITHFP-NEXT: lui a0, 24414
1707 ; RV64-WITHFP-NEXT: addiw a0, a0, 288
1708 ; RV64-WITHFP-NEXT: sub a0, s0, a0
1709 ; RV64-WITHFP-NEXT: sd a1, 8(s0)
1710 ; RV64-WITHFP-NEXT: sd a2, 16(s0)
1711 ; RV64-WITHFP-NEXT: sd a3, 24(s0)
1712 ; RV64-WITHFP-NEXT: sd a4, 32(s0)
1713 ; RV64-WITHFP-NEXT: sd a5, 40(s0)
1714 ; RV64-WITHFP-NEXT: addi a1, s0, 8
1715 ; RV64-WITHFP-NEXT: sd a1, 0(a0)
1716 ; RV64-WITHFP-NEXT: lw a1, 4(a0)
1717 ; RV64-WITHFP-NEXT: lwu a2, 0(a0)
1718 ; RV64-WITHFP-NEXT: sd a6, 48(s0)
1719 ; RV64-WITHFP-NEXT: sd a7, 56(s0)
1720 ; RV64-WITHFP-NEXT: slli a1, a1, 32
1721 ; RV64-WITHFP-NEXT: or a1, a1, a2
1722 ; RV64-WITHFP-NEXT: addi a2, a1, 4
1723 ; RV64-WITHFP-NEXT: srli a3, a2, 32
1724 ; RV64-WITHFP-NEXT: sw a2, 0(a0)
1725 ; RV64-WITHFP-NEXT: sw a3, 4(a0)
1726 ; RV64-WITHFP-NEXT: lw a0, 0(a1)
1727 ; RV64-WITHFP-NEXT: lui a1, 24414
1728 ; RV64-WITHFP-NEXT: addiw a1, a1, -1680
1729 ; RV64-WITHFP-NEXT: add sp, sp, a1
1730 ; RV64-WITHFP-NEXT: ld ra, 1960(sp) # 8-byte Folded Reload
1731 ; RV64-WITHFP-NEXT: ld s0, 1952(sp) # 8-byte Folded Reload
1732 ; RV64-WITHFP-NEXT: addi sp, sp, 2032
1733 ; RV64-WITHFP-NEXT: ret
1734 %large = alloca [ 100000000 x i8 ]
1735 %va = alloca ptr
1736 call void @llvm.va_start(ptr %va)
1737 %argp.cur = load ptr, ptr %va, align 4
1738 %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
1739 store ptr %argp.next, ptr %va, align 4
1740 %1 = load i32, ptr %argp.cur, align 4
1741 call void @llvm.va_end(ptr %va)
1742 ret i32 %1
1743 }
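; va_vprintf takes an already-initialized va_list pointer rather than being
; variadic itself. A va_list on RISC-V is a single pointer into the argument
; save area, so llvm.va_copy is expected to lower to a plain load and store of
; that pointer (the sw/lw pairs on RV32 and sd/ld pairs on RV64 in the checks
; that follow), and va_arg then rounds the pointer up to 4 bytes and
; post-increments it by 4.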
1745 define i32 @va_vprintf(ptr %fmt, ptr %arg_start) {
1746 ; RV32-LABEL: va_vprintf:
1747 ; RV32: # %bb.0:
1748 ; RV32-NEXT: addi sp, sp, -16
1749 ; RV32-NEXT: .cfi_def_cfa_offset 16
1750 ; RV32-NEXT: sw a1, 12(sp)
1751 ; RV32-NEXT: lw a0, 12(sp)
1752 ; RV32-NEXT: sw a0, 8(sp)
1753 ; RV32-NEXT: lw a0, 8(sp)
1754 ; RV32-NEXT: addi a0, a0, 3
1755 ; RV32-NEXT: andi a0, a0, -4
1756 ; RV32-NEXT: addi a1, a0, 4
1757 ; RV32-NEXT: sw a1, 8(sp)
1758 ; RV32-NEXT: lw a0, 0(a0)
1759 ; RV32-NEXT: addi sp, sp, 16
1760 ; RV32-NEXT: ret
1762 ; RV64-LABEL: va_vprintf:
1763 ; RV64: # %bb.0:
1764 ; RV64-NEXT: addi sp, sp, -16
1765 ; RV64-NEXT: .cfi_def_cfa_offset 16
1766 ; RV64-NEXT: sd a1, 8(sp)
1767 ; RV64-NEXT: ld a0, 8(sp)
1768 ; RV64-NEXT: sd a0, 0(sp)
1769 ; RV64-NEXT: ld a0, 0(sp)
1770 ; RV64-NEXT: addi a0, a0, 3
1771 ; RV64-NEXT: andi a0, a0, -4
1772 ; RV64-NEXT: addi a1, a0, 4
1773 ; RV64-NEXT: sd a1, 0(sp)
1774 ; RV64-NEXT: lw a0, 0(a0)
1775 ; RV64-NEXT: addi sp, sp, 16
1776 ; RV64-NEXT: ret
1778 ; RV32-WITHFP-LABEL: va_vprintf:
1779 ; RV32-WITHFP: # %bb.0:
1780 ; RV32-WITHFP-NEXT: addi sp, sp, -16
1781 ; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 16
1782 ; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1783 ; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1784 ; RV32-WITHFP-NEXT: .cfi_offset ra, -4
1785 ; RV32-WITHFP-NEXT: .cfi_offset s0, -8
1786 ; RV32-WITHFP-NEXT: addi s0, sp, 16
1787 ; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 0
1788 ; RV32-WITHFP-NEXT: sw a1, -12(s0)
1789 ; RV32-WITHFP-NEXT: lw a0, -12(s0)
1790 ; RV32-WITHFP-NEXT: sw a0, -16(s0)
1791 ; RV32-WITHFP-NEXT: lw a0, -16(s0)
1792 ; RV32-WITHFP-NEXT: addi a0, a0, 3
1793 ; RV32-WITHFP-NEXT: andi a0, a0, -4
1794 ; RV32-WITHFP-NEXT: addi a1, a0, 4
1795 ; RV32-WITHFP-NEXT: sw a1, -16(s0)
1796 ; RV32-WITHFP-NEXT: lw a0, 0(a0)
1797 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1798 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1799 ; RV32-WITHFP-NEXT: addi sp, sp, 16
1800 ; RV32-WITHFP-NEXT: ret
1802 ; RV64-WITHFP-LABEL: va_vprintf:
1803 ; RV64-WITHFP: # %bb.0:
1804 ; RV64-WITHFP-NEXT: addi sp, sp, -32
1805 ; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 32
1806 ; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1807 ; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1808 ; RV64-WITHFP-NEXT: .cfi_offset ra, -8
1809 ; RV64-WITHFP-NEXT: .cfi_offset s0, -16
1810 ; RV64-WITHFP-NEXT: addi s0, sp, 32
1811 ; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 0
1812 ; RV64-WITHFP-NEXT: sd a1, -24(s0)
1813 ; RV64-WITHFP-NEXT: ld a0, -24(s0)
1814 ; RV64-WITHFP-NEXT: sd a0, -32(s0)
1815 ; RV64-WITHFP-NEXT: ld a0, -32(s0)
1816 ; RV64-WITHFP-NEXT: addi a0, a0, 3
1817 ; RV64-WITHFP-NEXT: andi a0, a0, -4
1818 ; RV64-WITHFP-NEXT: addi a1, a0, 4
1819 ; RV64-WITHFP-NEXT: sd a1, -32(s0)
1820 ; RV64-WITHFP-NEXT: lw a0, 0(a0)
1821 ; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1822 ; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1823 ; RV64-WITHFP-NEXT: addi sp, sp, 32
1824 ; RV64-WITHFP-NEXT: ret
1825 %args = alloca ptr
1826 %args_cp = alloca ptr
1827 store ptr %arg_start, ptr %args
1828 call void @llvm.va_copy(ptr %args_cp, ptr %args)
1829 %width = va_arg ptr %args_cp, i32
1830 call void @llvm.va_end(ptr %args_cp)
1831 ret i32 %width
1832 }
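; va_printf exercises forwarding of a variadic argument list: it calls
; llvm.va_start, loads the resulting va_list pointer, and passes it to
; va_vprintf, so the callee walks the register save area spilled in this
; function's prologue. The i32 returned by va_vprintf is returned unchanged.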
1834 define i32 @va_printf(ptr %fmt, ...) {
1835 ; RV32-LABEL: va_printf:
1836 ; RV32: # %bb.0:
1837 ; RV32-NEXT: addi sp, sp, -48
1838 ; RV32-NEXT: .cfi_def_cfa_offset 48
1839 ; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1840 ; RV32-NEXT: .cfi_offset ra, -36
1841 ; RV32-NEXT: sw a1, 20(sp)
1842 ; RV32-NEXT: sw a2, 24(sp)
1843 ; RV32-NEXT: sw a3, 28(sp)
1844 ; RV32-NEXT: sw a4, 32(sp)
1845 ; RV32-NEXT: addi a1, sp, 20
1846 ; RV32-NEXT: sw a1, 8(sp)
1847 ; RV32-NEXT: lw a1, 8(sp)
1848 ; RV32-NEXT: sw a5, 36(sp)
1849 ; RV32-NEXT: sw a6, 40(sp)
1850 ; RV32-NEXT: sw a7, 44(sp)
1851 ; RV32-NEXT: call va_vprintf
1852 ; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1853 ; RV32-NEXT: addi sp, sp, 48
1854 ; RV32-NEXT: ret
1856 ; RV64-LABEL: va_printf:
1857 ; RV64: # %bb.0:
1858 ; RV64-NEXT: addi sp, sp, -80
1859 ; RV64-NEXT: .cfi_def_cfa_offset 80
1860 ; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1861 ; RV64-NEXT: .cfi_offset ra, -72
1862 ; RV64-NEXT: sd a1, 24(sp)
1863 ; RV64-NEXT: sd a2, 32(sp)
1864 ; RV64-NEXT: sd a3, 40(sp)
1865 ; RV64-NEXT: sd a4, 48(sp)
1866 ; RV64-NEXT: addi a1, sp, 24
1867 ; RV64-NEXT: sd a1, 0(sp)
1868 ; RV64-NEXT: ld a1, 0(sp)
1869 ; RV64-NEXT: sd a5, 56(sp)
1870 ; RV64-NEXT: sd a6, 64(sp)
1871 ; RV64-NEXT: sd a7, 72(sp)
1872 ; RV64-NEXT: call va_vprintf
1873 ; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1874 ; RV64-NEXT: addi sp, sp, 80
1875 ; RV64-NEXT: ret
1877 ; RV32-WITHFP-LABEL: va_printf:
1878 ; RV32-WITHFP: # %bb.0:
1879 ; RV32-WITHFP-NEXT: addi sp, sp, -48
1880 ; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 48
1881 ; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1882 ; RV32-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1883 ; RV32-WITHFP-NEXT: .cfi_offset ra, -36
1884 ; RV32-WITHFP-NEXT: .cfi_offset s0, -40
1885 ; RV32-WITHFP-NEXT: addi s0, sp, 16
1886 ; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 32
1887 ; RV32-WITHFP-NEXT: sw a1, 4(s0)
1888 ; RV32-WITHFP-NEXT: sw a2, 8(s0)
1889 ; RV32-WITHFP-NEXT: sw a3, 12(s0)
1890 ; RV32-WITHFP-NEXT: sw a4, 16(s0)
1891 ; RV32-WITHFP-NEXT: addi a1, s0, 4
1892 ; RV32-WITHFP-NEXT: sw a1, -12(s0)
1893 ; RV32-WITHFP-NEXT: lw a1, -12(s0)
1894 ; RV32-WITHFP-NEXT: sw a5, 20(s0)
1895 ; RV32-WITHFP-NEXT: sw a6, 24(s0)
1896 ; RV32-WITHFP-NEXT: sw a7, 28(s0)
1897 ; RV32-WITHFP-NEXT: call va_vprintf
1898 ; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1899 ; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1900 ; RV32-WITHFP-NEXT: addi sp, sp, 48
1901 ; RV32-WITHFP-NEXT: ret
1903 ; RV64-WITHFP-LABEL: va_printf:
1904 ; RV64-WITHFP: # %bb.0:
1905 ; RV64-WITHFP-NEXT: addi sp, sp, -96
1906 ; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 96
1907 ; RV64-WITHFP-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1908 ; RV64-WITHFP-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1909 ; RV64-WITHFP-NEXT: .cfi_offset ra, -72
1910 ; RV64-WITHFP-NEXT: .cfi_offset s0, -80
1911 ; RV64-WITHFP-NEXT: addi s0, sp, 32
1912 ; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 64
1913 ; RV64-WITHFP-NEXT: sd a1, 8(s0)
1914 ; RV64-WITHFP-NEXT: sd a2, 16(s0)
1915 ; RV64-WITHFP-NEXT: sd a3, 24(s0)
1916 ; RV64-WITHFP-NEXT: sd a4, 32(s0)
1917 ; RV64-WITHFP-NEXT: addi a1, s0, 8
1918 ; RV64-WITHFP-NEXT: sd a1, -24(s0)
1919 ; RV64-WITHFP-NEXT: ld a1, -24(s0)
1920 ; RV64-WITHFP-NEXT: sd a5, 40(s0)
1921 ; RV64-WITHFP-NEXT: sd a6, 48(s0)
1922 ; RV64-WITHFP-NEXT: sd a7, 56(s0)
1923 ; RV64-WITHFP-NEXT: call va_vprintf
1924 ; RV64-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1925 ; RV64-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1926 ; RV64-WITHFP-NEXT: addi sp, sp, 96
1927 ; RV64-WITHFP-NEXT: ret
1928 %args = alloca ptr
1929 call void @llvm.va_start(ptr %args)
1930 %arg_start = load ptr, ptr %args
1931 %ret_val = call i32 @va_vprintf(ptr %fmt, ptr %arg_start)
1932 call void @llvm.va_end(ptr %args)
1933 ret i32 %ret_val
1934 }