; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel -verify-machineinstrs \
; RUN:   | FileCheck -check-prefixes=RV32,ILP32 %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel -mattr=+d -verify-machineinstrs \
; RUN:   | FileCheck -check-prefixes=RV32,RV32D-ILP32 %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel -mattr=+d -target-abi ilp32f \
; RUN:   -verify-machineinstrs \
; RUN:   | FileCheck -check-prefixes=RV32,RV32D-ILP32F %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -global-isel -mattr=+d -target-abi ilp32d \
; RUN:   -verify-machineinstrs \
; RUN:   | FileCheck -check-prefixes=RV32,RV32D-ILP32D %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -verify-machineinstrs \
; RUN:   | FileCheck -check-prefixes=RV64,LP64 %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -mattr=+d -target-abi lp64f \
; RUN:   -verify-machineinstrs \
; RUN:   | FileCheck -check-prefixes=RV64,LP64F %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -global-isel -mattr=+d -target-abi lp64d \
; RUN:   -verify-machineinstrs \
; RUN:   | FileCheck -check-prefixes=RV64,LP64D %s

; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
; lp64/lp64f/lp64d. Different CHECK lines are required due to slight codegen
; differences in the way the f64 load operations are lowered and because the
; PseudoCALL specifies the calling convention.
; The nounwind attribute is omitted for some of the tests to check that CFI
; directives are correctly generated.

declare void @llvm.va_start(ptr)
declare void @llvm.va_end(ptr)

declare void @notdead(ptr)

; Although frontends are recommended to not generate va_arg due to the lack of
; support for aggregate types, we test simple cases here to ensure they are
; lowered correctly.
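;
; As a rough, hedged illustration (not part of the original test), the manual
; pointer bump in va1 below corresponds to C along these lines, assuming the
; first vararg is an int:
;
;   #include <stdarg.h>
;   int va1(const char *fmt, ...) {
;     va_list va;
;     va_start(va, fmt);
;     int r = va_arg(va, int); /* va1 open-codes this 4-byte pointer bump */
;     va_end(va);
;     return r;
;   }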

define i32 @va1(ptr %fmt, ...) {
; RV32-LABEL: va1:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -48
; RV32-NEXT:    .cfi_def_cfa_offset 48
; RV32-NEXT:    sw a1, 20(sp)
; RV32-NEXT:    sw a2, 24(sp)
; RV32-NEXT:    sw a3, 28(sp)
; RV32-NEXT:    sw a4, 32(sp)
; RV32-NEXT:    addi a0, sp, 20
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    lw a0, 12(sp)
; RV32-NEXT:    sw a5, 36(sp)
; RV32-NEXT:    sw a6, 40(sp)
; RV32-NEXT:    sw a7, 44(sp)
; RV32-NEXT:    addi a1, a0, 4
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    lw a0, 0(a0)
; RV32-NEXT:    addi sp, sp, 48
; RV32-NEXT:    ret
;
; RV64-LABEL: va1:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -80
; RV64-NEXT:    .cfi_def_cfa_offset 80
; RV64-NEXT:    sd a1, 24(sp)
; RV64-NEXT:    sd a2, 32(sp)
; RV64-NEXT:    sd a3, 40(sp)
; RV64-NEXT:    sd a4, 48(sp)
; RV64-NEXT:    sd a5, 56(sp)
; RV64-NEXT:    addi a0, sp, 24
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    lw a0, 12(sp)
; RV64-NEXT:    lwu a1, 8(sp)
; RV64-NEXT:    sd a6, 64(sp)
; RV64-NEXT:    sd a7, 72(sp)
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    or a0, a0, a1
; RV64-NEXT:    addi a1, a0, 4
; RV64-NEXT:    srli a2, a1, 32
; RV64-NEXT:    sw a1, 8(sp)
; RV64-NEXT:    sw a2, 12(sp)
; RV64-NEXT:    lw a0, 0(a0)
; RV64-NEXT:    addi sp, sp, 80
; RV64-NEXT:    ret
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %argp.cur = load ptr, ptr %va, align 4
  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
  store ptr %argp.next, ptr %va, align 4
  %1 = load i32, ptr %argp.cur, align 4
  call void @llvm.va_end(ptr %va)
  ret i32 %1
}

define i32 @va1_va_arg(ptr %fmt, ...) nounwind {
; RV32-LABEL: va1_va_arg:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -48
; RV32-NEXT:    sw a1, 20(sp)
; RV32-NEXT:    sw a2, 24(sp)
; RV32-NEXT:    sw a3, 28(sp)
; RV32-NEXT:    sw a4, 32(sp)
; RV32-NEXT:    sw a5, 36(sp)
; RV32-NEXT:    sw a6, 40(sp)
; RV32-NEXT:    sw a7, 44(sp)
; RV32-NEXT:    addi a0, sp, 20
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    lw a0, 12(sp)
; RV32-NEXT:    addi a0, a0, 3
; RV32-NEXT:    andi a0, a0, -4
; RV32-NEXT:    addi a1, a0, 4
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    lw a0, 0(a0)
; RV32-NEXT:    addi sp, sp, 48
; RV32-NEXT:    ret
;
; RV64-LABEL: va1_va_arg:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -80
; RV64-NEXT:    sd a1, 24(sp)
; RV64-NEXT:    sd a2, 32(sp)
; RV64-NEXT:    sd a3, 40(sp)
; RV64-NEXT:    sd a4, 48(sp)
; RV64-NEXT:    sd a5, 56(sp)
; RV64-NEXT:    sd a6, 64(sp)
; RV64-NEXT:    sd a7, 72(sp)
; RV64-NEXT:    addi a0, sp, 24
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    ld a0, 8(sp)
; RV64-NEXT:    addi a0, a0, 3
; RV64-NEXT:    andi a0, a0, -4
; RV64-NEXT:    addi a1, a0, 4
; RV64-NEXT:    sd a1, 8(sp)
; RV64-NEXT:    lw a0, 0(a0)
; RV64-NEXT:    addi sp, sp, 80
; RV64-NEXT:    ret
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %1 = va_arg ptr %va, i32
  call void @llvm.va_end(ptr %va)
  ret i32 %1
}

; Ensure the adjustment when restoring the stack pointer using the frame
; pointer is correct.
define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; RV32-LABEL: va1_va_arg_alloca:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -48
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32-NEXT:    addi s0, sp, 16
; RV32-NEXT:    sw a1, 4(s0)
; RV32-NEXT:    sw a2, 8(s0)
; RV32-NEXT:    sw a3, 12(s0)
; RV32-NEXT:    sw a4, 16(s0)
; RV32-NEXT:    sw a5, 20(s0)
; RV32-NEXT:    sw a6, 24(s0)
; RV32-NEXT:    sw a7, 28(s0)
; RV32-NEXT:    addi a0, s0, 4
; RV32-NEXT:    sw a0, -16(s0)
; RV32-NEXT:    lw a0, -16(s0)
; RV32-NEXT:    addi a0, a0, 3
; RV32-NEXT:    andi a0, a0, -4
; RV32-NEXT:    addi a1, a0, 4
; RV32-NEXT:    sw a1, -16(s0)
; RV32-NEXT:    lw s1, 0(a0)
; RV32-NEXT:    addi a0, s1, 15
; RV32-NEXT:    andi a0, a0, -16
; RV32-NEXT:    sub a0, sp, a0
; RV32-NEXT:    mv sp, a0
; RV32-NEXT:    call notdead
; RV32-NEXT:    mv a0, s1
; RV32-NEXT:    addi sp, s0, -16
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 48
; RV32-NEXT:    ret
;
; RV64-LABEL: va1_va_arg_alloca:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -96
; RV64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    addi s0, sp, 32
; RV64-NEXT:    sd a1, 8(s0)
; RV64-NEXT:    sd a2, 16(s0)
; RV64-NEXT:    sd a3, 24(s0)
; RV64-NEXT:    sd a4, 32(s0)
; RV64-NEXT:    sd a5, 40(s0)
; RV64-NEXT:    sd a6, 48(s0)
; RV64-NEXT:    sd a7, 56(s0)
; RV64-NEXT:    addi a0, s0, 8
; RV64-NEXT:    sd a0, -32(s0)
; RV64-NEXT:    ld a0, -32(s0)
; RV64-NEXT:    addi a0, a0, 3
; RV64-NEXT:    andi a0, a0, -4
; RV64-NEXT:    addi a1, a0, 4
; RV64-NEXT:    sd a1, -32(s0)
; RV64-NEXT:    lw s1, 0(a0)
; RV64-NEXT:    slli a0, s1, 32
; RV64-NEXT:    srli a0, a0, 32
; RV64-NEXT:    addi a0, a0, 15
; RV64-NEXT:    andi a0, a0, -16
; RV64-NEXT:    sub a0, sp, a0
; RV64-NEXT:    mv sp, a0
; RV64-NEXT:    call notdead
; RV64-NEXT:    mv a0, s1
; RV64-NEXT:    addi sp, s0, -32
; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 96
; RV64-NEXT:    ret
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %1 = va_arg ptr %va, i32
  %2 = alloca i8, i32 %1
  call void @notdead(ptr %2)
  call void @llvm.va_end(ptr %va)
  ret i32 %1
}

define void @va1_caller() nounwind {
; RV32-LABEL: va1_caller:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    lui a3, 261888
; RV32-NEXT:    li a4, 2
; RV32-NEXT:    li a2, 0
; RV32-NEXT:    call va1
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; LP64-LABEL: va1_caller:
; LP64:       # %bb.0:
; LP64-NEXT:    addi sp, sp, -16
; LP64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; LP64-NEXT:    lui a0, %hi(.LCPI3_0)
; LP64-NEXT:    ld a1, %lo(.LCPI3_0)(a0)
; LP64-NEXT:    li a2, 2
; LP64-NEXT:    call va1
; LP64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; LP64-NEXT:    addi sp, sp, 16
; LP64-NEXT:    ret
;
; LP64F-LABEL: va1_caller:
; LP64F:       # %bb.0:
; LP64F-NEXT:    addi sp, sp, -16
; LP64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; LP64F-NEXT:    li a0, 1023
; LP64F-NEXT:    slli a0, a0, 52
; LP64F-NEXT:    fmv.d.x fa5, a0
; LP64F-NEXT:    li a2, 2
; LP64F-NEXT:    fmv.x.d a1, fa5
; LP64F-NEXT:    call va1
; LP64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; LP64F-NEXT:    addi sp, sp, 16
; LP64F-NEXT:    ret
;
; LP64D-LABEL: va1_caller:
; LP64D:       # %bb.0:
; LP64D-NEXT:    addi sp, sp, -16
; LP64D-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; LP64D-NEXT:    li a0, 1023
; LP64D-NEXT:    slli a0, a0, 52
; LP64D-NEXT:    fmv.d.x fa5, a0
; LP64D-NEXT:    li a2, 2
; LP64D-NEXT:    fmv.x.d a1, fa5
; LP64D-NEXT:    call va1
; LP64D-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; LP64D-NEXT:    addi sp, sp, 16
; LP64D-NEXT:    ret
  %1 = call i32 (ptr, ...) @va1(ptr undef, double 1.0, i32 2)
  ret void
}

; Ensure that 2x xlen size+alignment varargs are accessed via an "aligned"
; register pair (where the first register is even-numbered).
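;
; A hedged C-level sketch of the va2 pattern below (illustrative only, not
; part of the original test; assumes the vararg is a double):
;
;   #include <stdarg.h>
;   #include <string.h>
;   long long va2(const char *fmt, ...) {
;     va_list va;
;     va_start(va, fmt);
;     double d = va_arg(va, double); /* 8-byte aligned slot */
;     va_end(va);
;     long long bits;
;     memcpy(&bits, &d, sizeof bits); /* the bitcast in the IR below */
;     return bits;
;   }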

define i64 @va2(ptr %fmt, ...) nounwind {
; ILP32-LABEL: va2:
; ILP32:       # %bb.0:
; ILP32-NEXT:    addi sp, sp, -48
; ILP32-NEXT:    sw a1, 20(sp)
; ILP32-NEXT:    sw a2, 24(sp)
; ILP32-NEXT:    sw a3, 28(sp)
; ILP32-NEXT:    sw a4, 32(sp)
; ILP32-NEXT:    addi a0, sp, 20
; ILP32-NEXT:    sw a0, 12(sp)
; ILP32-NEXT:    lw a0, 12(sp)
; ILP32-NEXT:    sw a5, 36(sp)
; ILP32-NEXT:    sw a6, 40(sp)
; ILP32-NEXT:    sw a7, 44(sp)
; ILP32-NEXT:    addi a1, a0, 7
; ILP32-NEXT:    andi a1, a1, -8
; ILP32-NEXT:    addi a0, a0, 15
; ILP32-NEXT:    sw a0, 12(sp)
; ILP32-NEXT:    lw a0, 0(a1)
; ILP32-NEXT:    lw a1, 4(a1)
; ILP32-NEXT:    addi sp, sp, 48
; ILP32-NEXT:    ret
;
; RV32D-ILP32-LABEL: va2:
; RV32D-ILP32:       # %bb.0:
; RV32D-ILP32-NEXT:    addi sp, sp, -48
; RV32D-ILP32-NEXT:    sw a1, 20(sp)
; RV32D-ILP32-NEXT:    sw a2, 24(sp)
; RV32D-ILP32-NEXT:    sw a3, 28(sp)
; RV32D-ILP32-NEXT:    sw a4, 32(sp)
; RV32D-ILP32-NEXT:    addi a0, sp, 20
; RV32D-ILP32-NEXT:    sw a0, 12(sp)
; RV32D-ILP32-NEXT:    lw a0, 12(sp)
; RV32D-ILP32-NEXT:    sw a5, 36(sp)
; RV32D-ILP32-NEXT:    sw a6, 40(sp)
; RV32D-ILP32-NEXT:    sw a7, 44(sp)
; RV32D-ILP32-NEXT:    addi a1, a0, 7
; RV32D-ILP32-NEXT:    andi a1, a1, -8
; RV32D-ILP32-NEXT:    fld fa5, 0(a1)
; RV32D-ILP32-NEXT:    addi a0, a0, 15
; RV32D-ILP32-NEXT:    sw a0, 12(sp)
; RV32D-ILP32-NEXT:    fsd fa5, 0(sp)
; RV32D-ILP32-NEXT:    lw a0, 0(sp)
; RV32D-ILP32-NEXT:    lw a1, 4(sp)
; RV32D-ILP32-NEXT:    addi sp, sp, 48
; RV32D-ILP32-NEXT:    ret
;
; RV32D-ILP32F-LABEL: va2:
; RV32D-ILP32F:       # %bb.0:
; RV32D-ILP32F-NEXT:    addi sp, sp, -48
; RV32D-ILP32F-NEXT:    sw a1, 20(sp)
; RV32D-ILP32F-NEXT:    sw a2, 24(sp)
; RV32D-ILP32F-NEXT:    sw a3, 28(sp)
; RV32D-ILP32F-NEXT:    sw a4, 32(sp)
; RV32D-ILP32F-NEXT:    addi a0, sp, 20
; RV32D-ILP32F-NEXT:    sw a0, 12(sp)
; RV32D-ILP32F-NEXT:    lw a0, 12(sp)
; RV32D-ILP32F-NEXT:    sw a5, 36(sp)
; RV32D-ILP32F-NEXT:    sw a6, 40(sp)
; RV32D-ILP32F-NEXT:    sw a7, 44(sp)
; RV32D-ILP32F-NEXT:    addi a1, a0, 7
; RV32D-ILP32F-NEXT:    andi a1, a1, -8
; RV32D-ILP32F-NEXT:    fld fa5, 0(a1)
; RV32D-ILP32F-NEXT:    addi a0, a0, 15
; RV32D-ILP32F-NEXT:    sw a0, 12(sp)
; RV32D-ILP32F-NEXT:    fsd fa5, 0(sp)
; RV32D-ILP32F-NEXT:    lw a0, 0(sp)
; RV32D-ILP32F-NEXT:    lw a1, 4(sp)
; RV32D-ILP32F-NEXT:    addi sp, sp, 48
; RV32D-ILP32F-NEXT:    ret
;
; RV32D-ILP32D-LABEL: va2:
; RV32D-ILP32D:       # %bb.0:
; RV32D-ILP32D-NEXT:    addi sp, sp, -48
; RV32D-ILP32D-NEXT:    sw a1, 20(sp)
; RV32D-ILP32D-NEXT:    sw a2, 24(sp)
; RV32D-ILP32D-NEXT:    sw a3, 28(sp)
; RV32D-ILP32D-NEXT:    sw a4, 32(sp)
; RV32D-ILP32D-NEXT:    addi a0, sp, 20
; RV32D-ILP32D-NEXT:    sw a0, 12(sp)
; RV32D-ILP32D-NEXT:    lw a0, 12(sp)
; RV32D-ILP32D-NEXT:    sw a5, 36(sp)
; RV32D-ILP32D-NEXT:    sw a6, 40(sp)
; RV32D-ILP32D-NEXT:    sw a7, 44(sp)
; RV32D-ILP32D-NEXT:    addi a1, a0, 7
; RV32D-ILP32D-NEXT:    andi a1, a1, -8
; RV32D-ILP32D-NEXT:    fld fa5, 0(a1)
; RV32D-ILP32D-NEXT:    addi a0, a0, 15
; RV32D-ILP32D-NEXT:    sw a0, 12(sp)
; RV32D-ILP32D-NEXT:    fsd fa5, 0(sp)
; RV32D-ILP32D-NEXT:    lw a0, 0(sp)
; RV32D-ILP32D-NEXT:    lw a1, 4(sp)
; RV32D-ILP32D-NEXT:    addi sp, sp, 48
; RV32D-ILP32D-NEXT:    ret
;
; RV64-LABEL: va2:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -80
; RV64-NEXT:    sd a1, 24(sp)
; RV64-NEXT:    sd a2, 32(sp)
; RV64-NEXT:    sd a3, 40(sp)
; RV64-NEXT:    sd a4, 48(sp)
; RV64-NEXT:    addi a0, sp, 24
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    ld a0, 8(sp)
; RV64-NEXT:    sd a5, 56(sp)
; RV64-NEXT:    sd a6, 64(sp)
; RV64-NEXT:    sd a7, 72(sp)
; RV64-NEXT:    addi a1, a0, 7
; RV64-NEXT:    andi a1, a1, -8
; RV64-NEXT:    addi a0, a0, 15
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    ld a0, 0(a1)
; RV64-NEXT:    addi sp, sp, 80
; RV64-NEXT:    ret
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %argp.cur = load ptr, ptr %va
  %ptrint = ptrtoint ptr %argp.cur to iXLen
  %1 = add iXLen %ptrint, 7
  %2 = and iXLen %1, -8
  %argp.cur.aligned = inttoptr iXLen %1 to ptr
  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
  store ptr %argp.next, ptr %va
  %3 = inttoptr iXLen %2 to ptr
  %4 = load double, ptr %3, align 8
  %5 = bitcast double %4 to i64
  call void @llvm.va_end(ptr %va)
  ret i64 %5
}

; This test is slightly different from the SelectionDAG counterpart because
; narrowScalar and widenScalar for G_VAARG on types outside of [s32, sXLen]
; are not implemented yet.
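;
; For comparison, the SelectionDAG counterpart takes the vararg as a double
; directly, roughly (a sketch, not quoted from that file):
;
;   %1 = va_arg ptr %va, double
;   %2 = bitcast double %1 to i64
;
; Here an i32 is fetched and zero-extended instead, since G_VAARG is
; currently only handled for types in [s32, sXLen].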
define i64 @va2_va_arg(ptr %fmt, ...) nounwind {
; RV32-LABEL: va2_va_arg:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -48
; RV32-NEXT:    sw a1, 20(sp)
; RV32-NEXT:    sw a2, 24(sp)
; RV32-NEXT:    sw a3, 28(sp)
; RV32-NEXT:    sw a4, 32(sp)
; RV32-NEXT:    sw a5, 36(sp)
; RV32-NEXT:    sw a6, 40(sp)
; RV32-NEXT:    sw a7, 44(sp)
; RV32-NEXT:    addi a0, sp, 20
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    lw a0, 12(sp)
; RV32-NEXT:    addi a0, a0, 3
; RV32-NEXT:    andi a0, a0, -4
; RV32-NEXT:    addi a1, a0, 4
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    lw a0, 0(a0)
; RV32-NEXT:    li a1, 0
; RV32-NEXT:    addi sp, sp, 48
; RV32-NEXT:    ret
;
; RV64-LABEL: va2_va_arg:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -80
; RV64-NEXT:    sd a1, 24(sp)
; RV64-NEXT:    sd a2, 32(sp)
; RV64-NEXT:    sd a3, 40(sp)
; RV64-NEXT:    sd a4, 48(sp)
; RV64-NEXT:    sd a5, 56(sp)
; RV64-NEXT:    sd a6, 64(sp)
; RV64-NEXT:    sd a7, 72(sp)
; RV64-NEXT:    addi a0, sp, 24
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    ld a0, 8(sp)
; RV64-NEXT:    addi a0, a0, 3
; RV64-NEXT:    andi a0, a0, -4
; RV64-NEXT:    addi a1, a0, 4
; RV64-NEXT:    sd a1, 8(sp)
; RV64-NEXT:    lw a0, 0(a0)
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    srli a0, a0, 32
; RV64-NEXT:    addi sp, sp, 80
; RV64-NEXT:    ret
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %1 = va_arg ptr %va, i32
  call void @llvm.va_end(ptr %va)
  %2 = zext i32 %1 to i64
  ret i64 %2
}

define void @va2_caller() nounwind {
; RV32-LABEL: va2_caller:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    li a1, 1
; RV32-NEXT:    call va2
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: va2_caller:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    li a1, 1
; RV64-NEXT:    call va2
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
  %1 = call i64 (ptr, ...) @va2(ptr undef, i32 1)
  ret void
}

; On RV32, ensure a named 2*xlen argument is passed in a1 and a2, while the
; vararg double is passed in a4 and a5 (rather than a3 and a4).
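;
; A hedged C-level sketch of va3 (illustrative only, not part of the original
; test; assumes the vararg is a double whose bit pattern is added to %b):
;
;   #include <stdarg.h>
;   #include <string.h>
;   long long va3(int a, long long b, ...) {
;     va_list va;
;     va_start(va, b);
;     double d = va_arg(va, double);
;     va_end(va);
;     long long bits;
;     memcpy(&bits, &d, sizeof bits);
;     return b + bits;
;   }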

define i64 @va3(i32 %a, i64 %b, ...) nounwind {
; ILP32-LABEL: va3:
; ILP32:       # %bb.0:
; ILP32-NEXT:    addi sp, sp, -32
; ILP32-NEXT:    sw a3, 12(sp)
; ILP32-NEXT:    sw a4, 16(sp)
; ILP32-NEXT:    addi a0, sp, 12
; ILP32-NEXT:    sw a0, 4(sp)
; ILP32-NEXT:    lw a0, 4(sp)
; ILP32-NEXT:    sw a5, 20(sp)
; ILP32-NEXT:    sw a6, 24(sp)
; ILP32-NEXT:    sw a7, 28(sp)
; ILP32-NEXT:    addi a3, a0, 7
; ILP32-NEXT:    andi a3, a3, -8
; ILP32-NEXT:    addi a0, a0, 15
; ILP32-NEXT:    sw a0, 4(sp)
; ILP32-NEXT:    lw a4, 0(a3)
; ILP32-NEXT:    lw a3, 4(a3)
; ILP32-NEXT:    add a0, a1, a4
; ILP32-NEXT:    sltu a1, a0, a4
; ILP32-NEXT:    add a2, a2, a3
; ILP32-NEXT:    add a1, a2, a1
; ILP32-NEXT:    addi sp, sp, 32
; ILP32-NEXT:    ret
;
; RV32D-ILP32-LABEL: va3:
; RV32D-ILP32:       # %bb.0:
; RV32D-ILP32-NEXT:    addi sp, sp, -48
; RV32D-ILP32-NEXT:    sw a3, 28(sp)
; RV32D-ILP32-NEXT:    sw a4, 32(sp)
; RV32D-ILP32-NEXT:    addi a0, sp, 28
; RV32D-ILP32-NEXT:    sw a0, 20(sp)
; RV32D-ILP32-NEXT:    lw a0, 20(sp)
; RV32D-ILP32-NEXT:    sw a5, 36(sp)
; RV32D-ILP32-NEXT:    sw a6, 40(sp)
; RV32D-ILP32-NEXT:    sw a7, 44(sp)
; RV32D-ILP32-NEXT:    addi a3, a0, 7
; RV32D-ILP32-NEXT:    andi a3, a3, -8
; RV32D-ILP32-NEXT:    fld fa5, 0(a3)
; RV32D-ILP32-NEXT:    addi a0, a0, 15
; RV32D-ILP32-NEXT:    sw a0, 20(sp)
; RV32D-ILP32-NEXT:    fsd fa5, 8(sp)
; RV32D-ILP32-NEXT:    lw a3, 8(sp)
; RV32D-ILP32-NEXT:    lw a4, 12(sp)
; RV32D-ILP32-NEXT:    add a0, a1, a3
; RV32D-ILP32-NEXT:    sltu a1, a0, a3
; RV32D-ILP32-NEXT:    add a2, a2, a4
; RV32D-ILP32-NEXT:    add a1, a2, a1
; RV32D-ILP32-NEXT:    addi sp, sp, 48
; RV32D-ILP32-NEXT:    ret
;
; RV32D-ILP32F-LABEL: va3:
; RV32D-ILP32F:       # %bb.0:
; RV32D-ILP32F-NEXT:    addi sp, sp, -48
; RV32D-ILP32F-NEXT:    sw a3, 28(sp)
; RV32D-ILP32F-NEXT:    sw a4, 32(sp)
; RV32D-ILP32F-NEXT:    addi a0, sp, 28
; RV32D-ILP32F-NEXT:    sw a0, 20(sp)
; RV32D-ILP32F-NEXT:    lw a0, 20(sp)
; RV32D-ILP32F-NEXT:    sw a5, 36(sp)
; RV32D-ILP32F-NEXT:    sw a6, 40(sp)
; RV32D-ILP32F-NEXT:    sw a7, 44(sp)
; RV32D-ILP32F-NEXT:    addi a3, a0, 7
; RV32D-ILP32F-NEXT:    andi a3, a3, -8
; RV32D-ILP32F-NEXT:    fld fa5, 0(a3)
; RV32D-ILP32F-NEXT:    addi a0, a0, 15
; RV32D-ILP32F-NEXT:    sw a0, 20(sp)
; RV32D-ILP32F-NEXT:    fsd fa5, 8(sp)
; RV32D-ILP32F-NEXT:    lw a3, 8(sp)
; RV32D-ILP32F-NEXT:    lw a4, 12(sp)
; RV32D-ILP32F-NEXT:    add a0, a1, a3
; RV32D-ILP32F-NEXT:    sltu a1, a0, a3
; RV32D-ILP32F-NEXT:    add a2, a2, a4
; RV32D-ILP32F-NEXT:    add a1, a2, a1
; RV32D-ILP32F-NEXT:    addi sp, sp, 48
; RV32D-ILP32F-NEXT:    ret
;
; RV32D-ILP32D-LABEL: va3:
; RV32D-ILP32D:       # %bb.0:
; RV32D-ILP32D-NEXT:    addi sp, sp, -48
; RV32D-ILP32D-NEXT:    sw a3, 28(sp)
; RV32D-ILP32D-NEXT:    sw a4, 32(sp)
; RV32D-ILP32D-NEXT:    addi a0, sp, 28
; RV32D-ILP32D-NEXT:    sw a0, 20(sp)
; RV32D-ILP32D-NEXT:    lw a0, 20(sp)
; RV32D-ILP32D-NEXT:    sw a5, 36(sp)
; RV32D-ILP32D-NEXT:    sw a6, 40(sp)
; RV32D-ILP32D-NEXT:    sw a7, 44(sp)
; RV32D-ILP32D-NEXT:    addi a3, a0, 7
; RV32D-ILP32D-NEXT:    andi a3, a3, -8
; RV32D-ILP32D-NEXT:    fld fa5, 0(a3)
; RV32D-ILP32D-NEXT:    addi a0, a0, 15
; RV32D-ILP32D-NEXT:    sw a0, 20(sp)
; RV32D-ILP32D-NEXT:    fsd fa5, 8(sp)
; RV32D-ILP32D-NEXT:    lw a3, 8(sp)
; RV32D-ILP32D-NEXT:    lw a4, 12(sp)
; RV32D-ILP32D-NEXT:    add a0, a1, a3
; RV32D-ILP32D-NEXT:    sltu a1, a0, a3
; RV32D-ILP32D-NEXT:    add a2, a2, a4
; RV32D-ILP32D-NEXT:    add a1, a2, a1
; RV32D-ILP32D-NEXT:    addi sp, sp, 48
; RV32D-ILP32D-NEXT:    ret
;
; RV64-LABEL: va3:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -64
; RV64-NEXT:    sd a2, 16(sp)
; RV64-NEXT:    sd a3, 24(sp)
; RV64-NEXT:    sd a4, 32(sp)
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    ld a0, 8(sp)
; RV64-NEXT:    sd a5, 40(sp)
; RV64-NEXT:    sd a6, 48(sp)
; RV64-NEXT:    sd a7, 56(sp)
; RV64-NEXT:    addi a2, a0, 7
; RV64-NEXT:    andi a2, a2, -8
; RV64-NEXT:    addi a0, a0, 15
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    ld a0, 0(a2)
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    addi sp, sp, 64
; RV64-NEXT:    ret
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %argp.cur = load ptr, ptr %va
  %ptrint = ptrtoint ptr %argp.cur to iXLen
  %1 = add iXLen %ptrint, 7
  %2 = and iXLen %1, -8
  %argp.cur.aligned = inttoptr iXLen %1 to ptr
  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
  store ptr %argp.next, ptr %va
  %3 = inttoptr iXLen %2 to ptr
  %4 = load double, ptr %3, align 8
  call void @llvm.va_end(ptr %va)
  %5 = bitcast double %4 to i64
  %6 = add i64 %b, %5
  ret i64 %6
}

; This test is slightly different from the SelectionDAG counterpart because
; narrowScalar and widenScalar for G_VAARG on types outside of [s32, sXLen]
; are not implemented yet.
define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
; RV32-LABEL: va3_va_arg:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -32
; RV32-NEXT:    sw a3, 12(sp)
; RV32-NEXT:    sw a4, 16(sp)
; RV32-NEXT:    sw a5, 20(sp)
; RV32-NEXT:    sw a6, 24(sp)
; RV32-NEXT:    sw a7, 28(sp)
; RV32-NEXT:    addi a0, sp, 12
; RV32-NEXT:    sw a0, 4(sp)
; RV32-NEXT:    lw a0, 4(sp)
; RV32-NEXT:    addi a0, a0, 3
; RV32-NEXT:    andi a0, a0, -4
; RV32-NEXT:    addi a3, a0, 4
; RV32-NEXT:    sw a3, 4(sp)
; RV32-NEXT:    lw a3, 0(a0)
; RV32-NEXT:    add a0, a1, a3
; RV32-NEXT:    sltu a1, a0, a3
; RV32-NEXT:    add a1, a2, a1
; RV32-NEXT:    addi sp, sp, 32
; RV32-NEXT:    ret
;
; RV64-LABEL: va3_va_arg:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -64
; RV64-NEXT:    sd a2, 16(sp)
; RV64-NEXT:    sd a3, 24(sp)
; RV64-NEXT:    sd a4, 32(sp)
; RV64-NEXT:    sd a5, 40(sp)
; RV64-NEXT:    sd a6, 48(sp)
; RV64-NEXT:    sd a7, 56(sp)
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    ld a0, 8(sp)
; RV64-NEXT:    addi a0, a0, 3
; RV64-NEXT:    andi a0, a0, -4
; RV64-NEXT:    addi a2, a0, 4
; RV64-NEXT:    sd a2, 8(sp)
; RV64-NEXT:    lw a0, 0(a0)
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    srli a0, a0, 32
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    addi sp, sp, 64
; RV64-NEXT:    ret
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %1 = va_arg ptr %va, i32
  call void @llvm.va_end(ptr %va)
  %2 = zext i32 %1 to i64
  %3 = add i64 %b, %2
  ret i64 %3
}

define void @va3_caller() nounwind {
; RV32-LABEL: va3_caller:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    lui a0, 5
; RV32-NEXT:    addi a3, a0, -480
; RV32-NEXT:    li a0, 2
; RV32-NEXT:    li a1, 1111
; RV32-NEXT:    li a2, 0
; RV32-NEXT:    call va3
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: va3_caller:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    lui a0, 5
; RV64-NEXT:    addiw a2, a0, -480
; RV64-NEXT:    li a0, 2
; RV64-NEXT:    li a1, 1111
; RV64-NEXT:    call va3
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
  %1 = call i64 (i32, i64, ...) @va3(i32 2, i64 1111, i32 20000)
  ret void
}

declare void @llvm.va_copy(ptr, ptr)
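
; A hedged C-level sketch of va4_va_copy below (illustrative only, not part
; of the original test; assumes four int varargs and a pointer-typed va_list,
; as on RISC-V):
;
;   #include <stdarg.h>
;   void notdead(void *);
;   int va4_va_copy(int argno, ...) {
;     va_list vargs, wargs;
;     va_start(vargs, argno);
;     int v1 = va_arg(vargs, int);
;     va_copy(wargs, vargs);
;     notdead(wargs);
;     int v2 = va_arg(vargs, int);
;     int v3 = va_arg(vargs, int);
;     int v4 = va_arg(vargs, int);
;     va_end(vargs);
;     va_end(wargs);
;     return ((v2 + v1) + v3) + v4;
;   }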

define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV32-LABEL: va4_va_copy:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -48
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw a1, 20(sp)
; RV32-NEXT:    sw a2, 24(sp)
; RV32-NEXT:    sw a3, 28(sp)
; RV32-NEXT:    sw a4, 32(sp)
; RV32-NEXT:    sw a5, 36(sp)
; RV32-NEXT:    sw a6, 40(sp)
; RV32-NEXT:    sw a7, 44(sp)
; RV32-NEXT:    addi a0, sp, 20
; RV32-NEXT:    sw a0, 4(sp)
; RV32-NEXT:    lw a0, 4(sp)
; RV32-NEXT:    addi a0, a0, 3
; RV32-NEXT:    andi a0, a0, -4
; RV32-NEXT:    addi a1, a0, 4
; RV32-NEXT:    sw a1, 4(sp)
; RV32-NEXT:    lw a1, 4(sp)
; RV32-NEXT:    mv a2, sp
; RV32-NEXT:    lw s0, 0(a0)
; RV32-NEXT:    sw a2, 0(a1)
; RV32-NEXT:    lw a0, 0(sp)
; RV32-NEXT:    call notdead
; RV32-NEXT:    lw a0, 4(sp)
; RV32-NEXT:    addi a0, a0, 3
; RV32-NEXT:    andi a0, a0, -4
; RV32-NEXT:    addi a1, a0, 4
; RV32-NEXT:    sw a1, 4(sp)
; RV32-NEXT:    lw a1, 4(sp)
; RV32-NEXT:    lw a0, 0(a0)
; RV32-NEXT:    addi a1, a1, 3
; RV32-NEXT:    andi a1, a1, -4
; RV32-NEXT:    addi a2, a1, 4
; RV32-NEXT:    sw a2, 4(sp)
; RV32-NEXT:    lw a2, 4(sp)
; RV32-NEXT:    lw a1, 0(a1)
; RV32-NEXT:    addi a2, a2, 3
; RV32-NEXT:    andi a2, a2, -4
; RV32-NEXT:    addi a3, a2, 4
; RV32-NEXT:    sw a3, 4(sp)
; RV32-NEXT:    lw a2, 0(a2)
; RV32-NEXT:    add a0, a0, s0
; RV32-NEXT:    add a1, a1, a2
; RV32-NEXT:    add a0, a0, a1
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 48
; RV32-NEXT:    ret
;
; RV64-LABEL: va4_va_copy:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -96
; RV64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd a1, 40(sp)
; RV64-NEXT:    sd a2, 48(sp)
; RV64-NEXT:    sd a3, 56(sp)
; RV64-NEXT:    sd a4, 64(sp)
; RV64-NEXT:    sd a5, 72(sp)
; RV64-NEXT:    sd a6, 80(sp)
; RV64-NEXT:    sd a7, 88(sp)
; RV64-NEXT:    addi a0, sp, 40
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    ld a0, 8(sp)
; RV64-NEXT:    addi a0, a0, 3
; RV64-NEXT:    andi a0, a0, -4
; RV64-NEXT:    addi a1, a0, 4
; RV64-NEXT:    sd a1, 8(sp)
; RV64-NEXT:    ld a1, 8(sp)
; RV64-NEXT:    mv a2, sp
; RV64-NEXT:    lw s0, 0(a0)
; RV64-NEXT:    sd a2, 0(a1)
; RV64-NEXT:    lw a0, 4(sp)
; RV64-NEXT:    lwu a1, 0(sp)
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    or a0, a0, a1
; RV64-NEXT:    call notdead
; RV64-NEXT:    ld a0, 8(sp)
; RV64-NEXT:    addi a0, a0, 3
; RV64-NEXT:    andi a0, a0, -4
; RV64-NEXT:    addi a1, a0, 4
; RV64-NEXT:    sd a1, 8(sp)
; RV64-NEXT:    ld a1, 8(sp)
; RV64-NEXT:    lw a0, 0(a0)
; RV64-NEXT:    addi a1, a1, 3
; RV64-NEXT:    andi a1, a1, -4
; RV64-NEXT:    addi a2, a1, 4
; RV64-NEXT:    sd a2, 8(sp)
; RV64-NEXT:    ld a2, 8(sp)
; RV64-NEXT:    lw a1, 0(a1)
; RV64-NEXT:    addi a2, a2, 3
; RV64-NEXT:    andi a2, a2, -4
; RV64-NEXT:    addi a3, a2, 4
; RV64-NEXT:    sd a3, 8(sp)
; RV64-NEXT:    lw a2, 0(a2)
; RV64-NEXT:    add a0, a0, s0
; RV64-NEXT:    add a1, a1, a2
; RV64-NEXT:    addw a0, a0, a1
; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 96
; RV64-NEXT:    ret
  %vargs = alloca ptr
  %wargs = alloca ptr
  call void @llvm.va_start(ptr %vargs)
  %1 = va_arg ptr %vargs, i32
  call void @llvm.va_copy(ptr %wargs, ptr %vargs)
  %2 = load ptr, ptr %wargs, align 4
  call void @notdead(ptr %2)
  %3 = va_arg ptr %vargs, i32
  %4 = va_arg ptr %vargs, i32
  %5 = va_arg ptr %vargs, i32
  call void @llvm.va_end(ptr %vargs)
  call void @llvm.va_end(ptr %wargs)
  %add1 = add i32 %3, %1
  %add2 = add i32 %add1, %4
  %add3 = add i32 %add2, %5
  ret i32 %add3
}

; The va5_aligned_stack_callee and caller functions are omitted from this file
; since they were not included in the IR lowering test when vararg calls were
; first supported.

; A function with no fixed arguments is not valid C, but can be
; specified in LLVM IR. We must ensure the vararg save area is
; still set up correctly.
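;
; Such a function can still be reached from IR, e.g. (a hedged sketch, not
; part of the original test):
;
;   %r = call i32 (...) @va6_no_fixed_args(i32 1, i32 2, i32 3)
;
; Any of the eight integer argument registers may carry varargs, so the
; callee below spills a0-a7 into the save area.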

define i32 @va6_no_fixed_args(...) nounwind {
; RV32-LABEL: va6_no_fixed_args:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -48
; RV32-NEXT:    sw a0, 16(sp)
; RV32-NEXT:    sw a1, 20(sp)
; RV32-NEXT:    sw a2, 24(sp)
; RV32-NEXT:    sw a3, 28(sp)
; RV32-NEXT:    sw a4, 32(sp)
; RV32-NEXT:    sw a5, 36(sp)
; RV32-NEXT:    sw a6, 40(sp)
; RV32-NEXT:    sw a7, 44(sp)
; RV32-NEXT:    addi a0, sp, 16
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    lw a0, 12(sp)
; RV32-NEXT:    addi a0, a0, 3
; RV32-NEXT:    andi a0, a0, -4
; RV32-NEXT:    addi a1, a0, 4
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    lw a0, 0(a0)
; RV32-NEXT:    addi sp, sp, 48
; RV32-NEXT:    ret
;
; RV64-LABEL: va6_no_fixed_args:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -80
; RV64-NEXT:    sd a0, 16(sp)
; RV64-NEXT:    sd a1, 24(sp)
; RV64-NEXT:    sd a2, 32(sp)
; RV64-NEXT:    sd a3, 40(sp)
; RV64-NEXT:    sd a4, 48(sp)
; RV64-NEXT:    sd a5, 56(sp)
; RV64-NEXT:    sd a6, 64(sp)
; RV64-NEXT:    sd a7, 72(sp)
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    ld a0, 8(sp)
; RV64-NEXT:    addi a0, a0, 3
; RV64-NEXT:    andi a0, a0, -4
; RV64-NEXT:    addi a1, a0, 4
; RV64-NEXT:    sd a1, 8(sp)
; RV64-NEXT:    lw a0, 0(a0)
; RV64-NEXT:    addi sp, sp, 80
; RV64-NEXT:    ret
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %1 = va_arg ptr %va, i32
  call void @llvm.va_end(ptr %va)
  ret i32 %1
}

; TODO: improve constant materialization of stack addresses
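;
; For reference, the frame here is the 100000000-byte alloca plus the vararg
; save area and va_list slot: 100000048 bytes on RV32 and 100000080 on RV64.
; These constants are materialized as lui+addi pairs, e.g. on RV32:
;
;   24414 * 4096 + 304 = 100000048
;
; hence the "lui a0, 24414" / "addi a0, a0, 304" sequence below.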

define i32 @va_large_stack(ptr %fmt, ...) {
; RV32-LABEL: va_large_stack:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 24414
; RV32-NEXT:    addi a0, a0, 304
; RV32-NEXT:    sub sp, sp, a0
; RV32-NEXT:    .cfi_def_cfa_offset 100000048
; RV32-NEXT:    lui a0, 24414
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    sw a1, 276(a0)
; RV32-NEXT:    lui a0, 24414
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    sw a2, 280(a0)
; RV32-NEXT:    lui a0, 24414
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    sw a3, 284(a0)
; RV32-NEXT:    lui a0, 24414
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    sw a4, 288(a0)
; RV32-NEXT:    lui a0, 24414
; RV32-NEXT:    addi a0, a0, 276
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    lw a0, 12(sp)
; RV32-NEXT:    lui a1, 24414
; RV32-NEXT:    add a1, sp, a1
; RV32-NEXT:    sw a5, 292(a1)
; RV32-NEXT:    lui a1, 24414
; RV32-NEXT:    add a1, sp, a1
; RV32-NEXT:    sw a6, 296(a1)
; RV32-NEXT:    lui a1, 24414
; RV32-NEXT:    add a1, sp, a1
; RV32-NEXT:    sw a7, 300(a1)
; RV32-NEXT:    addi a1, a0, 4
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    lw a0, 0(a0)
; RV32-NEXT:    lui a1, 24414
; RV32-NEXT:    addi a1, a1, 304
; RV32-NEXT:    add sp, sp, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: va_large_stack:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 24414
; RV64-NEXT:    addiw a0, a0, 336
; RV64-NEXT:    sub sp, sp, a0
; RV64-NEXT:    .cfi_def_cfa_offset 100000080
; RV64-NEXT:    lui a0, 24414
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    sd a1, 280(a0)
; RV64-NEXT:    lui a0, 24414
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    sd a2, 288(a0)
; RV64-NEXT:    lui a0, 24414
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    sd a3, 296(a0)
; RV64-NEXT:    lui a0, 24414
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    sd a4, 304(a0)
; RV64-NEXT:    lui a0, 24414
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    sd a5, 312(a0)
; RV64-NEXT:    lui a0, 24414
; RV64-NEXT:    addiw a0, a0, 280
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    sd a0, 8(sp)
; RV64-NEXT:    lw a0, 12(sp)
; RV64-NEXT:    lwu a1, 8(sp)
; RV64-NEXT:    lui a2, 24414
; RV64-NEXT:    add a2, sp, a2
; RV64-NEXT:    sd a6, 320(a2)
; RV64-NEXT:    lui a2, 24414
; RV64-NEXT:    add a2, sp, a2
; RV64-NEXT:    sd a7, 328(a2)
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    or a0, a0, a1
; RV64-NEXT:    addi a1, a0, 4
; RV64-NEXT:    srli a2, a1, 32
; RV64-NEXT:    sw a1, 8(sp)
; RV64-NEXT:    sw a2, 12(sp)
; RV64-NEXT:    lw a0, 0(a0)
; RV64-NEXT:    lui a1, 24414
; RV64-NEXT:    addiw a1, a1, 336
; RV64-NEXT:    add sp, sp, a1
; RV64-NEXT:    ret
  %large = alloca [ 100000000 x i8 ]
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %argp.cur = load ptr, ptr %va, align 4
  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
  store ptr %argp.next, ptr %va, align 4
  %1 = load i32, ptr %argp.cur, align 4
  call void @llvm.va_end(ptr %va)
  ret i32 %1
}