1 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
2 ; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
3 ; RUN: | FileCheck -check-prefixes=RV32,ILP32 %s
4 ; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -mattr=+d -verify-machineinstrs < %s \
5 ; RUN: | FileCheck -check-prefixes=RV32,RV32D-ILP32 %s
6 ; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -mattr=+d -target-abi ilp32f \
7 ; RUN: -verify-machineinstrs < %s \
8 ; RUN: | FileCheck -check-prefixes=RV32,RV32D-ILP32F %s
9 ; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -mattr=+d -target-abi ilp32d \
10 ; RUN: -verify-machineinstrs < %s \
11 ; RUN: | FileCheck -check-prefixes=RV32,RV32D-ILP32D %s
12 ; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
13 ; RUN: | FileCheck -check-prefixes=RV64,LP64 %s
14 ; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -mattr=+d -target-abi lp64f \
15 ; RUN: -verify-machineinstrs < %s \
16 ; RUN: | FileCheck -check-prefixes=RV64,LP64F %s
17 ; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -mattr=+d -target-abi lp64d \
18 ; RUN: -verify-machineinstrs < %s \
19 ; RUN: | FileCheck -check-prefixes=RV64,LP64D %s
21 ; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
22 ; lp64/lp64f/lp64d. Different CHECK lines are required due to slight
23 ; codegen differences in the way the f64 load operations are lowered and
24 ; because the PseudoCALL specifies the calling convention.
25 ; The nounwind attribute is omitted for some of the tests, to check that CFI
26 ; directives are correctly generated.
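; As an illustrative sketch (not a generated check line), the PseudoCALL
; difference mentioned above shows up as the callee-saved-register mask on the
; call pseudo, e.g. in the checks for the calls below:
;   PseudoCALL target-flags(riscv-call) @va1, csr_ilp32_lp64, ...    (ilp32/lp64)
;   PseudoCALL target-flags(riscv-call) @va1, csr_ilp32d_lp64d, ...  (ilp32d/lp64d)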
28 declare void @llvm.va_start(ptr)
29 declare void @llvm.va_end(ptr)
31 declare void @notdead(ptr)
33 ; Although frontends are recommended to not generate va_arg due to the lack of
34 ; support for aggregate types, we test simple cases here to ensure they are
35 ; lowered correctly.
37 define i32 @va1(ptr %fmt, ...) {
38 ; RV32-LABEL: name: va1
39 ; RV32: bb.1 (%ir-block.0):
40 ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
41 ; RV32-NEXT: {{ $}}
42 ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
43 ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
44 ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
45 ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
46 ; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
47 ; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
48 ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
49 ; RV32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
50 ; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
51 ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
52 ; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
53 ; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
54 ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
55 ; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
56 ; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
57 ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
58 ; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
59 ; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
60 ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
61 ; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
62 ; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
63 ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
64 ; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
65 ; RV32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
66 ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
67 ; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
68 ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (p0) from %ir.va)
69 ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
70 ; RV32-NEXT: %20:_(p0) = nuw nusw G_PTR_ADD [[LOAD]], [[C1]](s32)
71 ; RV32-NEXT: G_STORE %20(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
72 ; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
73 ; RV32-NEXT: $x10 = COPY [[LOAD1]](s32)
74 ; RV32-NEXT: PseudoRET implicit $x10
76 ; RV64-LABEL: name: va1
77 ; RV64: bb.1 (%ir-block.0):
78 ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
79 ; RV64-NEXT: {{ $}}
80 ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
81 ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
82 ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
83 ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
84 ; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
85 ; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
86 ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
87 ; RV64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
88 ; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
89 ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
90 ; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
91 ; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
92 ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
93 ; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
94 ; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
95 ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
96 ; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
97 ; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
98 ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
99 ; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
100 ; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
101 ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
102 ; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
103 ; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
104 ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
105 ; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
106 ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (p0) from %ir.va, align 4)
107 ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
108 ; RV64-NEXT: %20:_(p0) = nuw nusw G_PTR_ADD [[LOAD]], [[C1]](s64)
109 ; RV64-NEXT: G_STORE %20(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
110 ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
111 ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
112 ; RV64-NEXT: $x10 = COPY [[ANYEXT]](s64)
113 ; RV64-NEXT: PseudoRET implicit $x10
114 %va = alloca ptr
115 call void @llvm.va_start(ptr %va)
116 %argp.cur = load ptr, ptr %va, align 4
117 %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
118 store ptr %argp.next, ptr %va, align 4
119 %1 = load i32, ptr %argp.cur, align 4
120 call void @llvm.va_end(ptr %va)
121 ret i32 %1
122 }
124 ; Ensure the adjustment when restoring the stack pointer using the frame
125 ; pointer is correct.
126 define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
127 ; ILP32-LABEL: name: va1_va_arg_alloca
128 ; ILP32: bb.1 (%ir-block.0):
129 ; ILP32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
130 ; ILP32-NEXT: {{ $}}
131 ; ILP32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
132 ; ILP32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
133 ; ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
134 ; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
135 ; ILP32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
136 ; ILP32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
137 ; ILP32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
138 ; ILP32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
139 ; ILP32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
140 ; ILP32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
141 ; ILP32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
142 ; ILP32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
143 ; ILP32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
144 ; ILP32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
145 ; ILP32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
146 ; ILP32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
147 ; ILP32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
148 ; ILP32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
149 ; ILP32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
150 ; ILP32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
151 ; ILP32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
152 ; ILP32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
153 ; ILP32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
154 ; ILP32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
155 ; ILP32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
156 ; ILP32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
157 ; ILP32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
158 ; ILP32-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
159 ; ILP32-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VAARG]], [[C1]]
160 ; ILP32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
161 ; ILP32-NEXT: [[ADD:%[0-9]+]]:_(s32) = nuw G_ADD [[MUL]], [[C2]]
162 ; ILP32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -16
163 ; ILP32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C3]]
164 ; ILP32-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
165 ; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
166 ; ILP32-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
167 ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
168 ; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
169 ; ILP32-NEXT: $x10 = COPY [[VAARG]](s32)
170 ; ILP32-NEXT: PseudoRET implicit $x10
172 ; RV32D-ILP32-LABEL: name: va1_va_arg_alloca
173 ; RV32D-ILP32: bb.1 (%ir-block.0):
174 ; RV32D-ILP32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
175 ; RV32D-ILP32-NEXT: {{ $}}
176 ; RV32D-ILP32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
177 ; RV32D-ILP32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
178 ; RV32D-ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
179 ; RV32D-ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
180 ; RV32D-ILP32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
181 ; RV32D-ILP32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
182 ; RV32D-ILP32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
183 ; RV32D-ILP32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
184 ; RV32D-ILP32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
185 ; RV32D-ILP32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
186 ; RV32D-ILP32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
187 ; RV32D-ILP32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
188 ; RV32D-ILP32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
189 ; RV32D-ILP32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
190 ; RV32D-ILP32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
191 ; RV32D-ILP32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
192 ; RV32D-ILP32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
193 ; RV32D-ILP32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
194 ; RV32D-ILP32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
195 ; RV32D-ILP32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
196 ; RV32D-ILP32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
197 ; RV32D-ILP32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
198 ; RV32D-ILP32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
199 ; RV32D-ILP32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
200 ; RV32D-ILP32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
201 ; RV32D-ILP32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
202 ; RV32D-ILP32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
203 ; RV32D-ILP32-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
204 ; RV32D-ILP32-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VAARG]], [[C1]]
205 ; RV32D-ILP32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
206 ; RV32D-ILP32-NEXT: [[ADD:%[0-9]+]]:_(s32) = nuw G_ADD [[MUL]], [[C2]]
207 ; RV32D-ILP32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -16
208 ; RV32D-ILP32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C3]]
209 ; RV32D-ILP32-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
210 ; RV32D-ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
211 ; RV32D-ILP32-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
212 ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
213 ; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
214 ; RV32D-ILP32-NEXT: $x10 = COPY [[VAARG]](s32)
215 ; RV32D-ILP32-NEXT: PseudoRET implicit $x10
217 ; RV32D-ILP32F-LABEL: name: va1_va_arg_alloca
218 ; RV32D-ILP32F: bb.1 (%ir-block.0):
219 ; RV32D-ILP32F-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
220 ; RV32D-ILP32F-NEXT: {{ $}}
221 ; RV32D-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
222 ; RV32D-ILP32F-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
223 ; RV32D-ILP32F-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
224 ; RV32D-ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
225 ; RV32D-ILP32F-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
226 ; RV32D-ILP32F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
227 ; RV32D-ILP32F-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
228 ; RV32D-ILP32F-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
229 ; RV32D-ILP32F-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
230 ; RV32D-ILP32F-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
231 ; RV32D-ILP32F-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
232 ; RV32D-ILP32F-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
233 ; RV32D-ILP32F-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
234 ; RV32D-ILP32F-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
235 ; RV32D-ILP32F-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
236 ; RV32D-ILP32F-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
237 ; RV32D-ILP32F-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
238 ; RV32D-ILP32F-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
239 ; RV32D-ILP32F-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
240 ; RV32D-ILP32F-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
241 ; RV32D-ILP32F-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
242 ; RV32D-ILP32F-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
243 ; RV32D-ILP32F-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
244 ; RV32D-ILP32F-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
245 ; RV32D-ILP32F-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
246 ; RV32D-ILP32F-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
247 ; RV32D-ILP32F-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
248 ; RV32D-ILP32F-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
249 ; RV32D-ILP32F-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VAARG]], [[C1]]
250 ; RV32D-ILP32F-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
251 ; RV32D-ILP32F-NEXT: [[ADD:%[0-9]+]]:_(s32) = nuw G_ADD [[MUL]], [[C2]]
252 ; RV32D-ILP32F-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -16
253 ; RV32D-ILP32F-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C3]]
254 ; RV32D-ILP32F-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
255 ; RV32D-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
256 ; RV32D-ILP32F-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
257 ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
258 ; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
259 ; RV32D-ILP32F-NEXT: $x10 = COPY [[VAARG]](s32)
260 ; RV32D-ILP32F-NEXT: PseudoRET implicit $x10
262 ; RV32D-ILP32D-LABEL: name: va1_va_arg_alloca
263 ; RV32D-ILP32D: bb.1 (%ir-block.0):
264 ; RV32D-ILP32D-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
265 ; RV32D-ILP32D-NEXT: {{ $}}
266 ; RV32D-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
267 ; RV32D-ILP32D-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
268 ; RV32D-ILP32D-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
269 ; RV32D-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
270 ; RV32D-ILP32D-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
271 ; RV32D-ILP32D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
272 ; RV32D-ILP32D-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
273 ; RV32D-ILP32D-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
274 ; RV32D-ILP32D-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
275 ; RV32D-ILP32D-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
276 ; RV32D-ILP32D-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
277 ; RV32D-ILP32D-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
278 ; RV32D-ILP32D-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
279 ; RV32D-ILP32D-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
280 ; RV32D-ILP32D-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
281 ; RV32D-ILP32D-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
282 ; RV32D-ILP32D-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
283 ; RV32D-ILP32D-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
284 ; RV32D-ILP32D-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
285 ; RV32D-ILP32D-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
286 ; RV32D-ILP32D-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
287 ; RV32D-ILP32D-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
288 ; RV32D-ILP32D-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
289 ; RV32D-ILP32D-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
290 ; RV32D-ILP32D-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
291 ; RV32D-ILP32D-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
292 ; RV32D-ILP32D-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
293 ; RV32D-ILP32D-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
294 ; RV32D-ILP32D-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VAARG]], [[C1]]
295 ; RV32D-ILP32D-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
296 ; RV32D-ILP32D-NEXT: [[ADD:%[0-9]+]]:_(s32) = nuw G_ADD [[MUL]], [[C2]]
297 ; RV32D-ILP32D-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -16
298 ; RV32D-ILP32D-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C3]]
299 ; RV32D-ILP32D-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
300 ; RV32D-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
301 ; RV32D-ILP32D-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
302 ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
303 ; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
304 ; RV32D-ILP32D-NEXT: $x10 = COPY [[VAARG]](s32)
305 ; RV32D-ILP32D-NEXT: PseudoRET implicit $x10
307 ; LP64-LABEL: name: va1_va_arg_alloca
308 ; LP64: bb.1 (%ir-block.0):
309 ; LP64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
310 ; LP64-NEXT: {{ $}}
311 ; LP64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
312 ; LP64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
313 ; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
314 ; LP64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
315 ; LP64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
316 ; LP64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
317 ; LP64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
318 ; LP64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
319 ; LP64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
320 ; LP64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
321 ; LP64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
322 ; LP64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
323 ; LP64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
324 ; LP64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
325 ; LP64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
326 ; LP64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
327 ; LP64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
328 ; LP64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
329 ; LP64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
330 ; LP64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
331 ; LP64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
332 ; LP64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
333 ; LP64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
334 ; LP64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
335 ; LP64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
336 ; LP64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
337 ; LP64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
338 ; LP64-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
339 ; LP64-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[VAARG]](s32)
340 ; LP64-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C1]]
341 ; LP64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
342 ; LP64-NEXT: [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C2]]
343 ; LP64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
344 ; LP64-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C3]]
345 ; LP64-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
346 ; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
347 ; LP64-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
348 ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
349 ; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
350 ; LP64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
351 ; LP64-NEXT: $x10 = COPY [[ANYEXT]](s64)
352 ; LP64-NEXT: PseudoRET implicit $x10
354 ; LP64F-LABEL: name: va1_va_arg_alloca
355 ; LP64F: bb.1 (%ir-block.0):
356 ; LP64F-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
357 ; LP64F-NEXT: {{ $}}
358 ; LP64F-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
359 ; LP64F-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
360 ; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
361 ; LP64F-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
362 ; LP64F-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
363 ; LP64F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
364 ; LP64F-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
365 ; LP64F-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
366 ; LP64F-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
367 ; LP64F-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
368 ; LP64F-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
369 ; LP64F-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
370 ; LP64F-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
371 ; LP64F-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
372 ; LP64F-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
373 ; LP64F-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
374 ; LP64F-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
375 ; LP64F-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
376 ; LP64F-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
377 ; LP64F-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
378 ; LP64F-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
379 ; LP64F-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
380 ; LP64F-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
381 ; LP64F-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
382 ; LP64F-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
383 ; LP64F-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
384 ; LP64F-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
385 ; LP64F-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
386 ; LP64F-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[VAARG]](s32)
387 ; LP64F-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C1]]
388 ; LP64F-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
389 ; LP64F-NEXT: [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C2]]
390 ; LP64F-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
391 ; LP64F-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C3]]
392 ; LP64F-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
393 ; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
394 ; LP64F-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
395 ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
396 ; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
397 ; LP64F-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
398 ; LP64F-NEXT: $x10 = COPY [[ANYEXT]](s64)
399 ; LP64F-NEXT: PseudoRET implicit $x10
401 ; LP64D-LABEL: name: va1_va_arg_alloca
402 ; LP64D: bb.1 (%ir-block.0):
403 ; LP64D-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
404 ; LP64D-NEXT: {{ $}}
405 ; LP64D-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
406 ; LP64D-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
407 ; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
408 ; LP64D-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
409 ; LP64D-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
410 ; LP64D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
411 ; LP64D-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
412 ; LP64D-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
413 ; LP64D-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
414 ; LP64D-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
415 ; LP64D-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
416 ; LP64D-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
417 ; LP64D-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
418 ; LP64D-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
419 ; LP64D-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
420 ; LP64D-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
421 ; LP64D-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
422 ; LP64D-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
423 ; LP64D-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
424 ; LP64D-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
425 ; LP64D-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
426 ; LP64D-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
427 ; LP64D-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
428 ; LP64D-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
429 ; LP64D-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
430 ; LP64D-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
431 ; LP64D-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
432 ; LP64D-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
433 ; LP64D-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[VAARG]](s32)
434 ; LP64D-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C1]]
435 ; LP64D-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
436 ; LP64D-NEXT: [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C2]]
437 ; LP64D-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
438 ; LP64D-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C3]]
439 ; LP64D-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
440 ; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
441 ; LP64D-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
442 ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
443 ; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
444 ; LP64D-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
445 ; LP64D-NEXT: $x10 = COPY [[ANYEXT]](s64)
446 ; LP64D-NEXT: PseudoRET implicit $x10
447 %va = alloca ptr
448 call void @llvm.va_start(ptr %va)
449 %1 = va_arg ptr %va, i32
450 %2 = alloca i8, i32 %1
451 call void @notdead(ptr %2)
452 call void @llvm.va_end(ptr %va)
453 ret i32 %1
454 }
457 define i32 @va1_va_arg(ptr %fmt, ...) nounwind {
458 ; RV32-LABEL: name: va1_va_arg
459 ; RV32: bb.1 (%ir-block.0):
460 ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
461 ; RV32-NEXT: {{ $}}
462 ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
463 ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
464 ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
465 ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
466 ; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
467 ; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
468 ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
469 ; RV32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
470 ; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
471 ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
472 ; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
473 ; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
474 ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
475 ; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
476 ; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
477 ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
478 ; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
479 ; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
480 ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
481 ; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
482 ; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
483 ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
484 ; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
485 ; RV32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
486 ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
487 ; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
488 ; RV32-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
489 ; RV32-NEXT: $x10 = COPY [[VAARG]](s32)
490 ; RV32-NEXT: PseudoRET implicit $x10
492 ; RV64-LABEL: name: va1_va_arg
493 ; RV64: bb.1 (%ir-block.0):
494 ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
495 ; RV64-NEXT: {{ $}}
496 ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
497 ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
498 ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
499 ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
500 ; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
501 ; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
502 ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
503 ; RV64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
504 ; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
505 ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
506 ; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
507 ; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
508 ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
509 ; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
510 ; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
511 ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
512 ; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
513 ; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
514 ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
515 ; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
516 ; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
517 ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
518 ; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
519 ; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
520 ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
521 ; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
522 ; RV64-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
523 ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
524 ; RV64-NEXT: $x10 = COPY [[ANYEXT]](s64)
525 ; RV64-NEXT: PseudoRET implicit $x10
526 %va = alloca ptr
527 call void @llvm.va_start(ptr %va)
528 %1 = va_arg ptr %va, i32
529 call void @llvm.va_end(ptr %va)
530 ret i32 %1
531 }
533 define void @va1_caller() nounwind {
534 ; ILP32-LABEL: name: va1_caller
535 ; ILP32: bb.1 (%ir-block.0):
536 ; ILP32-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
537 ; ILP32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
538 ; ILP32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
539 ; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
540 ; ILP32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
541 ; ILP32-NEXT: $x10 = COPY [[DEF]](p0)
542 ; ILP32-NEXT: $x12 = COPY [[UV]](s32)
543 ; ILP32-NEXT: $x13 = COPY [[UV1]](s32)
544 ; ILP32-NEXT: $x14 = COPY [[C1]](s32)
545 ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
546 ; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
547 ; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
548 ; ILP32-NEXT: PseudoRET
550 ; RV32D-ILP32-LABEL: name: va1_caller
551 ; RV32D-ILP32: bb.1 (%ir-block.0):
552 ; RV32D-ILP32-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
553 ; RV32D-ILP32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
554 ; RV32D-ILP32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
555 ; RV32D-ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
556 ; RV32D-ILP32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
557 ; RV32D-ILP32-NEXT: $x10 = COPY [[DEF]](p0)
558 ; RV32D-ILP32-NEXT: $x12 = COPY [[UV]](s32)
559 ; RV32D-ILP32-NEXT: $x13 = COPY [[UV1]](s32)
560 ; RV32D-ILP32-NEXT: $x14 = COPY [[C1]](s32)
561 ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
562 ; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
563 ; RV32D-ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
564 ; RV32D-ILP32-NEXT: PseudoRET
566 ; RV32D-ILP32F-LABEL: name: va1_caller
567 ; RV32D-ILP32F: bb.1 (%ir-block.0):
568 ; RV32D-ILP32F-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
569 ; RV32D-ILP32F-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
570 ; RV32D-ILP32F-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
571 ; RV32D-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
572 ; RV32D-ILP32F-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
573 ; RV32D-ILP32F-NEXT: $x10 = COPY [[DEF]](p0)
574 ; RV32D-ILP32F-NEXT: $x12 = COPY [[UV]](s32)
575 ; RV32D-ILP32F-NEXT: $x13 = COPY [[UV1]](s32)
576 ; RV32D-ILP32F-NEXT: $x14 = COPY [[C1]](s32)
577 ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
578 ; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
579 ; RV32D-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
580 ; RV32D-ILP32F-NEXT: PseudoRET
582 ; RV32D-ILP32D-LABEL: name: va1_caller
583 ; RV32D-ILP32D: bb.1 (%ir-block.0):
584 ; RV32D-ILP32D-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
585 ; RV32D-ILP32D-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
586 ; RV32D-ILP32D-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
587 ; RV32D-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
588 ; RV32D-ILP32D-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
589 ; RV32D-ILP32D-NEXT: $x10 = COPY [[DEF]](p0)
590 ; RV32D-ILP32D-NEXT: $x12 = COPY [[UV]](s32)
591 ; RV32D-ILP32D-NEXT: $x13 = COPY [[UV1]](s32)
592 ; RV32D-ILP32D-NEXT: $x14 = COPY [[C1]](s32)
593 ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
594 ; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
595 ; RV32D-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
596 ; RV32D-ILP32D-NEXT: PseudoRET
598 ; LP64-LABEL: name: va1_caller
599 ; LP64: bb.1 (%ir-block.0):
600 ; LP64-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
601 ; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
602 ; LP64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
603 ; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
604 ; LP64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
605 ; LP64-NEXT: $x10 = COPY [[DEF]](p0)
606 ; LP64-NEXT: $x11 = COPY [[C]](s64)
607 ; LP64-NEXT: $x12 = COPY [[ANYEXT]](s64)
608 ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
609 ; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
610 ; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
611 ; LP64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
612 ; LP64-NEXT: PseudoRET
614 ; LP64F-LABEL: name: va1_caller
615 ; LP64F: bb.1 (%ir-block.0):
616 ; LP64F-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
617 ; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
618 ; LP64F-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
619 ; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
620 ; LP64F-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
621 ; LP64F-NEXT: $x10 = COPY [[DEF]](p0)
622 ; LP64F-NEXT: $x11 = COPY [[C]](s64)
623 ; LP64F-NEXT: $x12 = COPY [[ANYEXT]](s64)
624 ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
625 ; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
626 ; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
627 ; LP64F-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
628 ; LP64F-NEXT: PseudoRET
630 ; LP64D-LABEL: name: va1_caller
631 ; LP64D: bb.1 (%ir-block.0):
632 ; LP64D-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
633 ; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
634 ; LP64D-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
635 ; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
636 ; LP64D-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
637 ; LP64D-NEXT: $x10 = COPY [[DEF]](p0)
638 ; LP64D-NEXT: $x11 = COPY [[C]](s64)
639 ; LP64D-NEXT: $x12 = COPY [[ANYEXT]](s64)
640 ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
641 ; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
642 ; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
643 ; LP64D-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
644 ; LP64D-NEXT: PseudoRET
645 %1 = call i32 (ptr, ...) @va1(ptr undef, double 1.0, i32 2)
646 ret void
647 }
649 ; Ensure that 2x xlen size+alignment varargs are accessed via an "aligned"
650 ; register pair (where the first register is even-numbered).
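; As an illustrative sketch (not a test input), a call such as
;   %res = call i64 (ptr, ...) @va2(ptr undef, double 1.0)
; passes the pointer in a0 and, on RV32, the double vararg in the aligned
; register pair a2/a3 (skipping a1); see the $x12/$x13 copies in the
; va2_caller checks further below.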
652 define i64 @va2(ptr %fmt, ...) nounwind {
653 ; RV32-LABEL: name: va2
654 ; RV32: bb.1 (%ir-block.0):
655 ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
656 ; RV32-NEXT: {{ $}}
657 ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
658 ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
659 ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
660 ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
661 ; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
662 ; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
663 ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
664 ; RV32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
665 ; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
666 ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
667 ; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
668 ; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
669 ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
670 ; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
671 ; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
672 ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
673 ; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
674 ; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
675 ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
676 ; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
677 ; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
678 ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
679 ; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
680 ; RV32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
681 ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
682 ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8
683 ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
684 ; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
685 ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (s32) from %ir.va)
686 ; RV32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[C1]]
687 ; RV32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
688 ; RV32-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
689 ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
690 ; RV32-NEXT: %25:_(p0) = nuw nusw G_PTR_ADD [[INTTOPTR]], [[C3]](s32)
691 ; RV32-NEXT: G_STORE %25(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
692 ; RV32-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
693 ; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
694 ; RV32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](s64)
695 ; RV32-NEXT: $x10 = COPY [[UV]](s32)
696 ; RV32-NEXT: $x11 = COPY [[UV1]](s32)
697 ; RV32-NEXT: PseudoRET implicit $x10, implicit $x11
699 ; RV64-LABEL: name: va2
700 ; RV64: bb.1 (%ir-block.0):
701 ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
702 ; RV64-NEXT: {{ $}}
703 ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
704 ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
705 ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
706 ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
707 ; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
708 ; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
709 ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
710 ; RV64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
711 ; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
712 ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
713 ; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
714 ; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
715 ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
716 ; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
717 ; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
718 ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
719 ; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
720 ; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
721 ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
722 ; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
723 ; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
724 ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
725 ; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
726 ; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
727 ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
728 ; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8
729 ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
730 ; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
731 ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (s32) from %ir.va)
732 ; RV64-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[C1]]
733 ; RV64-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
734 ; RV64-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
735 ; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
736 ; RV64-NEXT: %25:_(p0) = nuw nusw G_PTR_ADD [[INTTOPTR]], [[C3]](s64)
737 ; RV64-NEXT: G_STORE %25(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
738 ; RV64-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
739 ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
740 ; RV64-NEXT: $x10 = COPY [[LOAD1]](s64)
741 ; RV64-NEXT: PseudoRET implicit $x10
742 %va = alloca ptr
743 call void @llvm.va_start(ptr %va)
744 %argp.cur = load i32, ptr %va, align 4
745 %1 = add i32 %argp.cur, 7
746 %2 = and i32 %1, -8
747 %argp.cur.aligned = inttoptr i32 %1 to ptr
748 %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
749 store ptr %argp.next, ptr %va, align 4
750 %3 = inttoptr i32 %2 to ptr
751 %4 = load double, ptr %3, align 8
752 %5 = bitcast double %4 to i64
753 call void @llvm.va_end(ptr %va)
754 ret i64 %5
755 }
757 define i64 @va2_va_arg(ptr %fmt, ...) nounwind {
758 ; RV32-LABEL: name: va2_va_arg
759 ; RV32: bb.1 (%ir-block.0):
760 ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
761 ; RV32-NEXT: {{ $}}
762 ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
763 ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
764 ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
765 ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
766 ; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
767 ; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
768 ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
769 ; RV32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
770 ; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
771 ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
772 ; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
773 ; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
774 ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
775 ; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
776 ; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
777 ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
778 ; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
779 ; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
780 ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
781 ; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
782 ; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
783 ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
784 ; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
785 ; RV32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
786 ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
787 ; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
788 ; RV32-NEXT: [[VAARG:%[0-9]+]]:_(s64) = G_VAARG [[FRAME_INDEX1]](p0), 8
789 ; RV32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[VAARG]](s64)
790 ; RV32-NEXT: $x10 = COPY [[UV]](s32)
791 ; RV32-NEXT: $x11 = COPY [[UV1]](s32)
792 ; RV32-NEXT: PseudoRET implicit $x10, implicit $x11
794 ; RV64-LABEL: name: va2_va_arg
795 ; RV64: bb.1 (%ir-block.0):
796 ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
797 ; RV64-NEXT: {{ $}}
798 ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
799 ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
800 ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
801 ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
802 ; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
803 ; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
804 ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
805 ; RV64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
806 ; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
807 ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
808 ; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
809 ; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
810 ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
811 ; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
812 ; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
813 ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
814 ; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
815 ; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
816 ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
817 ; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
818 ; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
819 ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
820 ; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
821 ; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
822 ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
823 ; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
824 ; RV64-NEXT: [[VAARG:%[0-9]+]]:_(s64) = G_VAARG [[FRAME_INDEX1]](p0), 8
825 ; RV64-NEXT: $x10 = COPY [[VAARG]](s64)
826 ; RV64-NEXT: PseudoRET implicit $x10
827 %va = alloca ptr
828 call void @llvm.va_start(ptr %va)
829 %1 = va_arg ptr %va, double
830 call void @llvm.va_end(ptr %va)
831 %2 = bitcast double %1 to i64
832 ret i64 %2
833 }
835 define void @va2_caller() nounwind {
836 ; ILP32-LABEL: name: va2_caller
837 ; ILP32: bb.1 (%ir-block.0):
838 ; ILP32-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
839 ; ILP32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
840 ; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
841 ; ILP32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
842 ; ILP32-NEXT: $x10 = COPY [[DEF]](p0)
843 ; ILP32-NEXT: $x12 = COPY [[UV]](s32)
844 ; ILP32-NEXT: $x13 = COPY [[UV1]](s32)
845 ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
846 ; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
847 ; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
848 ; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
849 ; ILP32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
850 ; ILP32-NEXT: PseudoRET
852 ; RV32D-ILP32-LABEL: name: va2_caller
853 ; RV32D-ILP32: bb.1 (%ir-block.0):
854 ; RV32D-ILP32-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
855 ; RV32D-ILP32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
856 ; RV32D-ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
857 ; RV32D-ILP32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
858 ; RV32D-ILP32-NEXT: $x10 = COPY [[DEF]](p0)
859 ; RV32D-ILP32-NEXT: $x12 = COPY [[UV]](s32)
860 ; RV32D-ILP32-NEXT: $x13 = COPY [[UV1]](s32)
861 ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
862 ; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
863 ; RV32D-ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
864 ; RV32D-ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
865 ; RV32D-ILP32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
866 ; RV32D-ILP32-NEXT: PseudoRET
868 ; RV32D-ILP32F-LABEL: name: va2_caller
869 ; RV32D-ILP32F: bb.1 (%ir-block.0):
870 ; RV32D-ILP32F-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
871 ; RV32D-ILP32F-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
872 ; RV32D-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
873 ; RV32D-ILP32F-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
874 ; RV32D-ILP32F-NEXT: $x10 = COPY [[DEF]](p0)
875 ; RV32D-ILP32F-NEXT: $x12 = COPY [[UV]](s32)
876 ; RV32D-ILP32F-NEXT: $x13 = COPY [[UV1]](s32)
877 ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
878 ; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
879 ; RV32D-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
880 ; RV32D-ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
881 ; RV32D-ILP32F-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
882 ; RV32D-ILP32F-NEXT: PseudoRET
884 ; RV32D-ILP32D-LABEL: name: va2_caller
885 ; RV32D-ILP32D: bb.1 (%ir-block.0):
886 ; RV32D-ILP32D-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
887 ; RV32D-ILP32D-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
888 ; RV32D-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
889 ; RV32D-ILP32D-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
890 ; RV32D-ILP32D-NEXT: $x10 = COPY [[DEF]](p0)
891 ; RV32D-ILP32D-NEXT: $x12 = COPY [[UV]](s32)
892 ; RV32D-ILP32D-NEXT: $x13 = COPY [[UV1]](s32)
893 ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
894 ; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
895 ; RV32D-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
896 ; RV32D-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
897 ; RV32D-ILP32D-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
898 ; RV32D-ILP32D-NEXT: PseudoRET
900 ; LP64-LABEL: name: va2_caller
901 ; LP64: bb.1 (%ir-block.0):
902 ; LP64-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
903 ; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
904 ; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
905 ; LP64-NEXT: $x10 = COPY [[DEF]](p0)
906 ; LP64-NEXT: $x11 = COPY [[C]](s64)
907 ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
908 ; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
909 ; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
910 ; LP64-NEXT: PseudoRET
912 ; LP64F-LABEL: name: va2_caller
913 ; LP64F: bb.1 (%ir-block.0):
914 ; LP64F-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
915 ; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
916 ; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
917 ; LP64F-NEXT: $x10 = COPY [[DEF]](p0)
918 ; LP64F-NEXT: $x11 = COPY [[C]](s64)
919 ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
920 ; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
921 ; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
922 ; LP64F-NEXT: PseudoRET
924 ; LP64D-LABEL: name: va2_caller
925 ; LP64D: bb.1 (%ir-block.0):
926 ; LP64D-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
927 ; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
928 ; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
929 ; LP64D-NEXT: $x10 = COPY [[DEF]](p0)
930 ; LP64D-NEXT: $x11 = COPY [[C]](s64)
931 ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
932 ; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
933 ; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
934 ; LP64D-NEXT: PseudoRET
935 %1 = call i64 (ptr, ...) @va2(ptr undef, double 1.000000e+00)
936 ret void
937 }
939 ; On RV32, ensure a named 2*xlen argument is passed in a1 and a2, while the
940 ; vararg double is passed in a4 and a5 (rather than a3 and a4).
942 define i64 @va3(i32 %a, i64 %b, ...) nounwind {
943 ; RV32-LABEL: name: va3
944 ; RV32: bb.1 (%ir-block.0):
945 ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
947 ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
948 ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
949 ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
950 ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
951 ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
952 ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
953 ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
954 ; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
955 ; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
956 ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
957 ; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
958 ; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
959 ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
960 ; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
961 ; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
962 ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
963 ; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
964 ; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
965 ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
966 ; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
967 ; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
968 ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
969 ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8
970 ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
971 ; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
972 ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (s32) from %ir.va)
973 ; RV32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[C1]]
974 ; RV32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
975 ; RV32-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
976 ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
977 ; RV32-NEXT: %24:_(p0) = nuw nusw G_PTR_ADD [[INTTOPTR]], [[C3]](s32)
978 ; RV32-NEXT: G_STORE %24(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
979 ; RV32-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
980 ; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
981 ; RV32-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[MV]], [[LOAD1]]
982 ; RV32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ADD1]](s64)
983 ; RV32-NEXT: $x10 = COPY [[UV]](s32)
984 ; RV32-NEXT: $x11 = COPY [[UV1]](s32)
985 ; RV32-NEXT: PseudoRET implicit $x10, implicit $x11
987 ; RV64-LABEL: name: va3
988 ; RV64: bb.1 (%ir-block.0):
989 ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
991 ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
992 ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
993 ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
994 ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
995 ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
996 ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
997 ; RV64-NEXT: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16)
998 ; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
999 ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
1000 ; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8)
1001 ; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
1002 ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
1003 ; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.0 + 16, align 16)
1004 ; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
1005 ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
1006 ; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.0 + 24)
1007 ; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
1008 ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
1009 ; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.0 + 32, align 16)
1010 ; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
1011 ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
1012 ; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.0 + 40)
1013 ; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
1014 ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
1015 ; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8
1016 ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
1017 ; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
1018 ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (s32) from %ir.va)
1019 ; RV64-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[C1]]
1020 ; RV64-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
1021 ; RV64-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
1022 ; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
1023 ; RV64-NEXT: %25:_(p0) = nuw nusw G_PTR_ADD [[INTTOPTR]], [[C3]](s64)
1024 ; RV64-NEXT: G_STORE %25(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
1025 ; RV64-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
1026 ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
1027 ; RV64-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[LOAD1]]
1028 ; RV64-NEXT: $x10 = COPY [[ADD1]](s64)
1029 ; RV64-NEXT: PseudoRET implicit $x10
1030 %va = alloca ptr
1031 call void @llvm.va_start(ptr %va)
1032 %argp.cur = load i32, ptr %va, align 4
1033 %1 = add i32 %argp.cur, 7
1034 %2 = and i32 %1, -8
1035 %argp.cur.aligned = inttoptr i32 %1 to ptr
1036 %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
1037 store ptr %argp.next, ptr %va, align 4
1038 %3 = inttoptr i32 %2 to ptr
1039 %4 = load double, ptr %3, align 8
1040 call void @llvm.va_end(ptr %va)
1041 %5 = bitcast double %4 to i64
1042 %6 = add i64 %b, %5
1043 ret i64 %6
1044 }
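; va3_va_arg repeats the va3 test but reads the vararg via va_arg (translated
; to G_VAARG) instead of manually loading and advancing the va_list pointer.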
1046 define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
1047 ; RV32-LABEL: name: va3_va_arg
1048 ; RV32: bb.1 (%ir-block.0):
1049 ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
1051 ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
1052 ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
1053 ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
1054 ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
1055 ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
1056 ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
1057 ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
1058 ; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
1059 ; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
1060 ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
1061 ; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
1062 ; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
1063 ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
1064 ; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
1065 ; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
1066 ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
1067 ; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
1068 ; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
1069 ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
1070 ; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
1071 ; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
1072 ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
1073 ; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
1074 ; RV32-NEXT: [[VAARG:%[0-9]+]]:_(s64) = G_VAARG [[FRAME_INDEX1]](p0), 8
1075 ; RV32-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MV]], [[VAARG]]
1076 ; RV32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ADD]](s64)
1077 ; RV32-NEXT: $x10 = COPY [[UV]](s32)
1078 ; RV32-NEXT: $x11 = COPY [[UV1]](s32)
1079 ; RV32-NEXT: PseudoRET implicit $x10, implicit $x11
1081 ; RV64-LABEL: name: va3_va_arg
1082 ; RV64: bb.1 (%ir-block.0):
1083 ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
1085 ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
1086 ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
1087 ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
1088 ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
1089 ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
1090 ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
1091 ; RV64-NEXT: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16)
1092 ; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
1093 ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
1094 ; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8)
1095 ; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
1096 ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
1097 ; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.0 + 16, align 16)
1098 ; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
1099 ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
1100 ; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.0 + 24)
1101 ; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
1102 ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
1103 ; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.0 + 32, align 16)
1104 ; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
1105 ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
1106 ; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.0 + 40)
1107 ; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
1108 ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
1109 ; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
1110 ; RV64-NEXT: [[VAARG:%[0-9]+]]:_(s64) = G_VAARG [[FRAME_INDEX1]](p0), 8
1111 ; RV64-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[VAARG]]
1112 ; RV64-NEXT: $x10 = COPY [[ADD]](s64)
1113 ; RV64-NEXT: PseudoRET implicit $x10
1114 %va = alloca ptr
1115 call void @llvm.va_start(ptr %va)
1116 %1 = va_arg ptr %va, double
1117 call void @llvm.va_end(ptr %va)
1118 %2 = bitcast double %1 to i64
1119 %3 = add i64 %b, %2
1120 ret i64 %3
1121 }
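; Caller side of va3: on RV32 the named i64 is passed in a1/a2 and the vararg
; double in the aligned pair a4/a5 (a3 is left unused); on RV64 the arguments
; are passed in a0, a1 and a2.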
1123 define void @va3_caller() nounwind {
1124 ; ILP32-LABEL: name: va3_caller
1125 ; ILP32: bb.1 (%ir-block.0):
1126 ; ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1127 ; ILP32-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
1128 ; ILP32-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
1129 ; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1130 ; ILP32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
1131 ; ILP32-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
1132 ; ILP32-NEXT: $x10 = COPY [[C]](s32)
1133 ; ILP32-NEXT: $x11 = COPY [[UV]](s32)
1134 ; ILP32-NEXT: $x12 = COPY [[UV1]](s32)
1135 ; ILP32-NEXT: $x14 = COPY [[UV2]](s32)
1136 ; ILP32-NEXT: $x15 = COPY [[UV3]](s32)
1137 ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
1138 ; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1139 ; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
1140 ; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
1141 ; ILP32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
1142 ; ILP32-NEXT: PseudoRET
1144 ; RV32D-ILP32-LABEL: name: va3_caller
1145 ; RV32D-ILP32: bb.1 (%ir-block.0):
1146 ; RV32D-ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1147 ; RV32D-ILP32-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
1148 ; RV32D-ILP32-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
1149 ; RV32D-ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1150 ; RV32D-ILP32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
1151 ; RV32D-ILP32-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
1152 ; RV32D-ILP32-NEXT: $x10 = COPY [[C]](s32)
1153 ; RV32D-ILP32-NEXT: $x11 = COPY [[UV]](s32)
1154 ; RV32D-ILP32-NEXT: $x12 = COPY [[UV1]](s32)
1155 ; RV32D-ILP32-NEXT: $x14 = COPY [[UV2]](s32)
1156 ; RV32D-ILP32-NEXT: $x15 = COPY [[UV3]](s32)
1157 ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
1158 ; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1159 ; RV32D-ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
1160 ; RV32D-ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
1161 ; RV32D-ILP32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
1162 ; RV32D-ILP32-NEXT: PseudoRET
1164 ; RV32D-ILP32F-LABEL: name: va3_caller
1165 ; RV32D-ILP32F: bb.1 (%ir-block.0):
1166 ; RV32D-ILP32F-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1167 ; RV32D-ILP32F-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
1168 ; RV32D-ILP32F-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
1169 ; RV32D-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1170 ; RV32D-ILP32F-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
1171 ; RV32D-ILP32F-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
1172 ; RV32D-ILP32F-NEXT: $x10 = COPY [[C]](s32)
1173 ; RV32D-ILP32F-NEXT: $x11 = COPY [[UV]](s32)
1174 ; RV32D-ILP32F-NEXT: $x12 = COPY [[UV1]](s32)
1175 ; RV32D-ILP32F-NEXT: $x14 = COPY [[UV2]](s32)
1176 ; RV32D-ILP32F-NEXT: $x15 = COPY [[UV3]](s32)
1177 ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
1178 ; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1179 ; RV32D-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
1180 ; RV32D-ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
1181 ; RV32D-ILP32F-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
1182 ; RV32D-ILP32F-NEXT: PseudoRET
1184 ; RV32D-ILP32D-LABEL: name: va3_caller
1185 ; RV32D-ILP32D: bb.1 (%ir-block.0):
1186 ; RV32D-ILP32D-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1187 ; RV32D-ILP32D-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
1188 ; RV32D-ILP32D-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
1189 ; RV32D-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1190 ; RV32D-ILP32D-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
1191 ; RV32D-ILP32D-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
1192 ; RV32D-ILP32D-NEXT: $x10 = COPY [[C]](s32)
1193 ; RV32D-ILP32D-NEXT: $x11 = COPY [[UV]](s32)
1194 ; RV32D-ILP32D-NEXT: $x12 = COPY [[UV1]](s32)
1195 ; RV32D-ILP32D-NEXT: $x14 = COPY [[UV2]](s32)
1196 ; RV32D-ILP32D-NEXT: $x15 = COPY [[UV3]](s32)
1197 ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
1198 ; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1199 ; RV32D-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
1200 ; RV32D-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
1201 ; RV32D-ILP32D-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
1202 ; RV32D-ILP32D-NEXT: PseudoRET
1204 ; LP64-LABEL: name: va3_caller
1205 ; LP64: bb.1 (%ir-block.0):
1206 ; LP64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1207 ; LP64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
1208 ; LP64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
1209 ; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1210 ; LP64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
1211 ; LP64-NEXT: $x10 = COPY [[ANYEXT]](s64)
1212 ; LP64-NEXT: $x11 = COPY [[C1]](s64)
1213 ; LP64-NEXT: $x12 = COPY [[C2]](s64)
1214 ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
1215 ; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1216 ; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
1217 ; LP64-NEXT: PseudoRET
1219 ; LP64F-LABEL: name: va3_caller
1220 ; LP64F: bb.1 (%ir-block.0):
1221 ; LP64F-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1222 ; LP64F-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
1223 ; LP64F-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
1224 ; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1225 ; LP64F-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
1226 ; LP64F-NEXT: $x10 = COPY [[ANYEXT]](s64)
1227 ; LP64F-NEXT: $x11 = COPY [[C1]](s64)
1228 ; LP64F-NEXT: $x12 = COPY [[C2]](s64)
1229 ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
1230 ; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1231 ; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
1232 ; LP64F-NEXT: PseudoRET
1234 ; LP64D-LABEL: name: va3_caller
1235 ; LP64D: bb.1 (%ir-block.0):
1236 ; LP64D-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
1237 ; LP64D-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111
1238 ; LP64D-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
1239 ; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1240 ; LP64D-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
1241 ; LP64D-NEXT: $x10 = COPY [[ANYEXT]](s64)
1242 ; LP64D-NEXT: $x11 = COPY [[C1]](s64)
1243 ; LP64D-NEXT: $x12 = COPY [[C2]](s64)
1244 ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
1245 ; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1246 ; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
1247 ; LP64D-NEXT: PseudoRET
1248 %1 = call i64 (i32, i64, ...) @va3(i32 2, i64 1111, double 2.000000e+00)
1249 ret void
1250 }
1252 declare void @llvm.va_copy(ptr, ptr)
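; va4_va_copy checks that llvm.va_copy is translated to a call of the va_copy
; intrinsic and that the copied list (wargs) can be passed to @notdead while
; further va_arg reads continue from the original list (vargs).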
1254 define i32 @va4_va_copy(i32 %argno, ...) nounwind {
1255 ; ILP32-LABEL: name: va4_va_copy
1256 ; ILP32: bb.1 (%ir-block.0):
1257 ; ILP32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
1258 ; ILP32-NEXT: {{ $}}
1259 ; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
1260 ; ILP32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
1261 ; ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
1262 ; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
1263 ; ILP32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
1264 ; ILP32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
1265 ; ILP32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
1266 ; ILP32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
1267 ; ILP32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
1268 ; ILP32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
1269 ; ILP32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
1270 ; ILP32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
1271 ; ILP32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
1272 ; ILP32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
1273 ; ILP32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
1274 ; ILP32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
1275 ; ILP32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
1276 ; ILP32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
1277 ; ILP32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
1278 ; ILP32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
1279 ; ILP32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
1280 ; ILP32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
1281 ; ILP32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
1282 ; ILP32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
1283 ; ILP32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
1284 ; ILP32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
1285 ; ILP32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.vargs)
1286 ; ILP32-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1287 ; ILP32-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0)
1288 ; ILP32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs)
1289 ; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1290 ; ILP32-NEXT: $x10 = COPY [[LOAD]](p0)
1291 ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
1292 ; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1293 ; ILP32-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1294 ; ILP32-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1295 ; ILP32-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1296 ; ILP32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
1297 ; ILP32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
1298 ; ILP32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
1299 ; ILP32-NEXT: $x10 = COPY [[ADD2]](s32)
1300 ; ILP32-NEXT: PseudoRET implicit $x10
1302 ; RV32D-ILP32-LABEL: name: va4_va_copy
1303 ; RV32D-ILP32: bb.1 (%ir-block.0):
1304 ; RV32D-ILP32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
1305 ; RV32D-ILP32-NEXT: {{ $}}
1306 ; RV32D-ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
1307 ; RV32D-ILP32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
1308 ; RV32D-ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
1309 ; RV32D-ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
1310 ; RV32D-ILP32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
1311 ; RV32D-ILP32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
1312 ; RV32D-ILP32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
1313 ; RV32D-ILP32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
1314 ; RV32D-ILP32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
1315 ; RV32D-ILP32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
1316 ; RV32D-ILP32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
1317 ; RV32D-ILP32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
1318 ; RV32D-ILP32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
1319 ; RV32D-ILP32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
1320 ; RV32D-ILP32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
1321 ; RV32D-ILP32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
1322 ; RV32D-ILP32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
1323 ; RV32D-ILP32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
1324 ; RV32D-ILP32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
1325 ; RV32D-ILP32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
1326 ; RV32D-ILP32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
1327 ; RV32D-ILP32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
1328 ; RV32D-ILP32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
1329 ; RV32D-ILP32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
1330 ; RV32D-ILP32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
1331 ; RV32D-ILP32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
1332 ; RV32D-ILP32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.vargs)
1333 ; RV32D-ILP32-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1334 ; RV32D-ILP32-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0)
1335 ; RV32D-ILP32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs)
1336 ; RV32D-ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1337 ; RV32D-ILP32-NEXT: $x10 = COPY [[LOAD]](p0)
1338 ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
1339 ; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1340 ; RV32D-ILP32-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1341 ; RV32D-ILP32-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1342 ; RV32D-ILP32-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1343 ; RV32D-ILP32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
1344 ; RV32D-ILP32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
1345 ; RV32D-ILP32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
1346 ; RV32D-ILP32-NEXT: $x10 = COPY [[ADD2]](s32)
1347 ; RV32D-ILP32-NEXT: PseudoRET implicit $x10
1349 ; RV32D-ILP32F-LABEL: name: va4_va_copy
1350 ; RV32D-ILP32F: bb.1 (%ir-block.0):
1351 ; RV32D-ILP32F-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
1352 ; RV32D-ILP32F-NEXT: {{ $}}
1353 ; RV32D-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
1354 ; RV32D-ILP32F-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
1355 ; RV32D-ILP32F-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
1356 ; RV32D-ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
1357 ; RV32D-ILP32F-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
1358 ; RV32D-ILP32F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
1359 ; RV32D-ILP32F-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
1360 ; RV32D-ILP32F-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
1361 ; RV32D-ILP32F-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
1362 ; RV32D-ILP32F-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
1363 ; RV32D-ILP32F-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
1364 ; RV32D-ILP32F-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
1365 ; RV32D-ILP32F-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
1366 ; RV32D-ILP32F-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
1367 ; RV32D-ILP32F-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
1368 ; RV32D-ILP32F-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
1369 ; RV32D-ILP32F-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
1370 ; RV32D-ILP32F-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
1371 ; RV32D-ILP32F-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
1372 ; RV32D-ILP32F-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
1373 ; RV32D-ILP32F-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
1374 ; RV32D-ILP32F-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
1375 ; RV32D-ILP32F-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
1376 ; RV32D-ILP32F-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
1377 ; RV32D-ILP32F-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
1378 ; RV32D-ILP32F-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
1379 ; RV32D-ILP32F-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.vargs)
1380 ; RV32D-ILP32F-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1381 ; RV32D-ILP32F-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0)
1382 ; RV32D-ILP32F-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs)
1383 ; RV32D-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1384 ; RV32D-ILP32F-NEXT: $x10 = COPY [[LOAD]](p0)
1385 ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
1386 ; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1387 ; RV32D-ILP32F-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1388 ; RV32D-ILP32F-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1389 ; RV32D-ILP32F-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1390 ; RV32D-ILP32F-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
1391 ; RV32D-ILP32F-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
1392 ; RV32D-ILP32F-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
1393 ; RV32D-ILP32F-NEXT: $x10 = COPY [[ADD2]](s32)
1394 ; RV32D-ILP32F-NEXT: PseudoRET implicit $x10
1396 ; RV32D-ILP32D-LABEL: name: va4_va_copy
1397 ; RV32D-ILP32D: bb.1 (%ir-block.0):
1398 ; RV32D-ILP32D-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
1399 ; RV32D-ILP32D-NEXT: {{ $}}
1400 ; RV32D-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
1401 ; RV32D-ILP32D-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
1402 ; RV32D-ILP32D-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
1403 ; RV32D-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
1404 ; RV32D-ILP32D-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
1405 ; RV32D-ILP32D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
1406 ; RV32D-ILP32D-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
1407 ; RV32D-ILP32D-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
1408 ; RV32D-ILP32D-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
1409 ; RV32D-ILP32D-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
1410 ; RV32D-ILP32D-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
1411 ; RV32D-ILP32D-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
1412 ; RV32D-ILP32D-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
1413 ; RV32D-ILP32D-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
1414 ; RV32D-ILP32D-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
1415 ; RV32D-ILP32D-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
1416 ; RV32D-ILP32D-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
1417 ; RV32D-ILP32D-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
1418 ; RV32D-ILP32D-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
1419 ; RV32D-ILP32D-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
1420 ; RV32D-ILP32D-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
1421 ; RV32D-ILP32D-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
1422 ; RV32D-ILP32D-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
1423 ; RV32D-ILP32D-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
1424 ; RV32D-ILP32D-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
1425 ; RV32D-ILP32D-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
1426 ; RV32D-ILP32D-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.vargs)
1427 ; RV32D-ILP32D-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1428 ; RV32D-ILP32D-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0)
1429 ; RV32D-ILP32D-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs)
1430 ; RV32D-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1431 ; RV32D-ILP32D-NEXT: $x10 = COPY [[LOAD]](p0)
1432 ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
1433 ; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1434 ; RV32D-ILP32D-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1435 ; RV32D-ILP32D-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1436 ; RV32D-ILP32D-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1437 ; RV32D-ILP32D-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
1438 ; RV32D-ILP32D-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
1439 ; RV32D-ILP32D-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
1440 ; RV32D-ILP32D-NEXT: $x10 = COPY [[ADD2]](s32)
1441 ; RV32D-ILP32D-NEXT: PseudoRET implicit $x10
1443 ; LP64-LABEL: name: va4_va_copy
1444 ; LP64: bb.1 (%ir-block.0):
1445 ; LP64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
1447 ; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
1448 ; LP64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
1449 ; LP64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
1450 ; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
1451 ; LP64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
1452 ; LP64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
1453 ; LP64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
1454 ; LP64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
1455 ; LP64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
1456 ; LP64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
1457 ; LP64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
1458 ; LP64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
1459 ; LP64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
1460 ; LP64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
1461 ; LP64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
1462 ; LP64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
1463 ; LP64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
1464 ; LP64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
1465 ; LP64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
1466 ; LP64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
1467 ; LP64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
1468 ; LP64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
1469 ; LP64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
1470 ; LP64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
1471 ; LP64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
1472 ; LP64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
1473 ; LP64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
1474 ; LP64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.vargs)
1475 ; LP64-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1476 ; LP64-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0)
1477 ; LP64-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs, align 4)
1478 ; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1479 ; LP64-NEXT: $x10 = COPY [[LOAD]](p0)
1480 ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
1481 ; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1482 ; LP64-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1483 ; LP64-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1484 ; LP64-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1485 ; LP64-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
1486 ; LP64-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
1487 ; LP64-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
1488 ; LP64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD2]](s32)
1489 ; LP64-NEXT: $x10 = COPY [[ANYEXT]](s64)
1490 ; LP64-NEXT: PseudoRET implicit $x10
1492 ; LP64F-LABEL: name: va4_va_copy
1493 ; LP64F: bb.1 (%ir-block.0):
1494 ; LP64F-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
1495 ; LP64F-NEXT: {{ $}}
1496 ; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
1497 ; LP64F-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
1498 ; LP64F-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
1499 ; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
1500 ; LP64F-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
1501 ; LP64F-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
1502 ; LP64F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
1503 ; LP64F-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
1504 ; LP64F-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
1505 ; LP64F-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
1506 ; LP64F-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
1507 ; LP64F-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
1508 ; LP64F-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
1509 ; LP64F-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
1510 ; LP64F-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
1511 ; LP64F-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
1512 ; LP64F-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
1513 ; LP64F-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
1514 ; LP64F-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
1515 ; LP64F-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
1516 ; LP64F-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
1517 ; LP64F-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
1518 ; LP64F-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
1519 ; LP64F-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
1520 ; LP64F-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
1521 ; LP64F-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
1522 ; LP64F-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
1523 ; LP64F-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.vargs)
1524 ; LP64F-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1525 ; LP64F-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0)
1526 ; LP64F-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs, align 4)
1527 ; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1528 ; LP64F-NEXT: $x10 = COPY [[LOAD]](p0)
1529 ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
1530 ; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1531 ; LP64F-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1532 ; LP64F-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1533 ; LP64F-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1534 ; LP64F-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
1535 ; LP64F-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
1536 ; LP64F-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
1537 ; LP64F-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD2]](s32)
1538 ; LP64F-NEXT: $x10 = COPY [[ANYEXT]](s64)
1539 ; LP64F-NEXT: PseudoRET implicit $x10
1541 ; LP64D-LABEL: name: va4_va_copy
1542 ; LP64D: bb.1 (%ir-block.0):
1543 ; LP64D-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
1544 ; LP64D-NEXT: {{ $}}
1545 ; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
1546 ; LP64D-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
1547 ; LP64D-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
1548 ; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
1549 ; LP64D-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
1550 ; LP64D-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
1551 ; LP64D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
1552 ; LP64D-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
1553 ; LP64D-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
1554 ; LP64D-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
1555 ; LP64D-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
1556 ; LP64D-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
1557 ; LP64D-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
1558 ; LP64D-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
1559 ; LP64D-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
1560 ; LP64D-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
1561 ; LP64D-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
1562 ; LP64D-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
1563 ; LP64D-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
1564 ; LP64D-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
1565 ; LP64D-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
1566 ; LP64D-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
1567 ; LP64D-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
1568 ; LP64D-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
1569 ; LP64D-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
1570 ; LP64D-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs
1571 ; LP64D-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs
1572 ; LP64D-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.vargs)
1573 ; LP64D-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1574 ; LP64D-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0)
1575 ; LP64D-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs, align 4)
1576 ; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
1577 ; LP64D-NEXT: $x10 = COPY [[LOAD]](p0)
1578 ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
1579 ; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
1580 ; LP64D-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1581 ; LP64D-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1582 ; LP64D-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1583 ; LP64D-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]]
1584 ; LP64D-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]]
1585 ; LP64D-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]]
1586 ; LP64D-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD2]](s32)
1587 ; LP64D-NEXT: $x10 = COPY [[ANYEXT]](s64)
1588 ; LP64D-NEXT: PseudoRET implicit $x10
1589 %vargs = alloca ptr
1590 %wargs = alloca ptr
1591 call void @llvm.va_start(ptr %vargs)
1592 %1 = va_arg ptr %vargs, i32
1593 call void @llvm.va_copy(ptr %wargs, ptr %vargs)
1594 %2 = load ptr, ptr %wargs, align 4
1595 call void @notdead(ptr %2)
1596 %3 = va_arg ptr %vargs, i32
1597 %4 = va_arg ptr %vargs, i32
1598 %5 = va_arg ptr %vargs, i32
1599 call void @llvm.va_end(ptr %vargs)
1600 call void @llvm.va_end(ptr %wargs)
1601 %add1 = add i32 %3, %1
1602 %add2 = add i32 %add1, %4
1603 %add3 = add i32 %add2, %5
1604 ret i32 %add3
1605 }
1607 ; A function with no fixed arguments is not valid C, but can be
1608 ; specified in LLVM IR. We must ensure the vararg save area is
1609 ; still set up correctly.
1611 define i32 @va6_no_fixed_args(...) nounwind {
1612 ; RV32-LABEL: name: va6_no_fixed_args
1613 ; RV32: bb.1 (%ir-block.0):
1614 ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
1616 ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
1617 ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
1618 ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
1619 ; RV32-NEXT: G_STORE [[COPY]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0, align 16)
1620 ; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
1621 ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
1622 ; RV32-NEXT: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.0 + 4)
1623 ; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
1624 ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
1625 ; RV32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.0 + 8, align 8)
1626 ; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
1627 ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
1628 ; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.0 + 12)
1629 ; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
1630 ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
1631 ; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.0 + 16, align 16)
1632 ; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
1633 ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
1634 ; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.0 + 20)
1635 ; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
1636 ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
1637 ; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.0 + 24, align 8)
1638 ; RV32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
1639 ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
1640 ; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD6]](p0) :: (store (s32) into %fixed-stack.0 + 28)
1641 ; RV32-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD6]], [[C]](s32)
1642 ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
1643 ; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
1644 ; RV32-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1645 ; RV32-NEXT: $x10 = COPY [[VAARG]](s32)
1646 ; RV32-NEXT: PseudoRET implicit $x10
1648 ; RV64-LABEL: name: va6_no_fixed_args
1649 ; RV64: bb.1 (%ir-block.0):
1650 ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
1652 ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
1653 ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
1654 ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
1655 ; RV64-NEXT: G_STORE [[COPY]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16)
1656 ; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
1657 ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
1658 ; RV64-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8)
1659 ; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
1660 ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
1661 ; RV64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.0 + 16, align 16)
1662 ; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
1663 ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
1664 ; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.0 + 24)
1665 ; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
1666 ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
1667 ; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.0 + 32, align 16)
1668 ; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
1669 ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
1670 ; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.0 + 40)
1671 ; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
1672 ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
1673 ; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.0 + 48, align 16)
1674 ; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
1675 ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
1676 ; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD6]](p0) :: (store (s64) into %fixed-stack.0 + 56)
1677 ; RV64-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD6]], [[C]](s64)
1678 ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
1679 ; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
1680 ; RV64-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
1681 ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
1682 ; RV64-NEXT: $x10 = COPY [[ANYEXT]](s64)
1683 ; RV64-NEXT: PseudoRET implicit $x10
1684 %va = alloca ptr
1685 call void @llvm.va_start(ptr %va)
1686 %1 = va_arg ptr %va, i32
1687 call void @llvm.va_end(ptr %va)
1688 ret i32 %1
1689 }
1691 ; TODO: improve constant materialization of stack addresses
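; (The 100000000-byte alloca below forces frame offsets that do not fit in a
; single 12-bit immediate, which is presumably what the TODO above refers to.)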
1693 define i32 @va_large_stack(ptr %fmt, ...) {
1694 ; RV32-LABEL: name: va_large_stack
1695 ; RV32: bb.1 (%ir-block.0):
1696 ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
1698 ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
1699 ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
1700 ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
1701 ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
1702 ; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
1703 ; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
1704 ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
1705 ; RV32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
1706 ; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
1707 ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
1708 ; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
1709 ; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
1710 ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
1711 ; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
1712 ; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
1713 ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
1714 ; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
1715 ; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
1716 ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
1717 ; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
1718 ; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
1719 ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
1720 ; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
1721 ; RV32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
1722 ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.large
1723 ; RV32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.va
1724 ; RV32-NEXT: G_VASTART [[FRAME_INDEX2]](p0) :: (store (s32) into %ir.va)
1725 ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.va)
1726 ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
1727 ; RV32-NEXT: %21:_(p0) = nuw nusw G_PTR_ADD [[LOAD]], [[C1]](s32)
1728 ; RV32-NEXT: G_STORE %21(p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va)
1729 ; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
1730 ; RV32-NEXT: $x10 = COPY [[LOAD1]](s32)
1731 ; RV32-NEXT: PseudoRET implicit $x10
1733 ; RV64-LABEL: name: va_large_stack
1734 ; RV64: bb.1 (%ir-block.0):
1735 ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
1737 ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
1738 ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
1739 ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
1740 ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
1741 ; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
1742 ; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
1743 ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
1744 ; RV64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
1745 ; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
1746 ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
1747 ; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
1748 ; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
1749 ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
1750 ; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
1751 ; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
1752 ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
1753 ; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
1754 ; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
1755 ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
1756 ; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
1757 ; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
1758 ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
1759 ; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
1760 ; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
1761 ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.large
1762 ; RV64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.va
1763 ; RV64-NEXT: G_VASTART [[FRAME_INDEX2]](p0) :: (store (s64) into %ir.va)
1764 ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.va, align 4)
1765 ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
1766 ; RV64-NEXT: %21:_(p0) = nuw nusw G_PTR_ADD [[LOAD]], [[C1]](s64)
1767 ; RV64-NEXT: G_STORE %21(p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va, align 4)
1768 ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
1769 ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
1770 ; RV64-NEXT: $x10 = COPY [[ANYEXT]](s64)
1771 ; RV64-NEXT: PseudoRET implicit $x10
1772 %large = alloca [ 100000000 x i8 ]
1773 %va = alloca ptr
1774 call void @llvm.va_start(ptr %va)
1775 %argp.cur = load ptr, ptr %va, align 4
1776 %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
1777 store ptr %argp.next, ptr %va, align 4
1778 %1 = load i32, ptr %argp.cur, align 4
1779 call void @llvm.va_end(ptr %va)