; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV32 %s
; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV64 %s
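
; The RISC-V calling convention passes the first eight integer arguments in
; a0-a7 ($x10-$x17). In a vararg function, the IRTranslator spills every
; argument register not consumed by a named parameter into the fixed-stack
; vararg save area, using 4-byte slots on RV32 and 8-byte slots on RV64.
; Here one named argument leaves seven registers ($x11-$x17) to spill; note
; that a final G_PTR_ADD past the last slot is emitted even though it is
; never used.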
define void @va1arg(ptr %a, ...) {
; RV32-LABEL: name: va1arg
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32-NEXT: {{  $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
; RV32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va1arg
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64-NEXT: {{  $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; RV64-NEXT: PseudoRET
  ret void
}
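
; Two named pointer arguments occupy $x10-$x11, so six registers
; ($x12-$x17) are spilled to %fixed-stack.0. Every other store carries an
; align 8 (RV32) or align 16 (RV64) annotation, presumably reflecting the
; alignment of the save-area stack object at even slot offsets.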
define void @va2arg(ptr %a, ptr %b, ...) {
; RV32-LABEL: name: va2arg
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32-NEXT: {{  $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32-NEXT: G_STORE [[COPY2]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0, align 8)
; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.0 + 4)
; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.0 + 8, align 8)
; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.0 + 12)
; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.0 + 16, align 8)
; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.0 + 20)
; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va2arg
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64-NEXT: {{  $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64-NEXT: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16)
; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8)
; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.0 + 16, align 16)
; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.0 + 24)
; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.0 + 32, align 16)
; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.0 + 40)
; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
; RV64-NEXT: PseudoRET
  ret void
}
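
; Three named arguments leave five registers ($x13-$x17) to spill into
; %fixed-stack.1.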
define void @va3arg(ptr %a, ptr %b, ptr %c, ...) {
; RV32-LABEL: name: va3arg
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32-NEXT: {{  $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va3arg
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64-NEXT: {{  $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64-NEXT: G_STORE [[COPY3]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; RV64-NEXT: PseudoRET
  ret void
}
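
; Four named arguments leave four registers ($x14-$x17); the first store's
; align 16 annotation on both targets shows the save area (%fixed-stack.0)
; is 16-byte aligned here.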
define void @va4arg(ptr %a, ptr %b, ptr %c, ptr %d, ...) {
; RV32-LABEL: name: va4arg
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32-NEXT: {{  $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32-NEXT: G_STORE [[COPY4]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0, align 16)
; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.0 + 4)
; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.0 + 8, align 8)
; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.0 + 12)
; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va4arg
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64-NEXT: {{  $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64-NEXT: G_STORE [[COPY4]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16)
; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8)
; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.0 + 16, align 16)
; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.0 + 24)
; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
; RV64-NEXT: PseudoRET
  ret void
}
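
; Five named arguments leave three registers ($x15-$x17) to spill.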
define void @va5arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ...) {
; RV32-LABEL: name: va5arg
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32-NEXT: {{  $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: G_STORE [[COPY5]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va5arg
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64-NEXT: {{  $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: G_STORE [[COPY5]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; RV64-NEXT: PseudoRET
  ret void
}
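
; Six named arguments leave two registers ($x16-$x17) to spill.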
define void @va6arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ...) {
; RV32-LABEL: name: va6arg
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32-NEXT: {{  $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: G_STORE [[COPY6]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0, align 8)
; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.0 + 4)
; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va6arg
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64-NEXT: {{  $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: G_STORE [[COPY6]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16)
; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8)
; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
; RV64-NEXT: PseudoRET
  ret void
}
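
; Seven named arguments leave only $x17 to spill.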
define void @va7arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ...) {
; RV32-LABEL: name: va7arg
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32-NEXT: {{  $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: G_STORE [[COPY7]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va7arg
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64-NEXT: {{  $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: G_STORE [[COPY7]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; RV64-NEXT: PseudoRET
  ret void
}
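
; With eight named arguments every argument register is consumed by a named
; parameter, so no vararg spill code is generated and no save-area frame
; index is created.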
define void @va8arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ptr %h, ...) {
; RV32-LABEL: name: va8arg
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32-NEXT: {{  $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(p0) = COPY $x17
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: va8arg
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64-NEXT: {{  $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(p0) = COPY $x17
; RV64-NEXT: PseudoRET
  ret void
}