; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=aarch64-linux-gnu -O0 -mattr=+sve -global-isel -global-isel-abort=1 -aarch64-enable-gisel-sve=1 \
; RUN:   -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s
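
;; Scalable vector arguments that fit in a single SVE Z register are passed
;; directly in $z0 and lowered to a single COPY of the corresponding LLT.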
define void @formal_argument_nxv16i8(<vscale x 16 x i8> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16i8
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv8i16(<vscale x 8 x i16> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv8i16
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv4i32(<vscale x 4 x i32> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv4i32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv2i64(<vscale x 2 x i64> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv2i64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv4f32(<vscale x 4 x float> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv4f32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv2f64(<vscale x 2 x double> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv2f64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv2p0(<vscale x 2 x ptr> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv2p0
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $z0
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}
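
;; Scalable vector arguments that are twice the width of a Z register are
;; split across $z0-$z1 and rebuilt with G_CONCAT_VECTORS.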
define void @formal_argument_nxv32i8(<vscale x 32 x i8> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv32i8
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 32 x s8>) = G_CONCAT_VECTORS [[COPY]](<vscale x 16 x s8>), [[COPY1]](<vscale x 16 x s8>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv16i16(<vscale x 16 x i16> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16i16
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s16>) = G_CONCAT_VECTORS [[COPY]](<vscale x 8 x s16>), [[COPY1]](<vscale x 8 x s16>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv8i32(<vscale x 8 x i32> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv8i32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv4i64(<vscale x 4 x i64> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv4i64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 4 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv8f32(<vscale x 8 x float> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv8f32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv4f64(<vscale x 4 x double> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv4f64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 4 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv4p0(<vscale x 4 x ptr> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv4p0
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 4 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}
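
;; Arguments four times the width of a Z register are split across $z0-$z3.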
define void @formal_argument_nxv64i8(<vscale x 64 x i8> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv64i8
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 64 x s8>) = G_CONCAT_VECTORS [[COPY]](<vscale x 16 x s8>), [[COPY1]](<vscale x 16 x s8>), [[COPY2]](<vscale x 16 x s8>), [[COPY3]](<vscale x 16 x s8>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv32i16(<vscale x 32 x i16> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv32i16
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 32 x s16>) = G_CONCAT_VECTORS [[COPY]](<vscale x 8 x s16>), [[COPY1]](<vscale x 8 x s16>), [[COPY2]](<vscale x 8 x s16>), [[COPY3]](<vscale x 8 x s16>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv16i32(<vscale x 16 x i32> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16i32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>), [[COPY2]](<vscale x 4 x s32>), [[COPY3]](<vscale x 4 x s32>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv8i64(<vscale x 8 x i64> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv8i64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv16f32(<vscale x 16 x float> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16f32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>), [[COPY2]](<vscale x 4 x s32>), [[COPY3]](<vscale x 4 x s32>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv8f64(<vscale x 8 x double> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv8f64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv8p0(<vscale x 8 x ptr> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv8p0
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 8 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}
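
;; Arguments eight times the width of a Z register use all of $z0-$z7.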
define void @formal_argument_nxv128i8(<vscale x 128 x i8> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv128i8
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 128 x s8>) = G_CONCAT_VECTORS [[COPY]](<vscale x 16 x s8>), [[COPY1]](<vscale x 16 x s8>), [[COPY2]](<vscale x 16 x s8>), [[COPY3]](<vscale x 16 x s8>), [[COPY4]](<vscale x 16 x s8>), [[COPY5]](<vscale x 16 x s8>), [[COPY6]](<vscale x 16 x s8>), [[COPY7]](<vscale x 16 x s8>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv64i16(<vscale x 64 x i16> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv64i16
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 64 x s16>) = G_CONCAT_VECTORS [[COPY]](<vscale x 8 x s16>), [[COPY1]](<vscale x 8 x s16>), [[COPY2]](<vscale x 8 x s16>), [[COPY3]](<vscale x 8 x s16>), [[COPY4]](<vscale x 8 x s16>), [[COPY5]](<vscale x 8 x s16>), [[COPY6]](<vscale x 8 x s16>), [[COPY7]](<vscale x 8 x s16>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv32i32(<vscale x 32 x i32> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv32i32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 32 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>), [[COPY2]](<vscale x 4 x s32>), [[COPY3]](<vscale x 4 x s32>), [[COPY4]](<vscale x 4 x s32>), [[COPY5]](<vscale x 4 x s32>), [[COPY6]](<vscale x 4 x s32>), [[COPY7]](<vscale x 4 x s32>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv16i64(<vscale x 16 x i64> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16i64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>), [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>), [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv32f32(<vscale x 32 x float> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv32f32
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 32 x s32>) = G_CONCAT_VECTORS [[COPY]](<vscale x 4 x s32>), [[COPY1]](<vscale x 4 x s32>), [[COPY2]](<vscale x 4 x s32>), [[COPY3]](<vscale x 4 x s32>), [[COPY4]](<vscale x 4 x s32>), [[COPY5]](<vscale x 4 x s32>), [[COPY6]](<vscale x 4 x s32>), [[COPY7]](<vscale x 4 x s32>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv16f64(<vscale x 16 x double> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16f64
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>), [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>), [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}

define void @formal_argument_nxv16p0(<vscale x 16 x ptr> %0) {
  ; CHECK-LABEL: name: formal_argument_nxv16p0
  ; CHECK: bb.1 (%ir-block.1):
  ; CHECK-NEXT:   liveins: $z0, $z1, $z2, $z3, $z4, $z5, $z6, $z7
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z0
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z1
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z2
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z3
  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z4
  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z5
  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z6
  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $z7
  ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x p0>) = G_CONCAT_VECTORS [[COPY]](<vscale x 2 x s64>), [[COPY1]](<vscale x 2 x s64>), [[COPY2]](<vscale x 2 x s64>), [[COPY3]](<vscale x 2 x s64>), [[COPY4]](<vscale x 2 x s64>), [[COPY5]](<vscale x 2 x s64>), [[COPY6]](<vscale x 2 x s64>), [[COPY7]](<vscale x 2 x s64>)
  ; CHECK-NEXT:   RET_ReallyLR
  ret void
}