; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4
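
; These tests exercise the fastcc calling convention for fixed-length vectors.
; The -riscv-v-fixed-length-vector-lmul-max flag caps the register-group size
; (LMUL) used when lowering fixed-length vectors, so the LMULMAX4 run must
; split values that the LMULMAX8 run keeps in a single register group.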

define fastcc <4 x i8> @ret_v4i8(ptr %p) {
; CHECK-LABEL: ret_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <4 x i8>, ptr %p
  ret <4 x i8> %v
}

define fastcc <4 x i32> @ret_v4i32(ptr %p) {
; CHECK-LABEL: ret_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <4 x i32>, ptr %p
  ret <4 x i32> %v
}

define fastcc <8 x i32> @ret_v8i32(ptr %p) {
; CHECK-LABEL: ret_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
  %v = load <8 x i32>, ptr %p
  ret <8 x i32> %v
}

define fastcc <16 x i64> @ret_v16i64(ptr %p) {
; LMULMAX8-LABEL: ret_v16i64:
; LMULMAX8:       # %bb.0:
; LMULMAX8-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; LMULMAX8-NEXT:    vle64.v v8, (a0)
; LMULMAX8-NEXT:    ret
;
; LMULMAX4-LABEL: ret_v16i64:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT:    vle64.v v8, (a0)
; LMULMAX4-NEXT:    addi a0, a0, 64
; LMULMAX4-NEXT:    vle64.v v12, (a0)
; LMULMAX4-NEXT:    ret
  %v = load <16 x i64>, ptr %p
  ret <16 x i64> %v
}
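
; <16 x i64> is 1024 bits, which needs LMUL 8 at the 128-bit minimum VLEN.
; Under LMULMAX4 it is therefore returned as two LMUL=4 halves in v8 and v12,
; loaded 64 bytes (8 elements) apart.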

define fastcc <8 x i1> @ret_mask_v8i1(ptr %p) {
; CHECK-LABEL: ret_mask_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
  %v = load <8 x i1>, ptr %p
  ret <8 x i1> %v
}
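
; Mask vectors are returned in v0 as a packed bitmask, loaded with vlm.v
; rather than a regular vector load.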

define fastcc <32 x i1> @ret_mask_v32i1(ptr %p) {
; CHECK-LABEL: ret_mask_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vlm.v v0, (a0)
; CHECK-NEXT:    ret
  %v = load <32 x i1>, ptr %p
  ret <32 x i1> %v
}

; Return the vector via registers v8-v23
define fastcc <64 x i32> @ret_split_v64i32(ptr %x) {
; LMULMAX8-LABEL: ret_split_v64i32:
; LMULMAX8:       # %bb.0:
; LMULMAX8-NEXT:    li a1, 32
; LMULMAX8-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; LMULMAX8-NEXT:    vle32.v v8, (a0)
; LMULMAX8-NEXT:    addi a0, a0, 128
; LMULMAX8-NEXT:    vle32.v v16, (a0)
; LMULMAX8-NEXT:    ret
;
; LMULMAX4-LABEL: ret_split_v64i32:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT:    vle32.v v8, (a0)
; LMULMAX4-NEXT:    addi a1, a0, 64
; LMULMAX4-NEXT:    vle32.v v12, (a1)
; LMULMAX4-NEXT:    addi a1, a0, 128
; LMULMAX4-NEXT:    vle32.v v16, (a1)
; LMULMAX4-NEXT:    addi a0, a0, 192
; LMULMAX4-NEXT:    vle32.v v20, (a0)
; LMULMAX4-NEXT:    ret
  %v = load <64 x i32>, ptr %x
  ret <64 x i32> %v
}
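
; A <64 x i32> return (2048 bits) still fits in v8-v23: two LMUL=8 groups
; (v8, v16) under LMULMAX8, or four LMUL=4 groups (v8, v12, v16, v20) under
; LMULMAX4.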

; Return the vector fully via the stack
define fastcc <128 x i32> @ret_split_v128i32(ptr %x) {
; LMULMAX8-LABEL: ret_split_v128i32:
; LMULMAX8:       # %bb.0:
; LMULMAX8-NEXT:    addi a2, a1, 128
; LMULMAX8-NEXT:    li a3, 32
; LMULMAX8-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; LMULMAX8-NEXT:    vle32.v v8, (a2)
; LMULMAX8-NEXT:    addi a2, a1, 256
; LMULMAX8-NEXT:    vle32.v v16, (a1)
; LMULMAX8-NEXT:    addi a1, a1, 384
; LMULMAX8-NEXT:    vle32.v v24, (a1)
; LMULMAX8-NEXT:    vle32.v v0, (a2)
; LMULMAX8-NEXT:    vse32.v v16, (a0)
; LMULMAX8-NEXT:    addi a1, a0, 384
; LMULMAX8-NEXT:    vse32.v v24, (a1)
; LMULMAX8-NEXT:    addi a1, a0, 256
; LMULMAX8-NEXT:    vse32.v v0, (a1)
; LMULMAX8-NEXT:    addi a0, a0, 128
; LMULMAX8-NEXT:    vse32.v v8, (a0)
; LMULMAX8-NEXT:    ret
;
; LMULMAX4-LABEL: ret_split_v128i32:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    addi a2, a1, 64
; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT:    vle32.v v8, (a2)
; LMULMAX4-NEXT:    addi a2, a1, 128
; LMULMAX4-NEXT:    vle32.v v12, (a2)
; LMULMAX4-NEXT:    addi a2, a1, 192
; LMULMAX4-NEXT:    vle32.v v16, (a2)
; LMULMAX4-NEXT:    addi a2, a1, 256
; LMULMAX4-NEXT:    vle32.v v20, (a2)
; LMULMAX4-NEXT:    addi a2, a1, 320
; LMULMAX4-NEXT:    vle32.v v24, (a2)
; LMULMAX4-NEXT:    addi a2, a1, 384
; LMULMAX4-NEXT:    vle32.v v28, (a1)
; LMULMAX4-NEXT:    addi a1, a1, 448
; LMULMAX4-NEXT:    vle32.v v0, (a1)
; LMULMAX4-NEXT:    vle32.v v4, (a2)
; LMULMAX4-NEXT:    vse32.v v28, (a0)
; LMULMAX4-NEXT:    addi a1, a0, 448
; LMULMAX4-NEXT:    vse32.v v0, (a1)
; LMULMAX4-NEXT:    addi a1, a0, 384
; LMULMAX4-NEXT:    vse32.v v4, (a1)
; LMULMAX4-NEXT:    addi a1, a0, 320
; LMULMAX4-NEXT:    vse32.v v24, (a1)
; LMULMAX4-NEXT:    addi a1, a0, 256
; LMULMAX4-NEXT:    vse32.v v20, (a1)
; LMULMAX4-NEXT:    addi a1, a0, 192
; LMULMAX4-NEXT:    vse32.v v16, (a1)
; LMULMAX4-NEXT:    addi a1, a0, 128
; LMULMAX4-NEXT:    vse32.v v12, (a1)
; LMULMAX4-NEXT:    addi a0, a0, 64
; LMULMAX4-NEXT:    vse32.v v8, (a0)
; LMULMAX4-NEXT:    ret
  %v = load <128 x i32>, ptr %x
  ret <128 x i32> %v
}
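
; A <128 x i32> (512-byte) return does not fit in v8-v23, so it is returned
; indirectly: the caller passes the address of the result buffer in a0, the
; source pointer arrives in a1, and the callee stores the value back with
; vse32.v.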

define fastcc <4 x i8> @ret_v8i8_param_v4i8(<4 x i8> %v) {
; CHECK-LABEL: ret_v8i8_param_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    ret
  %r = add <4 x i8> %v, <i8 2, i8 2, i8 2, i8 2>
  ret <4 x i8> %r
}

define fastcc <4 x i8> @ret_v4i8_param_v4i8_v4i8(<4 x i8> %v, <4 x i8> %w) {
; CHECK-LABEL: ret_v4i8_param_v4i8_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %r = add <4 x i8> %v, %w
  ret <4 x i8> %r
}

define fastcc <4 x i64> @ret_v4i64_param_v4i64_v4i64(<4 x i64> %v, <4 x i64> %w) {
; CHECK-LABEL: ret_v4i64_param_v4i64_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    ret
  %r = add <4 x i64> %v, %w
  ret <4 x i64> %r
}
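
; <4 x i64> occupies an LMUL=2 register group, and RVV register groups must
; start at a register number divisible by the LMUL, so %v lands in v8-v9 and
; %w in v10-v11.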

define fastcc <8 x i1> @ret_v8i1_param_v8i1_v8i1(<8 x i1> %v, <8 x i1> %w) {
; CHECK-LABEL: ret_v8i1_param_v8i1_v8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %r = xor <8 x i1> %v, %w
  ret <8 x i1> %r
}

define fastcc <32 x i1> @ret_v32i1_param_v32i1_v32i1(<32 x i1> %v, <32 x i1> %w) {
; CHECK-LABEL: ret_v32i1_param_v32i1_v32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmand.mm v0, v0, v8
; CHECK-NEXT:    ret
  %r = and <32 x i1> %v, %w
  ret <32 x i1> %r
}
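
; The first mask argument is passed in v0; subsequent mask arguments use the
; ordinary vector argument registers starting at v8.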

define fastcc <32 x i32> @ret_v32i32_param_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
; LMULMAX8-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
; LMULMAX8:       # %bb.0:
; LMULMAX8-NEXT:    li a2, 32
; LMULMAX8-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; LMULMAX8-NEXT:    vle32.v v24, (a0)
; LMULMAX8-NEXT:    vadd.vv v8, v8, v16
; LMULMAX8-NEXT:    vadd.vv v8, v8, v24
; LMULMAX8-NEXT:    vadd.vx v8, v8, a1
; LMULMAX8-NEXT:    ret
;
; LMULMAX4-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT:    addi a1, a0, 64
; LMULMAX4-NEXT:    vle32.v v24, (a1)
; LMULMAX4-NEXT:    vle32.v v28, (a0)
; LMULMAX4-NEXT:    vadd.vv v8, v8, v16
; LMULMAX4-NEXT:    vadd.vv v12, v12, v20
; LMULMAX4-NEXT:    vadd.vv v12, v12, v24
; LMULMAX4-NEXT:    vadd.vv v8, v8, v28
; LMULMAX4-NEXT:    vadd.vx v8, v8, a2
; LMULMAX4-NEXT:    vadd.vx v12, v12, a2
; LMULMAX4-NEXT:    ret
  %r = add <32 x i32> %x, %y
  %s = add <32 x i32> %r, %z
  %head = insertelement <32 x i32> poison, i32 %w, i32 0
  %splat = shufflevector <32 x i32> %head, <32 x i32> poison, <32 x i32> zeroinitializer
  %t = add <32 x i32> %s, %splat
  ret <32 x i32> %t
}
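
; Only %x and %y fit in v8-v23; the third <32 x i32> is passed indirectly,
; with its address arriving in a0, and the callee loads it with vle32.v.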

declare <32 x i32> @ext2(<32 x i32>, <32 x i32>, i32, i32)
declare <32 x i32> @ext3(<32 x i32>, <32 x i32>, <32 x i32>, i32, i32)

define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, i32 %w) {
; LMULMAX8-LABEL: ret_v32i32_call_v32i32_v32i32_i32:
; LMULMAX8:       # %bb.0:
; LMULMAX8-NEXT:    addi sp, sp, -16
; LMULMAX8-NEXT:    .cfi_def_cfa_offset 16
; LMULMAX8-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT:    .cfi_offset ra, -8
; LMULMAX8-NEXT:    vmv8r.v v24, v8
; LMULMAX8-NEXT:    li a1, 2
; LMULMAX8-NEXT:    vmv8r.v v8, v16
; LMULMAX8-NEXT:    vmv8r.v v16, v24
; LMULMAX8-NEXT:    call ext2@plt
; LMULMAX8-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT:    addi sp, sp, 16
; LMULMAX8-NEXT:    ret
;
; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_i32:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    addi sp, sp, -16
; LMULMAX4-NEXT:    .cfi_def_cfa_offset 16
; LMULMAX4-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT:    .cfi_offset ra, -8
; LMULMAX4-NEXT:    vmv4r.v v24, v12
; LMULMAX4-NEXT:    vmv4r.v v28, v8
; LMULMAX4-NEXT:    li a1, 2
; LMULMAX4-NEXT:    vmv4r.v v8, v16
; LMULMAX4-NEXT:    vmv4r.v v12, v20
; LMULMAX4-NEXT:    vmv4r.v v16, v28
; LMULMAX4-NEXT:    vmv4r.v v20, v24
; LMULMAX4-NEXT:    call ext2@plt
; LMULMAX4-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT:    addi sp, sp, 16
; LMULMAX4-NEXT:    ret
  %t = call fastcc <32 x i32> @ext2(<32 x i32> %y, <32 x i32> %x, i32 %w, i32 2)
  ret <32 x i32> %t
}
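
; The call swaps %x and %y, so the whole-register moves (vmv8r.v / vmv4r.v)
; rotate the v8 and v16 argument groups through v24 before the call.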

define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %w) {
; LMULMAX8-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
; LMULMAX8:       # %bb.0:
; LMULMAX8-NEXT:    addi sp, sp, -256
; LMULMAX8-NEXT:    .cfi_def_cfa_offset 256
; LMULMAX8-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT:    .cfi_offset ra, -8
; LMULMAX8-NEXT:    .cfi_offset s0, -16
; LMULMAX8-NEXT:    addi s0, sp, 256
; LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
; LMULMAX8-NEXT:    andi sp, sp, -128
; LMULMAX8-NEXT:    li a2, 32
; LMULMAX8-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; LMULMAX8-NEXT:    vle32.v v24, (a0)
; LMULMAX8-NEXT:    mv a3, sp
; LMULMAX8-NEXT:    mv a0, sp
; LMULMAX8-NEXT:    li a2, 42
; LMULMAX8-NEXT:    vse32.v v8, (a3)
; LMULMAX8-NEXT:    vmv.v.v v8, v24
; LMULMAX8-NEXT:    call ext3@plt
; LMULMAX8-NEXT:    addi sp, s0, -256
; LMULMAX8-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT:    addi sp, sp, 256
; LMULMAX8-NEXT:    ret
;
; LMULMAX4-LABEL: ret_v32i32_call_v32i32_v32i32_v32i32_i32:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    addi sp, sp, -256
; LMULMAX4-NEXT:    .cfi_def_cfa_offset 256
; LMULMAX4-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT:    .cfi_offset ra, -8
; LMULMAX4-NEXT:    .cfi_offset s0, -16
; LMULMAX4-NEXT:    addi s0, sp, 256
; LMULMAX4-NEXT:    .cfi_def_cfa s0, 0
; LMULMAX4-NEXT:    andi sp, sp, -128
; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT:    vle32.v v24, (a0)
; LMULMAX4-NEXT:    addi a0, a0, 64
; LMULMAX4-NEXT:    vle32.v v28, (a0)
; LMULMAX4-NEXT:    addi a0, sp, 64
; LMULMAX4-NEXT:    vse32.v v12, (a0)
; LMULMAX4-NEXT:    mv a1, sp
; LMULMAX4-NEXT:    mv a0, sp
; LMULMAX4-NEXT:    li a3, 42
; LMULMAX4-NEXT:    vse32.v v8, (a1)
; LMULMAX4-NEXT:    vmv.v.v v8, v24
; LMULMAX4-NEXT:    vmv.v.v v12, v28
; LMULMAX4-NEXT:    call ext3@plt
; LMULMAX4-NEXT:    addi sp, s0, -256
; LMULMAX4-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT:    addi sp, sp, 256
; LMULMAX4-NEXT:    ret
  %t = call fastcc <32 x i32> @ext3(<32 x i32> %z, <32 x i32> %y, <32 x i32> %x, i32 %w, i32 42)
  ret <32 x i32> %t
}
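
; The third <32 x i32> argument to ext3 is passed indirectly: the caller
; aligns sp to 128 bytes (andi sp, sp, -128), stores the 128-byte value into
; the stack buffer, and passes the buffer address in a GPR.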

; A test case where the normal calling convention would pass directly via the
; stack, but with fastcc can pass indirectly with the extra GPR registers
; allowed.
define fastcc <32 x i32> @vector_arg_indirect_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %8) {
; LMULMAX8-LABEL: vector_arg_indirect_stack:
; LMULMAX8:       # %bb.0:
; LMULMAX8-NEXT:    li a0, 32
; LMULMAX8-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; LMULMAX8-NEXT:    vle32.v v16, (t2)
; LMULMAX8-NEXT:    vadd.vv v8, v8, v16
; LMULMAX8-NEXT:    ret
;
; LMULMAX4-LABEL: vector_arg_indirect_stack:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    addi a0, t2, 64
; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT:    vle32.v v16, (t2)
; LMULMAX4-NEXT:    vle32.v v20, (a0)
; LMULMAX4-NEXT:    vadd.vv v8, v8, v16
; LMULMAX4-NEXT:    vadd.vv v12, v12, v20
; LMULMAX4-NEXT:    ret
  %s = add <32 x i32> %x, %z
  ret <32 x i32> %s
}
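
; %z does not fit in the vector argument registers, but fastcc provides extra
; GPR argument registers (t2-t6), so its address is passed in t2 and the
; callee loads it from there instead of going through the stack.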

; Calling the function above. Ensure we pass the arguments correctly.
define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z) {
; LMULMAX8-LABEL: pass_vector_arg_indirect_stack:
; LMULMAX8:       # %bb.0:
; LMULMAX8-NEXT:    addi sp, sp, -256
; LMULMAX8-NEXT:    .cfi_def_cfa_offset 256
; LMULMAX8-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT:    .cfi_offset ra, -8
; LMULMAX8-NEXT:    .cfi_offset s0, -16
; LMULMAX8-NEXT:    addi s0, sp, 256
; LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
; LMULMAX8-NEXT:    andi sp, sp, -128
; LMULMAX8-NEXT:    li a0, 32
; LMULMAX8-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; LMULMAX8-NEXT:    vmv.v.i v8, 0
; LMULMAX8-NEXT:    mv a0, sp
; LMULMAX8-NEXT:    li a1, 1
; LMULMAX8-NEXT:    li a2, 2
; LMULMAX8-NEXT:    li a3, 3
; LMULMAX8-NEXT:    li a4, 4
; LMULMAX8-NEXT:    li a5, 5
; LMULMAX8-NEXT:    li a6, 6
; LMULMAX8-NEXT:    li a7, 7
; LMULMAX8-NEXT:    mv t2, sp
; LMULMAX8-NEXT:    li t3, 8
; LMULMAX8-NEXT:    vse32.v v8, (a0)
; LMULMAX8-NEXT:    li a0, 0
; LMULMAX8-NEXT:    vmv.v.i v16, 0
; LMULMAX8-NEXT:    call vector_arg_indirect_stack@plt
; LMULMAX8-NEXT:    addi sp, s0, -256
; LMULMAX8-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT:    addi sp, sp, 256
; LMULMAX8-NEXT:    ret
;
; LMULMAX4-LABEL: pass_vector_arg_indirect_stack:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    addi sp, sp, -256
; LMULMAX4-NEXT:    .cfi_def_cfa_offset 256
; LMULMAX4-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT:    .cfi_offset ra, -8
; LMULMAX4-NEXT:    .cfi_offset s0, -16
; LMULMAX4-NEXT:    addi s0, sp, 256
; LMULMAX4-NEXT:    .cfi_def_cfa s0, 0
; LMULMAX4-NEXT:    andi sp, sp, -128
; LMULMAX4-NEXT:    addi a0, sp, 64
; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT:    vmv.v.i v8, 0
; LMULMAX4-NEXT:    vse32.v v8, (a0)
; LMULMAX4-NEXT:    mv a0, sp
; LMULMAX4-NEXT:    li a1, 1
; LMULMAX4-NEXT:    li a2, 2
; LMULMAX4-NEXT:    li a3, 3
; LMULMAX4-NEXT:    li a4, 4
; LMULMAX4-NEXT:    li a5, 5
; LMULMAX4-NEXT:    li a6, 6
; LMULMAX4-NEXT:    li a7, 7
; LMULMAX4-NEXT:    mv t2, sp
; LMULMAX4-NEXT:    li t4, 8
; LMULMAX4-NEXT:    vse32.v v8, (a0)
; LMULMAX4-NEXT:    li a0, 0
; LMULMAX4-NEXT:    vmv.v.i v12, 0
; LMULMAX4-NEXT:    vmv.v.i v16, 0
; LMULMAX4-NEXT:    vmv.v.i v20, 0
; LMULMAX4-NEXT:    call vector_arg_indirect_stack@plt
; LMULMAX4-NEXT:    addi sp, s0, -256
; LMULMAX4-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT:    addi sp, sp, 256
; LMULMAX4-NEXT:    ret
  %s = call fastcc <32 x i32> @vector_arg_indirect_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 8)
  ret <32 x i32> %s
}
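
; The zero vectors are materialized with vmv.v.i, the indirectly passed
; argument is spilled to the 128-byte-aligned stack buffer whose address goes
; in t2, and the remaining scalars use a0-a7 plus the next free extra GPR.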

; A pathological test case where even with fastcc we must use the stack for
; arguments %13 and %z.
define fastcc <32 x i32> @vector_arg_direct_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, i32 %last) {
; LMULMAX8-LABEL: vector_arg_direct_stack:
; LMULMAX8:       # %bb.0:
; LMULMAX8-NEXT:    li a0, 32
; LMULMAX8-NEXT:    addi a1, sp, 8
; LMULMAX8-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; LMULMAX8-NEXT:    vle32.v v24, (a1)
; LMULMAX8-NEXT:    vadd.vv v8, v8, v16
; LMULMAX8-NEXT:    vadd.vv v8, v8, v24
; LMULMAX8-NEXT:    ret
;
; LMULMAX4-LABEL: vector_arg_direct_stack:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT:    addi a0, sp, 8
; LMULMAX4-NEXT:    vle32.v v24, (a0)
; LMULMAX4-NEXT:    addi a0, sp, 72
; LMULMAX4-NEXT:    vle32.v v28, (a0)
; LMULMAX4-NEXT:    vadd.vv v12, v12, v20
; LMULMAX4-NEXT:    vadd.vv v8, v8, v16
; LMULMAX4-NEXT:    vadd.vv v8, v8, v24
; LMULMAX4-NEXT:    vadd.vv v12, v12, v28
; LMULMAX4-NEXT:    ret
  %s = add <32 x i32> %x, %y
  %t = add <32 x i32> %s, %z
  ret <32 x i32> %t
}
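
; Fourteen scalar arguments exhaust a0-a7 and t2-t6 (13 GPRs), so %13 goes on
; the stack and the third vector %z is passed directly on the stack as well,
; starting at sp+8.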

; Calling the function above. Ensure we pass the arguments correctly.
define fastcc <32 x i32> @pass_vector_arg_direct_stack(<32 x i32> %x, <32 x i32> %y, <32 x i32> %z) {
; LMULMAX8-LABEL: pass_vector_arg_direct_stack:
; LMULMAX8:       # %bb.0:
; LMULMAX8-NEXT:    addi sp, sp, -160
; LMULMAX8-NEXT:    .cfi_def_cfa_offset 160
; LMULMAX8-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT:    .cfi_offset ra, -8
; LMULMAX8-NEXT:    li a0, 32
; LMULMAX8-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; LMULMAX8-NEXT:    vmv.v.i v8, 0
; LMULMAX8-NEXT:    addi a0, sp, 8
; LMULMAX8-NEXT:    vse32.v v8, (a0)
; LMULMAX8-NEXT:    li a0, 1
; LMULMAX8-NEXT:    sd a0, 136(sp)
; LMULMAX8-NEXT:    li a0, 13
; LMULMAX8-NEXT:    li a1, 1
; LMULMAX8-NEXT:    li a2, 2
; LMULMAX8-NEXT:    li a3, 3
; LMULMAX8-NEXT:    li a4, 4
; LMULMAX8-NEXT:    li a5, 5
; LMULMAX8-NEXT:    li a6, 6
; LMULMAX8-NEXT:    li a7, 7
; LMULMAX8-NEXT:    li t2, 8
; LMULMAX8-NEXT:    li t3, 9
; LMULMAX8-NEXT:    li t4, 10
; LMULMAX8-NEXT:    li t5, 11
; LMULMAX8-NEXT:    li t6, 12
; LMULMAX8-NEXT:    sd a0, 0(sp)
; LMULMAX8-NEXT:    li a0, 0
; LMULMAX8-NEXT:    vmv.v.i v16, 0
; LMULMAX8-NEXT:    call vector_arg_direct_stack@plt
; LMULMAX8-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT:    addi sp, sp, 160
; LMULMAX8-NEXT:    ret
;
; LMULMAX4-LABEL: pass_vector_arg_direct_stack:
; LMULMAX4:       # %bb.0:
; LMULMAX4-NEXT:    addi sp, sp, -160
; LMULMAX4-NEXT:    .cfi_def_cfa_offset 160
; LMULMAX4-NEXT:    sd ra, 152(sp) # 8-byte Folded Spill
; LMULMAX4-NEXT:    .cfi_offset ra, -8
; LMULMAX4-NEXT:    li a0, 1
; LMULMAX4-NEXT:    sd a0, 136(sp)
; LMULMAX4-NEXT:    li a0, 13
; LMULMAX4-NEXT:    sd a0, 0(sp)
; LMULMAX4-NEXT:    addi a0, sp, 72
; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT:    vmv.v.i v8, 0
; LMULMAX4-NEXT:    vse32.v v8, (a0)
; LMULMAX4-NEXT:    addi a0, sp, 8
; LMULMAX4-NEXT:    li a1, 1
; LMULMAX4-NEXT:    li a2, 2
; LMULMAX4-NEXT:    li a3, 3
; LMULMAX4-NEXT:    li a4, 4
; LMULMAX4-NEXT:    li a5, 5
; LMULMAX4-NEXT:    li a6, 6
; LMULMAX4-NEXT:    li a7, 7
; LMULMAX4-NEXT:    li t2, 8
; LMULMAX4-NEXT:    li t3, 9
; LMULMAX4-NEXT:    li t4, 10
; LMULMAX4-NEXT:    li t5, 11
; LMULMAX4-NEXT:    li t6, 12
; LMULMAX4-NEXT:    vse32.v v8, (a0)
; LMULMAX4-NEXT:    li a0, 0
; LMULMAX4-NEXT:    vmv.v.i v12, 0
; LMULMAX4-NEXT:    vmv.v.i v16, 0
; LMULMAX4-NEXT:    vmv.v.i v20, 0
; LMULMAX4-NEXT:    call vector_arg_direct_stack@plt
; LMULMAX4-NEXT:    ld ra, 152(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT:    addi sp, sp, 160
; LMULMAX4-NEXT:    ret
  %s = call fastcc <32 x i32> @vector_arg_direct_stack(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, <32 x i32> zeroinitializer, i32 1)
  ret <32 x i32> %s
}
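
; The caller writes %z to the stack at sp+8, stores %13 (13) at 0(sp) and the
; final i32 (1) at 136(sp), and fills a0-a7 and t2-t6 with the first thirteen
; scalars.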

; A pathological test case where even with fastcc we must use the stack for
; mask argument %m2. %m1 is passed via v0.
define fastcc <4 x i1> @vector_mask_arg_direct_stack(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, <32 x i32> %x, <32 x i32> %y, <32 x i32> %z, <4 x i1> %m1, <4 x i1> %m2, i32 %last) {
; CHECK-LABEL: vector_mask_arg_direct_stack:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, sp, 136
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vlm.v v8, (a0)
; CHECK-NEXT:    vmxor.mm v0, v0, v8
; CHECK-NEXT:    ret
  %r = xor <4 x i1> %m1, %m2
  ret <4 x i1> %r
}
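
; %m1 arrives in v0; %m2 spills to the stack at sp+136 and is reloaded as a
; packed bitmask with vlm.v before the mask-register xor.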