; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+v < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v < %s | FileCheck %s --check-prefixes=CHECK,RV64

; Check that we correctly scale the split part indirect offsets by VSCALE.
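; The first <vscale x 32 x i32> argument occupies all of v8-v23, so the second
; one is passed indirectly through a pointer in a0; its two m8 halves are loaded
; from (a0) and from a0 plus vlenb scaled by 8 (the slli/add sequence below).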
define <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> %x, <vscale x 32 x i32> %y) {
; CHECK-LABEL: callee_scalable_vector_split_indirect:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, a0, a1
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vl8re32.v v0, (a1)
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v24
; CHECK-NEXT:    vadd.vv v16, v16, v0
; CHECK-NEXT:    ret
  %a = add <vscale x 32 x i32> %x, %y
  ret <vscale x 32 x i32> %a
}

; Call the function above. Check that we set the arguments correctly.
define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x i32> %x) {
; RV32-LABEL: caller_scalable_vector_split_indirect:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -144
; RV32-NEXT:    .cfi_def_cfa_offset 144
; RV32-NEXT:    sw ra, 140(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 136(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    .cfi_offset s0, -8
; RV32-NEXT:    addi s0, sp, 144
; RV32-NEXT:    .cfi_def_cfa s0, 0
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 4
; RV32-NEXT:    sub sp, sp, a0
; RV32-NEXT:    andi sp, sp, -128
; RV32-NEXT:    addi a0, sp, 128
; RV32-NEXT:    vs8r.v v8, (a0)
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    slli a1, a1, 3
; RV32-NEXT:    add a1, a0, a1
; RV32-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; RV32-NEXT:    vmv.v.i v8, 0
; RV32-NEXT:    addi a0, sp, 128
; RV32-NEXT:    vs8r.v v16, (a1)
; RV32-NEXT:    vmv.v.i v16, 0
; RV32-NEXT:    call callee_scalable_vector_split_indirect
; RV32-NEXT:    addi sp, s0, -144
; RV32-NEXT:    lw ra, 140(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 136(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 144
; RV32-NEXT:    ret
;
; RV64-LABEL: caller_scalable_vector_split_indirect:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -144
; RV64-NEXT:    .cfi_def_cfa_offset 144
; RV64-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 128(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    .cfi_offset s0, -16
; RV64-NEXT:    addi s0, sp, 144
; RV64-NEXT:    .cfi_def_cfa s0, 0
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 4
; RV64-NEXT:    sub sp, sp, a0
; RV64-NEXT:    andi sp, sp, -128
; RV64-NEXT:    addi a0, sp, 128
; RV64-NEXT:    vs8r.v v8, (a0)
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a1, a1, 3
; RV64-NEXT:    add a1, a0, a1
; RV64-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    addi a0, sp, 128
; RV64-NEXT:    vs8r.v v16, (a1)
; RV64-NEXT:    vmv.v.i v16, 0
; RV64-NEXT:    call callee_scalable_vector_split_indirect
; RV64-NEXT:    addi sp, s0, -144
; RV64-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 128(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 144
; RV64-NEXT:    ret
  %a = call <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> %x)
  ret <vscale x 32 x i32> %a
}
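; A {<vscale x 4 x i32>, <vscale x 4 x i32>} return value comes back in the
; register groups v8-v9 and v10-v11; swapping the two fields therefore shows up
; as the vmv2r.v shuffle through v12 below.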
define {<vscale x 4 x i32>, <vscale x 4 x i32>} @caller_tuple_return() {
; RV32-LABEL: caller_tuple_return:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    call callee_tuple_return
; RV32-NEXT:    vmv2r.v v12, v8
; RV32-NEXT:    vmv2r.v v8, v10
; RV32-NEXT:    vmv2r.v v10, v12
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: caller_tuple_return:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    call callee_tuple_return
; RV64-NEXT:    vmv2r.v v12, v8
; RV64-NEXT:    vmv2r.v v8, v10
; RV64-NEXT:    vmv2r.v v10, v12
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
  %a = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
  %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 0
  %c = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 1
  %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %c, 0
  %e = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %d, <vscale x 4 x i32> %b, 1
  ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %e
}

declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()

define void @caller_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x) {
; RV32-LABEL: caller_tuple_argument:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    vmv2r.v v12, v8
; RV32-NEXT:    vmv2r.v v8, v10
; RV32-NEXT:    vmv2r.v v10, v12
; RV32-NEXT:    call callee_tuple_argument
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: caller_tuple_argument:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    vmv2r.v v12, v8
; RV64-NEXT:    vmv2r.v v8, v10
; RV64-NEXT:    vmv2r.v v10, v12
; RV64-NEXT:    call callee_tuple_argument
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
  %a = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 0
  %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 1
  %c = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %b, 0
  %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %c, <vscale x 4 x i32> %a, 1
  call void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %d)
  ret void
}

declare void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>})

; %0 -> v8
; %1 -> v9
define <vscale x 1 x i64> @case1(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1) {
; CHECK-LABEL: case1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %a = add <vscale x 1 x i64> %0, %1
  ret <vscale x 1 x i64> %a
}
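; An LMUL=2 value must be allocated to a register group whose first register is
; even, so the <vscale x 2 x i64> argument below skips v9 and takes v10-v11;
; the trailing LMUL=1 argument then backfills v9.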
; %0 -> v8
; %1 -> v10-v11
; %2 -> v9
define <vscale x 1 x i64> @case2_1(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2) {
; CHECK-LABEL: case2_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %a = add <vscale x 1 x i64> %0, %2
  ret <vscale x 1 x i64> %a
}
define <vscale x 2 x i64> @case2_2(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2) {
; CHECK-LABEL: case2_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v10
; CHECK-NEXT:    ret
  %a = add <vscale x 2 x i64> %1, %1
  ret <vscale x 2 x i64> %a
}

; %0 -> v8
; %1 -> {v10-v11, v12-v13}
; %2 -> v9
define <vscale x 1 x i64> @case3_1(<vscale x 1 x i64> %0, {<vscale x 2 x i64>, <vscale x 2 x i64>} %1, <vscale x 1 x i64> %2) {
; CHECK-LABEL: case3_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %add = add <vscale x 1 x i64> %0, %2
  ret <vscale x 1 x i64> %add
}
define <vscale x 2 x i64> @case3_2(<vscale x 1 x i64> %0, {<vscale x 2 x i64>, <vscale x 2 x i64>} %1, <vscale x 1 x i64> %2) {
; CHECK-LABEL: case3_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vadd.vv v8, v10, v12
; CHECK-NEXT:    ret
  %a = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 0
  %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 1
  %add = add <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i64> %add
}
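; Each field of the {<vscale x 8 x i64>, <vscale x 8 x i64>} aggregate needs a
; full m8 group, which no longer fits in the remaining argument registers, so
; the whole aggregate is passed by reference: both fields are loaded from the
; pointer in a0, the second at an offset of vlenb scaled by 8.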
; %0 -> v8
; %1 -> {by-ref, by-ref}
; %2 -> v9
define <vscale x 8 x i64> @case4_1(<vscale x 1 x i64> %0, {<vscale x 8 x i64>, <vscale x 8 x i64>} %1, <vscale x 1 x i64> %2) {
; CHECK-LABEL: case4_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, a0, a1
; CHECK-NEXT:    vl8re64.v v8, (a1)
; CHECK-NEXT:    vl8re64.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v16, v8
; CHECK-NEXT:    ret
  %a = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i64> } %1, 0
  %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i64> } %1, 1
  %add = add <vscale x 8 x i64> %a, %b
  ret <vscale x 8 x i64> %add
}
define <vscale x 1 x i64> @case4_2(<vscale x 1 x i64> %0, {<vscale x 8 x i64>, <vscale x 8 x i64>} %1, <vscale x 1 x i64> %2) {
; CHECK-LABEL: case4_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %add = add <vscale x 1 x i64> %0, %2
  ret <vscale x 1 x i64> %add
}

declare <vscale x 1 x i64> @callee1()
declare void @callee2(<vscale x 1 x i64>)
declare void @callee3(<vscale x 4 x i32>)
define void @caller() {
; RV32-LABEL: caller:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    call callee1
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vadd.vv v8, v8, v8
; RV32-NEXT:    call callee2
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: caller:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    call callee1
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vadd.vv v8, v8, v8
; RV64-NEXT:    call callee2
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
  %a = call <vscale x 1 x i64> @callee1()
  %add = add <vscale x 1 x i64> %a, %a
  call void @callee2(<vscale x 1 x i64> %add)
  ret void
}
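; The two fields of the {<vscale x 4 x i32>, <vscale x 4 x i32>} result are
; returned in v8-v9 and v10-v11, so adding them only takes the single
; vadd.vv v8, v8, v10 checked below.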
declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple()
define void @caller_tuple() {
; RV32-LABEL: caller_tuple:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    call callee_tuple
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vadd.vv v8, v8, v10
; RV32-NEXT:    call callee3
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: caller_tuple:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    call callee_tuple
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT:    vadd.vv v8, v8, v10
; RV64-NEXT:    call callee3
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
  %a = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple()
  %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 0
  %c = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 1
  %add = add <vscale x 4 x i32> %b, %c
  call void @callee3(<vscale x 4 x i32> %add)
  ret void
}
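; A nested aggregate is flattened for register assignment, so the three
; <vscale x 4 x i32> fields returned by callee_nested arrive in v8-v9, v10-v11
; and v12-v13, matching the two vadd.vv instructions checked below.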
declare {<vscale x 4 x i32>, {<vscale x 4 x i32>, <vscale x 4 x i32>}} @callee_nested()
define void @caller_nested() {
; RV32-LABEL: caller_nested:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    call callee_nested
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vadd.vv v8, v8, v10
; RV32-NEXT:    vadd.vv v8, v8, v12
; RV32-NEXT:    call callee3
; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: caller_nested:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    call callee_nested
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT:    vadd.vv v8, v8, v10
; RV64-NEXT:    vadd.vv v8, v8, v12
; RV64-NEXT:    call callee3
; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
  %a = call {<vscale x 4 x i32>, {<vscale x 4 x i32>, <vscale x 4 x i32>}} @callee_nested()
  %b = extractvalue {<vscale x 4 x i32>, {<vscale x 4 x i32>, <vscale x 4 x i32>}} %a, 0
  %c = extractvalue {<vscale x 4 x i32>, {<vscale x 4 x i32>, <vscale x 4 x i32>}} %a, 1
  %c0 = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %c, 0
  %c1 = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %c, 1
  %add0 = add <vscale x 4 x i32> %b, %c0
  %add1 = add <vscale x 4 x i32> %add0, %c1
  call void @callee3(<vscale x 4 x i32> %add1)
  ret void
}