; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v < %s \
; RUN:   | FileCheck %s -check-prefix=RV64IV
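
; The functions below allocate scalable-vector (RVV) locals of increasing
; size, plus frames mixing scalars, variable-sized objects, and overaligned
; objects. RVV locals are always addressed as a base register plus a fixed
; offset plus a multiple of vlenb.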
define void @local_var_mf8() {
; RV64IV-LABEL: local_var_mf8:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -16
; RV64IV-NEXT:    .cfi_def_cfa_offset 16
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 1
; RV64IV-NEXT:    sub sp, sp, a0
; RV64IV-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    add a0, sp, a0
; RV64IV-NEXT:    addi a0, a0, 16
; RV64IV-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; RV64IV-NEXT:    vle8.v v8, (a0)
; RV64IV-NEXT:    addi a0, sp, 16
; RV64IV-NEXT:    vle8.v v8, (a0)
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 1
; RV64IV-NEXT:    add sp, sp, a0
; RV64IV-NEXT:    addi sp, sp, 16
; RV64IV-NEXT:    ret
  %local0 = alloca <vscale x 1 x i8>
  %local1 = alloca <vscale x 1 x i8>
  load volatile <vscale x 1 x i8>, ptr %local0
  load volatile <vscale x 1 x i8>, ptr %local1
  ret void
}

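; Each <vscale x 8 x i8> local is one full vector register (m1), so the frame
; reserves 2 * vlenb bytes of scalable stack and reloads use vl1r.v.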
define void @local_var_m1() {
; RV64IV-LABEL: local_var_m1:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -16
; RV64IV-NEXT:    .cfi_def_cfa_offset 16
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 1
; RV64IV-NEXT:    sub sp, sp, a0
; RV64IV-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    add a0, sp, a0
; RV64IV-NEXT:    addi a0, a0, 16
; RV64IV-NEXT:    vl1r.v v8, (a0)
; RV64IV-NEXT:    addi a0, sp, 16
; RV64IV-NEXT:    vl1r.v v8, (a0)
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 1
; RV64IV-NEXT:    add sp, sp, a0
; RV64IV-NEXT:    addi sp, sp, 16
; RV64IV-NEXT:    ret
  %local0 = alloca <vscale x 8 x i8>
  %local1 = alloca <vscale x 8 x i8>
  load volatile <vscale x 8 x i8>, ptr %local0
  load volatile <vscale x 8 x i8>, ptr %local1
  ret void
}

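; Two m2-sized locals need 4 * vlenb bytes of scalable stack; reloads use
; vl2r.v and the second local sits 2 * vlenb above the first.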
define void @local_var_m2() {
; RV64IV-LABEL: local_var_m2:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -16
; RV64IV-NEXT:    .cfi_def_cfa_offset 16
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 2
; RV64IV-NEXT:    sub sp, sp, a0
; RV64IV-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 1
; RV64IV-NEXT:    add a0, sp, a0
; RV64IV-NEXT:    addi a0, a0, 16
; RV64IV-NEXT:    vl2r.v v8, (a0)
; RV64IV-NEXT:    addi a0, sp, 16
; RV64IV-NEXT:    vl2r.v v8, (a0)
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 2
; RV64IV-NEXT:    add sp, sp, a0
; RV64IV-NEXT:    addi sp, sp, 16
; RV64IV-NEXT:    ret
  %local0 = alloca <vscale x 16 x i8>
  %local1 = alloca <vscale x 16 x i8>
  load volatile <vscale x 16 x i8>, ptr %local0
  load volatile <vscale x 16 x i8>, ptr %local1
  ret void
}

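; m4 locals require 32-byte alignment, so the stack is realigned
; (andi sp, sp, -32) and a frame pointer in s0 is needed to restore sp.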
define void @local_var_m4() {
; RV64IV-LABEL: local_var_m4:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -48
; RV64IV-NEXT:    .cfi_def_cfa_offset 48
; RV64IV-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IV-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IV-NEXT:    .cfi_offset ra, -8
; RV64IV-NEXT:    .cfi_offset s0, -16
; RV64IV-NEXT:    addi s0, sp, 48
; RV64IV-NEXT:    .cfi_def_cfa s0, 0
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 3
; RV64IV-NEXT:    sub sp, sp, a0
; RV64IV-NEXT:    andi sp, sp, -32
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 2
; RV64IV-NEXT:    add a0, sp, a0
; RV64IV-NEXT:    addi a0, a0, 32
; RV64IV-NEXT:    vl4r.v v8, (a0)
; RV64IV-NEXT:    addi a0, sp, 32
; RV64IV-NEXT:    vl4r.v v8, (a0)
; RV64IV-NEXT:    addi sp, s0, -48
; RV64IV-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IV-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IV-NEXT:    addi sp, sp, 48
; RV64IV-NEXT:    ret
  %local0 = alloca <vscale x 32 x i8>
  %local1 = alloca <vscale x 32 x i8>
  load volatile <vscale x 32 x i8>, ptr %local0
  load volatile <vscale x 32 x i8>, ptr %local1
  ret void
}

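; m8 locals push the realignment up to 64 bytes (andi sp, sp, -64); the
; layout otherwise mirrors local_var_m4.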
define void @local_var_m8() {
; RV64IV-LABEL: local_var_m8:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -80
; RV64IV-NEXT:    .cfi_def_cfa_offset 80
; RV64IV-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
; RV64IV-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
; RV64IV-NEXT:    .cfi_offset ra, -8
; RV64IV-NEXT:    .cfi_offset s0, -16
; RV64IV-NEXT:    addi s0, sp, 80
; RV64IV-NEXT:    .cfi_def_cfa s0, 0
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 4
; RV64IV-NEXT:    sub sp, sp, a0
; RV64IV-NEXT:    andi sp, sp, -64
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 3
; RV64IV-NEXT:    add a0, sp, a0
; RV64IV-NEXT:    addi a0, a0, 64
; RV64IV-NEXT:    vl8r.v v8, (a0)
; RV64IV-NEXT:    addi a0, sp, 64
; RV64IV-NEXT:    vl8r.v v8, (a0)
; RV64IV-NEXT:    addi sp, s0, -80
; RV64IV-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
; RV64IV-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
; RV64IV-NEXT:    addi sp, sp, 80
; RV64IV-NEXT:    ret
  %local0 = alloca <vscale x 64 x i8>
  %local1 = alloca <vscale x 64 x i8>
  load volatile <vscale x 64 x i8>, ptr %local0
  load volatile <vscale x 64 x i8>, ptr %local1
  ret void
}

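; Scalar locals stay in the fixed part of the frame (12(sp) and 8(sp)) while
; the RVV locals live in the scalable region starting at sp + 16.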
define void @local_var_m2_mix_local_scalar() {
; RV64IV-LABEL: local_var_m2_mix_local_scalar:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -16
; RV64IV-NEXT:    .cfi_def_cfa_offset 16
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 2
; RV64IV-NEXT:    sub sp, sp, a0
; RV64IV-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
; RV64IV-NEXT:    lw zero, 12(sp)
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 1
; RV64IV-NEXT:    add a0, sp, a0
; RV64IV-NEXT:    addi a0, a0, 16
; RV64IV-NEXT:    vl2r.v v8, (a0)
; RV64IV-NEXT:    addi a0, sp, 16
; RV64IV-NEXT:    vl2r.v v8, (a0)
; RV64IV-NEXT:    lw zero, 8(sp)
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 2
; RV64IV-NEXT:    add sp, sp, a0
; RV64IV-NEXT:    addi sp, sp, 16
; RV64IV-NEXT:    ret
  %local_scalar0 = alloca i32
  %local0 = alloca <vscale x 16 x i8>
  %local1 = alloca <vscale x 16 x i8>
  %local_scalar1 = alloca i32
  load volatile i32, ptr %local_scalar0
  load volatile <vscale x 16 x i8>, ptr %local0
  load volatile <vscale x 16 x i8>, ptr %local1
  load volatile i32, ptr %local_scalar1
  ret void
}

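; With a variable-sized object in the frame, sp no longer has a fixed
; relationship to the RVV locals, so they are addressed relative to the
; frame pointer s0 instead.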
define void @local_var_m2_with_varsize_object(i64 %n) {
; RV64IV-LABEL: local_var_m2_with_varsize_object:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -32
; RV64IV-NEXT:    .cfi_def_cfa_offset 32
; RV64IV-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IV-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64IV-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64IV-NEXT:    .cfi_offset ra, -8
; RV64IV-NEXT:    .cfi_offset s0, -16
; RV64IV-NEXT:    .cfi_offset s1, -24
; RV64IV-NEXT:    addi s0, sp, 32
; RV64IV-NEXT:    .cfi_def_cfa s0, 0
; RV64IV-NEXT:    csrr a1, vlenb
; RV64IV-NEXT:    slli a1, a1, 2
; RV64IV-NEXT:    sub sp, sp, a1
; RV64IV-NEXT:    addi a0, a0, 15
; RV64IV-NEXT:    andi a0, a0, -16
; RV64IV-NEXT:    sub a0, sp, a0
; RV64IV-NEXT:    mv sp, a0
; RV64IV-NEXT:    csrr a1, vlenb
; RV64IV-NEXT:    slli a1, a1, 1
; RV64IV-NEXT:    sub a1, s0, a1
; RV64IV-NEXT:    addi a1, a1, -32
; RV64IV-NEXT:    csrr s1, vlenb
; RV64IV-NEXT:    slli s1, s1, 1
; RV64IV-NEXT:    sub s1, s0, s1
; RV64IV-NEXT:    addi s1, s1, -32
; RV64IV-NEXT:    call notdead
; RV64IV-NEXT:    vl2r.v v8, (s1)
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 2
; RV64IV-NEXT:    sub a0, s0, a0
; RV64IV-NEXT:    addi a0, a0, -32
; RV64IV-NEXT:    vl2r.v v8, (a0)
; RV64IV-NEXT:    addi sp, s0, -32
; RV64IV-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IV-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64IV-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64IV-NEXT:    addi sp, sp, 32
; RV64IV-NEXT:    ret
  %1 = alloca i8, i64 %n
  %2 = alloca <vscale x 16 x i8>
  %3 = alloca <vscale x 16 x i8>
  call void @notdead(ptr %1, ptr %2)
  load volatile <vscale x 16 x i8>, ptr %2
  load volatile <vscale x 16 x i8>, ptr %3
  ret void
}

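; An overaligned fixed object plus a variable-sized object force a base
; pointer: s1 anchors the realigned locals while s0 remains the frame pointer.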
define void @local_var_m2_with_bp(i64 %n) {
; RV64IV-LABEL: local_var_m2_with_bp:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -256
; RV64IV-NEXT:    .cfi_def_cfa_offset 256
; RV64IV-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
; RV64IV-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
; RV64IV-NEXT:    sd s1, 232(sp) # 8-byte Folded Spill
; RV64IV-NEXT:    sd s2, 224(sp) # 8-byte Folded Spill
; RV64IV-NEXT:    .cfi_offset ra, -8
; RV64IV-NEXT:    .cfi_offset s0, -16
; RV64IV-NEXT:    .cfi_offset s1, -24
; RV64IV-NEXT:    .cfi_offset s2, -32
; RV64IV-NEXT:    addi s0, sp, 256
; RV64IV-NEXT:    .cfi_def_cfa s0, 0
; RV64IV-NEXT:    csrr a1, vlenb
; RV64IV-NEXT:    slli a1, a1, 2
; RV64IV-NEXT:    sub sp, sp, a1
; RV64IV-NEXT:    andi sp, sp, -128
; RV64IV-NEXT:    mv s1, sp
; RV64IV-NEXT:    addi a0, a0, 15
; RV64IV-NEXT:    andi a0, a0, -16
; RV64IV-NEXT:    sub a0, sp, a0
; RV64IV-NEXT:    mv sp, a0
; RV64IV-NEXT:    addi a1, s1, 128
; RV64IV-NEXT:    csrr a2, vlenb
; RV64IV-NEXT:    slli a2, a2, 1
; RV64IV-NEXT:    add a2, s1, a2
; RV64IV-NEXT:    addi a2, a2, 224
; RV64IV-NEXT:    csrr s2, vlenb
; RV64IV-NEXT:    slli s2, s2, 1
; RV64IV-NEXT:    add s2, s1, s2
; RV64IV-NEXT:    addi s2, s2, 224
; RV64IV-NEXT:    call notdead2
; RV64IV-NEXT:    lw zero, 124(s1)
; RV64IV-NEXT:    vl2r.v v8, (s2)
; RV64IV-NEXT:    addi a0, s1, 224
; RV64IV-NEXT:    vl2r.v v8, (a0)
; RV64IV-NEXT:    lw zero, 120(s1)
; RV64IV-NEXT:    addi sp, s0, -256
; RV64IV-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
; RV64IV-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
; RV64IV-NEXT:    ld s1, 232(sp) # 8-byte Folded Reload
; RV64IV-NEXT:    ld s2, 224(sp) # 8-byte Folded Reload
; RV64IV-NEXT:    addi sp, sp, 256
; RV64IV-NEXT:    ret
  %1 = alloca i8, i64 %n
  %2 = alloca i32, align 128
  %local_scalar0 = alloca i32
  %local0 = alloca <vscale x 16 x i8>
  %local1 = alloca <vscale x 16 x i8>
  %local_scalar1 = alloca i32
  call void @notdead2(ptr %1, ptr %2, ptr %local0)
  load volatile i32, ptr %local_scalar0
  load volatile <vscale x 16 x i8>, ptr %local0
  load volatile <vscale x 16 x i8>, ptr %local1
  load volatile i32, ptr %local_scalar1
  ret void
}

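; A fixed stack object (the incoming stack argument %8) sits above the
; scalable region, so reading it requires offsetting sp by 8 * vlenb.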
define i64 @fixed_object(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5, i64 %6, i64 %7, i64 %8) nounwind {
; RV64IV-LABEL: fixed_object:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -16
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 3
; RV64IV-NEXT:    sub sp, sp, a0
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 3
; RV64IV-NEXT:    add a0, sp, a0
; RV64IV-NEXT:    ld a0, 16(a0)
; RV64IV-NEXT:    csrr a1, vlenb
; RV64IV-NEXT:    slli a1, a1, 3
; RV64IV-NEXT:    add sp, sp, a1
; RV64IV-NEXT:    addi sp, sp, 16
; RV64IV-NEXT:    ret
  %fixed_size = alloca i32
  %rvv_vector = alloca <vscale x 8 x i64>, align 8
  ret i64 %8
}

declare void @notdead(ptr, ptr)
declare void @notdead2(ptr, ptr, ptr)