1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s \
3 ; RUN: | FileCheck %s --check-prefixes=CHECK,NOZBA
4 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zba -verify-machineinstrs < %s \
5 ; RUN: | FileCheck %s --check-prefixes=CHECK,ZBA
; Single LMUL=1 alloca: the scalable stack area is 2*vlenb (slli by 1) and is
; released symmetrically; no frame pointer or stack realignment is emitted.
6 define void @lmul1() nounwind {
10 ; CHECK-NEXT: csrr a0, vlenb
11 ; CHECK-NEXT: slli a0, a0, 1
12 ; CHECK-NEXT: sub sp, sp, a0
13 ; CHECK-NEXT: csrr a0, vlenb
14 ; CHECK-NEXT: slli a0, a0, 1
15 ; CHECK-NEXT: add sp, sp, a0
17 %v = alloca <vscale x 1 x i64>
; Single LMUL=2 alloca: same 2*vlenb adjustment as the LMUL=1 case, again with
; no realignment required.
21 define void @lmul2() nounwind {
24 ; CHECK-NEXT: csrr a0, vlenb
25 ; CHECK-NEXT: slli a0, a0, 1
26 ; CHECK-NEXT: sub sp, sp, a0
27 ; CHECK-NEXT: csrr a0, vlenb
28 ; CHECK-NEXT: slli a0, a0, 1
29 ; CHECK-NEXT: add sp, sp, a0
31 %v = alloca <vscale x 2 x i64>
; Single LMUL=4 alloca: now a frame pointer (s0) is set up and the stack is
; realigned to 32 bytes (andi sp, sp, -32); scalable area is 4*vlenb (slli 2).
; The epilogue restores sp from s0 rather than undoing the adjustments.
35 define void @lmul4() nounwind {
38 ; CHECK-NEXT: addi sp, sp, -48
39 ; CHECK-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
40 ; CHECK-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
41 ; CHECK-NEXT: addi s0, sp, 48
42 ; CHECK-NEXT: csrr a0, vlenb
43 ; CHECK-NEXT: slli a0, a0, 2
44 ; CHECK-NEXT: sub sp, sp, a0
45 ; CHECK-NEXT: andi sp, sp, -32
46 ; CHECK-NEXT: addi sp, s0, -48
47 ; CHECK-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
48 ; CHECK-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
49 ; CHECK-NEXT: addi sp, sp, 48
51 %v = alloca <vscale x 4 x i64>
; Single LMUL=8 alloca: frame pointer plus 64-byte realignment
; (andi sp, sp, -64); scalable area is 8*vlenb (slli 3).
55 define void @lmul8() nounwind {
58 ; CHECK-NEXT: addi sp, sp, -80
59 ; CHECK-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
60 ; CHECK-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
61 ; CHECK-NEXT: addi s0, sp, 80
62 ; CHECK-NEXT: csrr a0, vlenb
63 ; CHECK-NEXT: slli a0, a0, 3
64 ; CHECK-NEXT: sub sp, sp, a0
65 ; CHECK-NEXT: andi sp, sp, -64
66 ; CHECK-NEXT: addi sp, s0, -80
67 ; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
68 ; CHECK-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
69 ; CHECK-NEXT: addi sp, sp, 80
71 %v = alloca <vscale x 8 x i64>
; Mixed LMUL=1 + LMUL=2 allocas: combined scalable area is 4*vlenb (slli 2);
; no realignment is needed for this combination.
75 define void @lmul1_and_2() nounwind {
76 ; CHECK-LABEL: lmul1_and_2:
78 ; CHECK-NEXT: csrr a0, vlenb
79 ; CHECK-NEXT: slli a0, a0, 2
80 ; CHECK-NEXT: sub sp, sp, a0
81 ; CHECK-NEXT: csrr a0, vlenb
82 ; CHECK-NEXT: slli a0, a0, 2
83 ; CHECK-NEXT: add sp, sp, a0
85 %v1 = alloca <vscale x 1 x i64>
86 %v2 = alloca <vscale x 2 x i64>
; Mixed LMUL=2 + LMUL=4 allocas: the LMUL=4 object forces 32-byte realignment;
; combined scalable area is 8*vlenb (slli 3).
90 define void @lmul2_and_4() nounwind {
91 ; CHECK-LABEL: lmul2_and_4:
93 ; CHECK-NEXT: addi sp, sp, -48
94 ; CHECK-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
95 ; CHECK-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
96 ; CHECK-NEXT: addi s0, sp, 48
97 ; CHECK-NEXT: csrr a0, vlenb
98 ; CHECK-NEXT: slli a0, a0, 3
99 ; CHECK-NEXT: sub sp, sp, a0
100 ; CHECK-NEXT: andi sp, sp, -32
101 ; CHECK-NEXT: addi sp, s0, -48
102 ; CHECK-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
103 ; CHECK-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
104 ; CHECK-NEXT: addi sp, sp, 48
106 %v1 = alloca <vscale x 2 x i64>
107 %v2 = alloca <vscale x 4 x i64>
; Mixed LMUL=1 + LMUL=4 allocas: same frame shape as lmul2_and_4 — 32-byte
; realignment and an 8*vlenb scalable area.
111 define void @lmul1_and_4() nounwind {
112 ; CHECK-LABEL: lmul1_and_4:
114 ; CHECK-NEXT: addi sp, sp, -48
115 ; CHECK-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
116 ; CHECK-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
117 ; CHECK-NEXT: addi s0, sp, 48
118 ; CHECK-NEXT: csrr a0, vlenb
119 ; CHECK-NEXT: slli a0, a0, 3
120 ; CHECK-NEXT: sub sp, sp, a0
121 ; CHECK-NEXT: andi sp, sp, -32
122 ; CHECK-NEXT: addi sp, s0, -48
123 ; CHECK-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
124 ; CHECK-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
125 ; CHECK-NEXT: addi sp, sp, 48
127 %v1 = alloca <vscale x 1 x i64>
128 %v2 = alloca <vscale x 4 x i64>
; As lmul1_and_2 but with the alloca order reversed: still 4*vlenb and no
; realignment — ordering does not change the frame size here.
132 define void @lmul2_and_1() nounwind {
133 ; CHECK-LABEL: lmul2_and_1:
135 ; CHECK-NEXT: csrr a0, vlenb
136 ; CHECK-NEXT: slli a0, a0, 2
137 ; CHECK-NEXT: sub sp, sp, a0
138 ; CHECK-NEXT: csrr a0, vlenb
139 ; CHECK-NEXT: slli a0, a0, 2
140 ; CHECK-NEXT: add sp, sp, a0
142 %v1 = alloca <vscale x 2 x i64>
143 %v2 = alloca <vscale x 1 x i64>
; LMUL=4 first, then LMUL=1: 32-byte realignment with an 8*vlenb scalable area,
; matching lmul1_and_4.
147 define void @lmul4_and_1() nounwind {
148 ; CHECK-LABEL: lmul4_and_1:
150 ; CHECK-NEXT: addi sp, sp, -48
151 ; CHECK-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
152 ; CHECK-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
153 ; CHECK-NEXT: addi s0, sp, 48
154 ; CHECK-NEXT: csrr a0, vlenb
155 ; CHECK-NEXT: slli a0, a0, 3
156 ; CHECK-NEXT: sub sp, sp, a0
157 ; CHECK-NEXT: andi sp, sp, -32
158 ; CHECK-NEXT: addi sp, s0, -48
159 ; CHECK-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
160 ; CHECK-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
161 ; CHECK-NEXT: addi sp, sp, 48
163 %v1 = alloca <vscale x 4 x i64>
164 %v2 = alloca <vscale x 1 x i64>
; LMUL=4 first, then LMUL=2: 32-byte realignment, 8*vlenb scalable area —
; identical frame shape to lmul2_and_4.
168 define void @lmul4_and_2() nounwind {
169 ; CHECK-LABEL: lmul4_and_2:
171 ; CHECK-NEXT: addi sp, sp, -48
172 ; CHECK-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
173 ; CHECK-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
174 ; CHECK-NEXT: addi s0, sp, 48
175 ; CHECK-NEXT: csrr a0, vlenb
176 ; CHECK-NEXT: slli a0, a0, 3
177 ; CHECK-NEXT: sub sp, sp, a0
178 ; CHECK-NEXT: andi sp, sp, -32
179 ; CHECK-NEXT: addi sp, s0, -48
180 ; CHECK-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
181 ; CHECK-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
182 ; CHECK-NEXT: addi sp, sp, 48
184 %v1 = alloca <vscale x 4 x i64>
185 %v2 = alloca <vscale x 2 x i64>
; Two interleaved LMUL=4/LMUL=2 pairs (order 4,2,4,2): the scalable area is
; 16*vlenb (slli 4) with 32-byte realignment. Contrast with ..._x2_1 below,
; where a different alloca order yields a smaller 12*vlenb area.
189 define void @lmul4_and_2_x2_0() nounwind {
190 ; CHECK-LABEL: lmul4_and_2_x2_0:
192 ; CHECK-NEXT: addi sp, sp, -48
193 ; CHECK-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
194 ; CHECK-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
195 ; CHECK-NEXT: addi s0, sp, 48
196 ; CHECK-NEXT: csrr a0, vlenb
197 ; CHECK-NEXT: slli a0, a0, 4
198 ; CHECK-NEXT: sub sp, sp, a0
199 ; CHECK-NEXT: andi sp, sp, -32
200 ; CHECK-NEXT: addi sp, s0, -48
201 ; CHECK-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
202 ; CHECK-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
203 ; CHECK-NEXT: addi sp, sp, 48
205 %v1 = alloca <vscale x 4 x i64>
206 %v2 = alloca <vscale x 2 x i64>
207 %v3 = alloca <vscale x 4 x i64>
208 %v4 = alloca <vscale x 2 x i64>
; Same four allocas as ..._x2_0 but grouped by size (4,4,2,2): the scalable
; area shrinks to 12*vlenb — presumably because like-sized objects pack with
; less padding (TODO confirm against the frame-lowering code). This test also
; splits prefixes: NOZBA materializes 12 via li+mul, while ZBA folds the
; multiply into slli+sh1add (a0*4, then a0*4*2 + a0*4 = 12*vlenb).
212 define void @lmul4_and_2_x2_1() nounwind {
213 ; NOZBA-LABEL: lmul4_and_2_x2_1:
215 ; NOZBA-NEXT: addi sp, sp, -48
216 ; NOZBA-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
217 ; NOZBA-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
218 ; NOZBA-NEXT: addi s0, sp, 48
219 ; NOZBA-NEXT: csrr a0, vlenb
220 ; NOZBA-NEXT: li a1, 12
221 ; NOZBA-NEXT: mul a0, a0, a1
222 ; NOZBA-NEXT: sub sp, sp, a0
223 ; NOZBA-NEXT: andi sp, sp, -32
224 ; NOZBA-NEXT: addi sp, s0, -48
225 ; NOZBA-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
226 ; NOZBA-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
227 ; NOZBA-NEXT: addi sp, sp, 48
230 ; ZBA-LABEL: lmul4_and_2_x2_1:
232 ; ZBA-NEXT: addi sp, sp, -48
233 ; ZBA-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
234 ; ZBA-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
235 ; ZBA-NEXT: addi s0, sp, 48
236 ; ZBA-NEXT: csrr a0, vlenb
237 ; ZBA-NEXT: slli a0, a0, 2
238 ; ZBA-NEXT: sh1add a0, a0, a0
239 ; ZBA-NEXT: sub sp, sp, a0
240 ; ZBA-NEXT: andi sp, sp, -32
241 ; ZBA-NEXT: addi sp, s0, -48
242 ; ZBA-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
243 ; ZBA-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
244 ; ZBA-NEXT: addi sp, sp, 48
246 %v1 = alloca <vscale x 4 x i64>
247 %v3 = alloca <vscale x 4 x i64>
248 %v2 = alloca <vscale x 2 x i64>
249 %v4 = alloca <vscale x 2 x i64>
; Scalar (i64) stack slot mixed with LMUL=1 + LMUL=2 allocas: a fixed 16-byte
; scalar area plus a 4*vlenb scalable area. The volatile store of 3 lands in
; the scalar slot at 8(sp); no realignment is needed.
254 define void @gpr_and_lmul1_and_2() nounwind {
255 ; CHECK-LABEL: gpr_and_lmul1_and_2:
257 ; CHECK-NEXT: addi sp, sp, -16
258 ; CHECK-NEXT: csrr a0, vlenb
259 ; CHECK-NEXT: slli a0, a0, 2
260 ; CHECK-NEXT: sub sp, sp, a0
261 ; CHECK-NEXT: li a0, 3
262 ; CHECK-NEXT: sd a0, 8(sp)
263 ; CHECK-NEXT: csrr a0, vlenb
264 ; CHECK-NEXT: slli a0, a0, 2
265 ; CHECK-NEXT: add sp, sp, a0
266 ; CHECK-NEXT: addi sp, sp, 16
269 %v1 = alloca <vscale x 1 x i64>
270 %v2 = alloca <vscale x 2 x i64>
271 store volatile i64 3, ptr %x1
; Scalar i64 slot mixed with LMUL=1 + LMUL=4: the LMUL=4 object forces a frame
; pointer and 32-byte realignment; the scalar store at 8(sp) is addressed off
; the realigned sp. Scalable area is 8*vlenb.
275 define void @gpr_and_lmul1_and_4() nounwind {
276 ; CHECK-LABEL: gpr_and_lmul1_and_4:
278 ; CHECK-NEXT: addi sp, sp, -48
279 ; CHECK-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
280 ; CHECK-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
281 ; CHECK-NEXT: addi s0, sp, 48
282 ; CHECK-NEXT: csrr a0, vlenb
283 ; CHECK-NEXT: slli a0, a0, 3
284 ; CHECK-NEXT: sub sp, sp, a0
285 ; CHECK-NEXT: andi sp, sp, -32
286 ; CHECK-NEXT: li a0, 3
287 ; CHECK-NEXT: sd a0, 8(sp)
288 ; CHECK-NEXT: addi sp, s0, -48
289 ; CHECK-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
290 ; CHECK-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
291 ; CHECK-NEXT: addi sp, sp, 48
294 %v1 = alloca <vscale x 1 x i64>
295 %v2 = alloca <vscale x 4 x i64>
296 store volatile i64 3, ptr %x1
; One alloca of each LMUL 1/2/4/8: 64-byte realignment (driven by the LMUL=8
; object) and a 16*vlenb scalable area (slli 4).
300 define void @lmul_1_2_4_8() nounwind {
301 ; CHECK-LABEL: lmul_1_2_4_8:
303 ; CHECK-NEXT: addi sp, sp, -80
304 ; CHECK-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
305 ; CHECK-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
306 ; CHECK-NEXT: addi s0, sp, 80
307 ; CHECK-NEXT: csrr a0, vlenb
308 ; CHECK-NEXT: slli a0, a0, 4
309 ; CHECK-NEXT: sub sp, sp, a0
310 ; CHECK-NEXT: andi sp, sp, -64
311 ; CHECK-NEXT: addi sp, s0, -80
312 ; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
313 ; CHECK-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
314 ; CHECK-NEXT: addi sp, sp, 80
316 %v1 = alloca <vscale x 1 x i64>
317 %v2 = alloca <vscale x 2 x i64>
318 %v4 = alloca <vscale x 4 x i64>
319 %v8 = alloca <vscale x 8 x i64>
; Two allocas of each LMUL 1/2/4/8, ascending order: 64-byte realignment and a
; 32*vlenb scalable area (slli 5) — exactly double lmul_1_2_4_8.
323 define void @lmul_1_2_4_8_x2_0() nounwind {
324 ; CHECK-LABEL: lmul_1_2_4_8_x2_0:
326 ; CHECK-NEXT: addi sp, sp, -80
327 ; CHECK-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
328 ; CHECK-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
329 ; CHECK-NEXT: addi s0, sp, 80
330 ; CHECK-NEXT: csrr a0, vlenb
331 ; CHECK-NEXT: slli a0, a0, 5
332 ; CHECK-NEXT: sub sp, sp, a0
333 ; CHECK-NEXT: andi sp, sp, -64
334 ; CHECK-NEXT: addi sp, s0, -80
335 ; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
336 ; CHECK-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
337 ; CHECK-NEXT: addi sp, sp, 80
339 %v1 = alloca <vscale x 1 x i64>
340 %v2 = alloca <vscale x 1 x i64>
341 %v3 = alloca <vscale x 2 x i64>
342 %v4 = alloca <vscale x 2 x i64>
343 %v5 = alloca <vscale x 4 x i64>
344 %v6 = alloca <vscale x 4 x i64>
345 %v7 = alloca <vscale x 8 x i64>
346 %v8 = alloca <vscale x 8 x i64>
; Same eight allocas as ..._x2_0 but declared in descending LMUL order: the
; frame is identical (32*vlenb, 64-byte realignment) — order does not matter
; for this combination.
350 define void @lmul_1_2_4_8_x2_1() nounwind {
351 ; CHECK-LABEL: lmul_1_2_4_8_x2_1:
353 ; CHECK-NEXT: addi sp, sp, -80
354 ; CHECK-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
355 ; CHECK-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
356 ; CHECK-NEXT: addi s0, sp, 80
357 ; CHECK-NEXT: csrr a0, vlenb
358 ; CHECK-NEXT: slli a0, a0, 5
359 ; CHECK-NEXT: sub sp, sp, a0
360 ; CHECK-NEXT: andi sp, sp, -64
361 ; CHECK-NEXT: addi sp, s0, -80
362 ; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
363 ; CHECK-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
364 ; CHECK-NEXT: addi sp, sp, 80
366 %v8 = alloca <vscale x 8 x i64>
367 %v7 = alloca <vscale x 8 x i64>
368 %v6 = alloca <vscale x 4 x i64>
369 %v5 = alloca <vscale x 4 x i64>
370 %v4 = alloca <vscale x 2 x i64>
371 %v3 = alloca <vscale x 2 x i64>
372 %v2 = alloca <vscale x 1 x i64>
373 %v1 = alloca <vscale x 1 x i64>
; Mask-typed (i1 element) allocas at four sizes: together they take 4*vlenb
; (slli 2) with no realignment — presumably each mask object is rounded up to
; a full vector register (TODO confirm against frame lowering).
377 define void @masks() nounwind {
378 ; CHECK-LABEL: masks:
380 ; CHECK-NEXT: csrr a0, vlenb
381 ; CHECK-NEXT: slli a0, a0, 2
382 ; CHECK-NEXT: sub sp, sp, a0
383 ; CHECK-NEXT: csrr a0, vlenb
384 ; CHECK-NEXT: slli a0, a0, 2
385 ; CHECK-NEXT: add sp, sp, a0
387 %v1 = alloca <vscale x 1 x i1>
388 %v2 = alloca <vscale x 2 x i1>
389 %v4 = alloca <vscale x 4 x i1>
390 %v8 = alloca <vscale x 8 x i1>
; Five LMUL=8 allocas: 40*vlenb of scalable stack. NOZBA needs li+mul for the
; non-power-of-two factor; ZBA uses slli 3 then sh2add (4*a0 + a0 = 5*(8*vlenb)
; = 40*vlenb). 64-byte realignment in both cases.
394 define void @lmul_8_x5() nounwind {
395 ; NOZBA-LABEL: lmul_8_x5:
397 ; NOZBA-NEXT: addi sp, sp, -80
398 ; NOZBA-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
399 ; NOZBA-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
400 ; NOZBA-NEXT: addi s0, sp, 80
401 ; NOZBA-NEXT: csrr a0, vlenb
402 ; NOZBA-NEXT: li a1, 40
403 ; NOZBA-NEXT: mul a0, a0, a1
404 ; NOZBA-NEXT: sub sp, sp, a0
405 ; NOZBA-NEXT: andi sp, sp, -64
406 ; NOZBA-NEXT: addi sp, s0, -80
407 ; NOZBA-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
408 ; NOZBA-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
409 ; NOZBA-NEXT: addi sp, sp, 80
412 ; ZBA-LABEL: lmul_8_x5:
414 ; ZBA-NEXT: addi sp, sp, -80
415 ; ZBA-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
416 ; ZBA-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
417 ; ZBA-NEXT: addi s0, sp, 80
418 ; ZBA-NEXT: csrr a0, vlenb
419 ; ZBA-NEXT: slli a0, a0, 3
420 ; ZBA-NEXT: sh2add a0, a0, a0
421 ; ZBA-NEXT: sub sp, sp, a0
422 ; ZBA-NEXT: andi sp, sp, -64
423 ; ZBA-NEXT: addi sp, s0, -80
424 ; ZBA-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
425 ; ZBA-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
426 ; ZBA-NEXT: addi sp, sp, 80
428 %v1 = alloca <vscale x 8 x i64>
429 %v2 = alloca <vscale x 8 x i64>
430 %v3 = alloca <vscale x 8 x i64>
431 %v4 = alloca <vscale x 8 x i64>
432 %v5 = alloca <vscale x 8 x i64>
; Nine LMUL=8 allocas: 72*vlenb of scalable stack. NOZBA again needs li+mul;
; ZBA uses slli 3 then sh3add (8*a0 + a0 = 9*(8*vlenb) = 72*vlenb). 64-byte
; realignment in both cases.
436 define void @lmul_8_x9() nounwind {
437 ; NOZBA-LABEL: lmul_8_x9:
439 ; NOZBA-NEXT: addi sp, sp, -80
440 ; NOZBA-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
441 ; NOZBA-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
442 ; NOZBA-NEXT: addi s0, sp, 80
443 ; NOZBA-NEXT: csrr a0, vlenb
444 ; NOZBA-NEXT: li a1, 72
445 ; NOZBA-NEXT: mul a0, a0, a1
446 ; NOZBA-NEXT: sub sp, sp, a0
447 ; NOZBA-NEXT: andi sp, sp, -64
448 ; NOZBA-NEXT: addi sp, s0, -80
449 ; NOZBA-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
450 ; NOZBA-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
451 ; NOZBA-NEXT: addi sp, sp, 80
454 ; ZBA-LABEL: lmul_8_x9:
456 ; ZBA-NEXT: addi sp, sp, -80
457 ; ZBA-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
458 ; ZBA-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
459 ; ZBA-NEXT: addi s0, sp, 80
460 ; ZBA-NEXT: csrr a0, vlenb
461 ; ZBA-NEXT: slli a0, a0, 3
462 ; ZBA-NEXT: sh3add a0, a0, a0
463 ; ZBA-NEXT: sub sp, sp, a0
464 ; ZBA-NEXT: andi sp, sp, -64
465 ; ZBA-NEXT: addi sp, s0, -80
466 ; ZBA-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
467 ; ZBA-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
468 ; ZBA-NEXT: addi sp, sp, 80
470 %v1 = alloca <vscale x 8 x i64>
471 %v2 = alloca <vscale x 8 x i64>
472 %v3 = alloca <vscale x 8 x i64>
473 %v4 = alloca <vscale x 8 x i64>
474 %v5 = alloca <vscale x 8 x i64>
475 %v6 = alloca <vscale x 8 x i64>
476 %v7 = alloca <vscale x 8 x i64>
477 %v8 = alloca <vscale x 8 x i64>
478 %v9 = alloca <vscale x 8 x i64>