; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+zve64x -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV32
; RUN: llc -mtriple=riscv64 -mattr=+zve64x -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV64
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=RV64

; FIXME: We are over-aligning the stack on V, wasting stack space.
; Check that a scalar object requiring 16-byte alignment is placed correctly
; relative to the scalable (vlenb-sized) stack region: the fixed-size frame is
; allocated first, the scalable part is subtracted below it, and the returned
; pointer (%c) is addressed at a 16-byte-aligned fixed offset from sp.
define ptr @scalar_stack_align16() nounwind {
; RV32-LABEL: scalar_stack_align16:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -48
; RV32-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 1
; RV32-NEXT:    sub sp, sp, a0
; RV32-NEXT:    addi a0, sp, 32
; RV32-NEXT:    call extern
; RV32-NEXT:    addi a0, sp, 16
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    slli a1, a1, 1
; RV32-NEXT:    add sp, sp, a1
; RV32-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 48
; RV32-NEXT:    ret
;
; RV64-LABEL: scalar_stack_align16:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -48
; RV64-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 1
; RV64-NEXT:    sub sp, sp, a0
; RV64-NEXT:    addi a0, sp, 32
; RV64-NEXT:    call extern
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a1, a1, 1
; RV64-NEXT:    add sp, sp, a1
; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 48
; RV64-NEXT:    ret
  %a = alloca <vscale x 2 x i32>
  %c = alloca i64, align 16
  call void @extern(ptr %a)
  ret ptr %c
}
; External callee the test passes the scalable alloca to; forces %a to be
; materialized on the stack rather than optimized away.
declare void @extern(ptr)