; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -O2 < %s \
; RUN:   | FileCheck %s -check-prefix=RV64IV
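
; The frame below holds only fixed-size objects (an i64 local plus a
; 64 x i64 array), so the full 528-byte frame is allocated with a plain
; addi and every access uses a constant offset from sp; no vlenb-based
; adjustment is required.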
define <vscale x 1 x i64> @access_fixed_object(ptr %val) {
; RV64IV-LABEL: access_fixed_object:
; RV64IV: # %bb.0:
; RV64IV-NEXT: addi sp, sp, -528
; RV64IV-NEXT: .cfi_def_cfa_offset 528
; RV64IV-NEXT: addi a1, sp, 8
; RV64IV-NEXT: vl1re64.v v8, (a1)
; RV64IV-NEXT: ld a1, 520(sp)
; RV64IV-NEXT: sd a1, 0(a0)
; RV64IV-NEXT: addi sp, sp, 528
; RV64IV-NEXT: .cfi_def_cfa_offset 0
; RV64IV-NEXT: ret
  %local = alloca i64
  %array = alloca [64 x i64]
  %v = load <vscale x 1 x i64>, ptr %array
  %len = load i64, ptr %local
  store i64 %len, ptr %val
  ret <vscale x 1 x i64> %v
}
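
; Scalable vadd intrinsic used below; operands are (passthru, lhs, rhs, vl).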
declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64)
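
; Mixing fixed-size objects with a scalable vector splits the frame: the
; fixed 528-byte region is still allocated with addi, while the scalable
; part is carved out by subtracting vlenb from sp (hence the .cfi_escape
; expressing the CFA as sp + 528 + 1 * vlenb).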
define <vscale x 1 x i64> @access_fixed_and_vector_objects(ptr %val) {
; RV64IV-LABEL: access_fixed_and_vector_objects:
; RV64IV: # %bb.0:
; RV64IV-NEXT: addi sp, sp, -528
; RV64IV-NEXT: .cfi_def_cfa_offset 528
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: sub sp, sp, a0
; RV64IV-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x04, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 528 + 1 * vlenb
; RV64IV-NEXT: addi a0, sp, 8
; RV64IV-NEXT: vl1re64.v v8, (a0)
; RV64IV-NEXT: addi a0, sp, 528
; RV64IV-NEXT: vl1re64.v v9, (a0)
; RV64IV-NEXT: ld a0, 520(sp)
; RV64IV-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV64IV-NEXT: vadd.vv v8, v8, v9
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: add sp, sp, a0
; RV64IV-NEXT: .cfi_def_cfa sp, 528
; RV64IV-NEXT: addi sp, sp, 528
; RV64IV-NEXT: .cfi_def_cfa_offset 0
; RV64IV-NEXT: ret
  %local = alloca i64
  %vector = alloca <vscale x 1 x i64>
  %array = alloca [64 x i64]
  %v1 = load <vscale x 1 x i64>, ptr %array
  %v2 = load <vscale x 1 x i64>, ptr %vector
  %len = load i64, ptr %local

  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %v1,
    <vscale x 1 x i64> %v2,
    i64 %len)

  ret <vscale x 1 x i64> %a
}
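
; With "probe-stack"="inline-asm", the scalable allocation must be probed:
; the loop at .LBB2_1 moves sp down one 4096-byte page at a time (lui t2, 1)
; and stores zero to touch each page, then subtracts the sub-page remainder.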
define <vscale x 1 x i64> @probe_fixed_and_vector_objects(ptr %val, <vscale x 1 x i64> %dummy) "probe-stack"="inline-asm" {
; RV64IV-LABEL: probe_fixed_and_vector_objects:
; RV64IV: # %bb.0:
; RV64IV-NEXT: addi sp, sp, -528
; RV64IV-NEXT: .cfi_def_cfa_offset 528
; RV64IV-NEXT: csrr t1, vlenb
; RV64IV-NEXT: .cfi_def_cfa t1, -8
; RV64IV-NEXT: lui t2, 1
; RV64IV-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
; RV64IV-NEXT: sub sp, sp, t2
; RV64IV-NEXT: sd zero, 0(sp)
; RV64IV-NEXT: sub t1, t1, t2
; RV64IV-NEXT: bge t1, t2, .LBB2_1
; RV64IV-NEXT: # %bb.2:
; RV64IV-NEXT: .cfi_def_cfa_register sp
; RV64IV-NEXT: sub sp, sp, t1
; RV64IV-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x04, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 528 + 1 * vlenb
; RV64IV-NEXT: addi a0, sp, 8
; RV64IV-NEXT: vl1re64.v v9, (a0)
; RV64IV-NEXT: addi a0, sp, 528
; RV64IV-NEXT: vl1re64.v v10, (a0)
; RV64IV-NEXT: ld a0, 520(sp)
; RV64IV-NEXT: vsetvli zero, a0, e64, m1, tu, ma
; RV64IV-NEXT: vadd.vv v8, v9, v10
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: add sp, sp, a0
; RV64IV-NEXT: .cfi_def_cfa sp, 528
; RV64IV-NEXT: addi sp, sp, 528
; RV64IV-NEXT: .cfi_def_cfa_offset 0
; RV64IV-NEXT: ret
  %local = alloca i64
  %vector = alloca <vscale x 1 x i64>
  %array = alloca [64 x i64]
  %v1 = load <vscale x 1 x i64>, ptr %array
  %v2 = load <vscale x 1 x i64>, ptr %vector
  %len = load i64, ptr %local

  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %dummy,
    <vscale x 1 x i64> %v1,
    <vscale x 1 x i64> %v2,
    i64 %len)

  ret <vscale x 1 x i64> %a
}