; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+a,+c,+v \
; RUN:   -target-abi=lp64d -verify-machineinstrs -O0 < %s | FileCheck %s
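
; These functions exercise vsetvli/vsetivli insertion at -O0: plain fadd on
; fixed-length and scalable vectors, and vfadd intrinsics whose AVL comes from
; the vsetvli/vsetvlimax intrinsics and is either reused or changed between
; the two calls.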
declare i64 @llvm.riscv.vsetvli(i64, i64, i64)
declare i64 @llvm.riscv.vsetvlimax(i64, i64)
; vfadd intrinsic operands: passthru, the two sources, rounding mode (7 = dynamic), and vl.
declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i64, i64)
; Masked unit-stride load intrinsic: passthru, pointer, mask, vl, and policy.
declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i1>,
  i64,
  i64)
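
; A fixed-length <2 x double> fadd chain: a single vsetivli (AVL 2, e64, m1)
; covers both vfadd.vv instructions.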
define <2 x double> @fixed_length(<2 x double> %a, <2 x double> %b) nounwind {
; CHECK-LABEL: fixed_length:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    # kill: def $v11 killed $v10
; CHECK-NEXT:    # kill: def $v9 killed $v8
; CHECK-NEXT:    # implicit-def: $v9
; CHECK-NEXT:    vfadd.vv v9, v8, v10
; CHECK-NEXT:    # implicit-def: $v8
; CHECK-NEXT:    vfadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %1 = fadd <2 x double> %a, %b
  %2 = fadd <2 x double> %1, %b
  ret <2 x double> %2
}
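
; A scalable fadd chain: a single VLMAX vsetvli (e64, m1) covers both
; vfadd.vv instructions.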
define <vscale x 1 x double> @scalable(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    # implicit-def: $v9
; CHECK-NEXT:    vfadd.vv v9, v8, v10
; CHECK-NEXT:    # implicit-def: $v8
; CHECK-NEXT:    vfadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %1 = fadd <vscale x 1 x double> %a, %b
  %2 = fadd <vscale x 1 x double> %1, %b
  ret <vscale x 1 x double> %2
}
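
; The VL produced by vsetvlimax (e32, mf2) feeds both vfadd intrinsics; at -O0
; a separate e64/m1 vsetvli with that AVL (in a0) is still emitted before each
; vfadd.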
define <vscale x 1 x double> @intrinsic_same_vlmax(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_vlmax:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    # implicit-def: $v9
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v9, v8, v10
; CHECK-NEXT:    # implicit-def: $v8
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvlimax(i64 2, i64 7)
  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 7, i64 %0)
  %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %b,
    i64 7, i64 %0)
  ret <vscale x 1 x double> %2
}
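
; The same immediate AVL (2) feeds both vfadd intrinsics through the vsetvli
; result; each vfadd still gets its own e64/m1 vsetvli at -O0.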
define <vscale x 1 x double> @intrinsic_same_avl_imm(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_avl_imm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli a0, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    # implicit-def: $v9
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v9, v8, v10
; CHECK-NEXT:    # implicit-def: $v8
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli(i64 2, i64 2, i64 7)
  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 7, i64 %0)
  %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %b,
    i64 7, i64 %0)
  ret <vscale x 1 x double> %2
}
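
; The same register AVL (%avl) feeds both vfadd intrinsics.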
define <vscale x 1 x double> @intrinsic_same_avl_reg(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_avl_reg:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vsetvli a0, a0, e32, mf2, ta, ma
; CHECK-NEXT:    # implicit-def: $v9
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v9, v8, v10
; CHECK-NEXT:    # implicit-def: $v8
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 7, i64 %0)
  %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %b,
    i64 7, i64 %0)
  ret <vscale x 1 x double> %2
}
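
; The two vfadd intrinsics use different register AVLs (%avl and %avl2), so a
; second vsetvli (reading a1) is emitted before the second vfadd.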
define <vscale x 1 x double> @intrinsic_diff_avl_reg(i64 %avl, i64 %avl2, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_diff_avl_reg:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vsetvli a0, a0, e32, mf2, ta, ma
; CHECK-NEXT:    # implicit-def: $v9
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli a0, a1, e32, mf2, ta, ma
; CHECK-NEXT:    # implicit-def: $v8
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 7, i64 %0)
  %2 = tail call i64 @llvm.riscv.vsetvli(i64 %avl2, i64 2, i64 7)
  %3 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %b,
    i64 7, i64 %2)
  ret <vscale x 1 x double> %3
}