; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+a,+c,+v \
; RUN:   -target-abi=lp64d -verify-machineinstrs -O0 < %s | FileCheck %s
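
; Check which VSETVLI/VSETIVLI instructions are inserted at -O0 for fixed-length
; vectors, scalable vectors, and explicit vsetvli/vsetvlimax intrinsics.
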
declare i64 @llvm.riscv.vsetvli(i64, i64, i64)
declare i64 @llvm.riscv.vsetvlimax(i64, i64)
declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i64)
declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i1>,
  i64,
  i64)
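
; Fixed-length vectors: both fadds use the same SEW/LMUL, so a single
; "vsetivli zero, 2, e64, m1" is expected to cover both vfadd.vv instructions.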
define <2 x double> @fixed_length(<2 x double> %a, <2 x double> %b) nounwind {
; CHECK-LABEL: fixed_length:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    # kill: def $v11 killed $v10
; CHECK-NEXT:    # kill: def $v9 killed $v8
; CHECK-NEXT:    # implicit-def: $v9
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vfadd.vv v9, v8, v10
; CHECK-NEXT:    # implicit-def: $v8
; CHECK-NEXT:    vfadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %1 = fadd <2 x double> %a, %b
  %2 = fadd <2 x double> %1, %b
  ret <2 x double> %2
}
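
; Scalable vectors: a single VLMAX "vsetvli a0, zero, e64, m1" is expected to
; cover both vfadd.vv instructions.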
define <vscale x 1 x double> @scalable(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: scalable:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    # implicit-def: $v9
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfadd.vv v9, v8, v10
; CHECK-NEXT:    # implicit-def: $v8
; CHECK-NEXT:    vfadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %1 = fadd <vscale x 1 x double> %a, %b
  %2 = fadd <vscale x 1 x double> %1, %b
  ret <vscale x 1 x double> %2
}
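
; Explicit vsetvlimax intrinsic with e32/mf2: the checks expect the returned VL
; (in a0) to be reused, with a new "vsetvli zero, a0, e64, m1, tu, ma" emitted
; for each e64 vfadd.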
define <vscale x 1 x double> @intrinsic_same_vlmax(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_vlmax:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    # implicit-def: $v9
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v9, v8, v10
; CHECK-NEXT:    # implicit-def: $v8
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvlimax(i64 2, i64 7)
  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 %0)
  %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %b,
    i64 %0)
  ret <vscale x 1 x double> %2
}
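
; Same pattern, but the AVL is the immediate 2 passed to the vsetvli intrinsic.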
define <vscale x 1 x double> @intrinsic_same_avl_imm(<vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_avl_imm:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vsetivli a0, 2, e32, mf2, ta, ma
; CHECK-NEXT:    # implicit-def: $v9
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v9, v8, v10
; CHECK-NEXT:    # implicit-def: $v8
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli(i64 2, i64 2, i64 7)
  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 %0)
  %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %b,
    i64 %0)
  ret <vscale x 1 x double> %2
}
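
; Same pattern with the AVL passed in as a register argument (%avl in a0).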
define <vscale x 1 x double> @intrinsic_same_avl_reg(i64 %avl, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_same_avl_reg:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vsetvli a0, a0, e32, mf2, ta, ma
; CHECK-NEXT:    # implicit-def: $v9
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v9, v8, v10
; CHECK-NEXT:    # implicit-def: $v8
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 %0)
  %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %b,
    i64 %0)
  ret <vscale x 1 x double> %2
}
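
; Two different AVL registers: the checks expect a second "vsetvli a0, a1, ..."
; for %avl2 between the two vfadd.vv instructions.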
define <vscale x 1 x double> @intrinsic_diff_avl_reg(i64 %avl, i64 %avl2, <vscale x 1 x double> %a, <vscale x 1 x double> %b) nounwind {
; CHECK-LABEL: intrinsic_diff_avl_reg:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vsetvli a0, a0, e32, mf2, ta, ma
; CHECK-NEXT:    # implicit-def: $v9
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli a0, a1, e32, mf2, ta, ma
; CHECK-NEXT:    # implicit-def: $v8
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 7)
  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %a,
    <vscale x 1 x double> %b,
    i64 %0)
  %2 = tail call i64 @llvm.riscv.vsetvli(i64 %avl2, i64 2, i64 7)
  %3 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %b,
    i64 %2)
  ret <vscale x 1 x double> %3
}