; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr='+v' -O3 %s -o - | FileCheck %s
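
; These tests build serial chains of RVV arithmetic intrinsics and check
; whether they get rewritten into independent operations (presumably by the
; machine combiner at -O3): a check such as vadd.vv v8, v8, v8, computing
; %0 + %0, only appears if the chain has been reassociated.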

declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32, i32)

declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)
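
; %c computes %0 + (%0 + (%0 + %1)); the checks expect the reassociated form
; (%0 + %0) + (%0 + %1), so the two inner vadds can issue independently.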
define <vscale x 1 x i8> @simple_vadd_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: simple_vadd_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v9, v8, v9
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)
  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)
  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)
  ret <vscale x 1 x i8> %c
}
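
; As above, but the innermost operation is a vsub. The two outer vadds can
; still be reassociated around it: %0 + (%0 + (%0 - %1)) becomes
; (%0 + %0) + (%0 - %1).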
define <vscale x 1 x i8> @simple_vadd_vsub_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: simple_vadd_vsub_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsub.vv v9, v8, v9
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)
  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)
  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)
  ret <vscale x 1 x i8> %c
}
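
; Multiplication reassociates the same way: %0 * (%0 * (%0 * %1)) becomes
; (%0 * %0) * (%0 * %1).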
define <vscale x 1 x i8> @simple_vmul_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: simple_vmul_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmul.vv v9, v8, v9
; CHECK-NEXT:    vmul.vv v8, v8, v8
; CHECK-NEXT:    vmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)
  %b = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)
  %c = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)
  ret <vscale x 1 x i8> %c
}

; With passthru and masks.
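; All three vadds take %0 as their passthru (hence the "tu" tail policy), so
; reassociation is still expected; the vmv1r copies seed each destination
; register with the passthru value first.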
define <vscale x 1 x i8> @vadd_vv_passthru(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: vadd_vv_passthru:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vadd.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vadd.vv v9, v8, v8
; CHECK-NEXT:    vadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)
  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)
  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)
  ret <vscale x 1 x i8> %c
}
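
; Negative test: %b takes %1 rather than %0 as its passthru, so the chain
; must stay in its original serial order.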
define <vscale x 1 x i8> @vadd_vv_passthru_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: vadd_vv_passthru_negative:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vadd.vv v10, v8, v9
; CHECK-NEXT:    vadd.vv v9, v8, v10
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)
  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    i32 %2)
  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    i32 %2)
  ret <vscale x 1 x i8> %c
}
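
; Masked variant: all three vadds share the same mask %m and passthru %0, so
; the reassociated form is still expected, now executed under v0.t with a
; "mu" mask policy.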
define <vscale x 1 x i8> @vadd_vv_mask(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m) nounwind {
; CHECK-LABEL: vadd_vv_mask:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vadd.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vadd.vv v9, v8, v8, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)
  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)
  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)
  ret <vscale x 1 x i8> %c
}
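
; Negative test: %c is masked by %m2 rather than %m, so reassociation is not
; legal and the serial chain is kept; note the vmv1r.v v0, v10 switching to
; the second mask before the final vadd.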
define <vscale x 1 x i8> @vadd_vv_mask_negative(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %m, <vscale x 1 x i1> %m2) nounwind {
; CHECK-LABEL: vadd_vv_mask_negative:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v11, v8
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vadd.vv v11, v8, v9, v0.t
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vadd.vv v9, v8, v11, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)
  %b = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %a,
    <vscale x 1 x i1> %m,
    i32 %2, i32 1)
  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %b,
    <vscale x 1 x i1> %m2,
    i32 %2, i32 1)
  ret <vscale x 1 x i8> %c
}