; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zfbfmin,+experimental-zvfbfwma \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zfbfmin,+experimental-zvfbfwma \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
; Unmasked vector-vector widening bf16 FMA (nxv1): operands are (acc, vs1, vs2, frm=7, vl, policy=tu).
declare <vscale x 1 x float> @llvm.riscv.vfwmaccbf16.nxv1f32.nxv1bf16(
  <vscale x 1 x float>,
  <vscale x 1 x bfloat>,
  <vscale x 1 x bfloat>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmaccbf16_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv1f32_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwmaccbf16.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmaccbf16.nxv1f32.nxv1bf16(
    <vscale x 1 x float> %0,
    <vscale x 1 x bfloat> %1,
    <vscale x 1 x bfloat> %2,
    iXLen 7, iXLen %3, iXLen 0)

  ret <vscale x 1 x float> %a
}
; Masked vector-vector widening bf16 FMA (nxv1): mask operand %3 selects active lanes (mu policy).
declare <vscale x 1 x float> @llvm.riscv.vfwmaccbf16.mask.nxv1f32.nxv1bf16(
  <vscale x 1 x float>,
  <vscale x 1 x bfloat>,
  <vscale x 1 x bfloat>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmaccbf16_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfwmaccbf16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmaccbf16.mask.nxv1f32.nxv1bf16(
    <vscale x 1 x float> %0,
    <vscale x 1 x bfloat> %1,
    <vscale x 1 x bfloat> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 0)

  ret <vscale x 1 x float> %a
}
; Unmasked vector-vector widening bf16 FMA (nxv2).
declare <vscale x 2 x float> @llvm.riscv.vfwmaccbf16.nxv2f32.nxv2bf16(
  <vscale x 2 x float>,
  <vscale x 2 x bfloat>,
  <vscale x 2 x bfloat>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwmaccbf16_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv2f32_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vfwmaccbf16.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwmaccbf16.nxv2f32.nxv2bf16(
    <vscale x 2 x float> %0,
    <vscale x 2 x bfloat> %1,
    <vscale x 2 x bfloat> %2,
    iXLen 7, iXLen %3, iXLen 0)

  ret <vscale x 2 x float> %a
}
; Masked vector-vector widening bf16 FMA (nxv2).
declare <vscale x 2 x float> @llvm.riscv.vfwmaccbf16.mask.nxv2f32.nxv2bf16(
  <vscale x 2 x float>,
  <vscale x 2 x bfloat>,
  <vscale x 2 x bfloat>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwmaccbf16_mask_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x float> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv2f32_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfwmaccbf16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwmaccbf16.mask.nxv2f32.nxv2bf16(
    <vscale x 2 x float> %0,
    <vscale x 2 x bfloat> %1,
    <vscale x 2 x bfloat> %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 0)

  ret <vscale x 2 x float> %a
}
; Unmasked vector-vector widening bf16 FMA (nxv4, LMUL=1 source / LMUL=2 dest register grouping).
declare <vscale x 4 x float> @llvm.riscv.vfwmaccbf16.nxv4f32.nxv4bf16(
  <vscale x 4 x float>,
  <vscale x 4 x bfloat>,
  <vscale x 4 x bfloat>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfwmaccbf16_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv4f32_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfwmaccbf16.vv v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwmaccbf16.nxv4f32.nxv4bf16(
    <vscale x 4 x float> %0,
    <vscale x 4 x bfloat> %1,
    <vscale x 4 x bfloat> %2,
    iXLen 7, iXLen %3, iXLen 0)

  ret <vscale x 4 x float> %a
}
; Masked vector-vector widening bf16 FMA (nxv4).
declare <vscale x 4 x float> @llvm.riscv.vfwmaccbf16.mask.nxv4f32.nxv4bf16(
  <vscale x 4 x float>,
  <vscale x 4 x bfloat>,
  <vscale x 4 x bfloat>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfwmaccbf16_mask_vv_nxv4f32_nxv4bf16_nxv4bf16(<vscale x 4 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv4f32_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfwmaccbf16.vv v8, v10, v11, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwmaccbf16.mask.nxv4f32.nxv4bf16(
    <vscale x 4 x float> %0,
    <vscale x 4 x bfloat> %1,
    <vscale x 4 x bfloat> %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 0)

  ret <vscale x 4 x float> %a
}
; Unmasked vector-vector widening bf16 FMA (nxv8).
declare <vscale x 8 x float> @llvm.riscv.vfwmaccbf16.nxv8f32.nxv8bf16(
  <vscale x 8 x float>,
  <vscale x 8 x bfloat>,
  <vscale x 8 x bfloat>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfwmaccbf16_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv8f32_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vfwmaccbf16.vv v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwmaccbf16.nxv8f32.nxv8bf16(
    <vscale x 8 x float> %0,
    <vscale x 8 x bfloat> %1,
    <vscale x 8 x bfloat> %2,
    iXLen 7, iXLen %3, iXLen 0)

  ret <vscale x 8 x float> %a
}
; Masked vector-vector widening bf16 FMA (nxv8).
declare <vscale x 8 x float> @llvm.riscv.vfwmaccbf16.mask.nxv8f32.nxv8bf16(
  <vscale x 8 x float>,
  <vscale x 8 x bfloat>,
  <vscale x 8 x bfloat>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfwmaccbf16_mask_vv_nxv8f32_nxv8bf16_nxv8bf16(<vscale x 8 x float> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv8f32_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfwmaccbf16.vv v8, v12, v14, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwmaccbf16.mask.nxv8f32.nxv8bf16(
    <vscale x 8 x float> %0,
    <vscale x 8 x bfloat> %1,
    <vscale x 8 x bfloat> %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 0)

  ret <vscale x 8 x float> %a
}
; Unmasked vector-vector widening bf16 FMA (nxv16).
declare <vscale x 16 x float> @llvm.riscv.vfwmaccbf16.nxv16f32.nxv16bf16(
  <vscale x 16 x float>,
  <vscale x 16 x bfloat>,
  <vscale x 16 x bfloat>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwmaccbf16_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_vv_nxv16f32_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vfwmaccbf16.vv v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwmaccbf16.nxv16f32.nxv16bf16(
    <vscale x 16 x float> %0,
    <vscale x 16 x bfloat> %1,
    <vscale x 16 x bfloat> %2,
    iXLen 7, iXLen %3, iXLen 0)

  ret <vscale x 16 x float> %a
}
; Masked vector-vector widening bf16 FMA (nxv16).
declare <vscale x 16 x float> @llvm.riscv.vfwmaccbf16.mask.nxv16f32.nxv16bf16(
  <vscale x 16 x float>,
  <vscale x 16 x bfloat>,
  <vscale x 16 x bfloat>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwmaccbf16_mask_vv_nxv16f32_nxv16bf16_nxv16bf16(<vscale x 16 x float> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vv_nxv16f32_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfwmaccbf16.vv v8, v16, v20, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwmaccbf16.mask.nxv16f32.nxv16bf16(
    <vscale x 16 x float> %0,
    <vscale x 16 x bfloat> %1,
    <vscale x 16 x bfloat> %2,
    <vscale x 16 x i1> %3,
    iXLen 7, iXLen %4, iXLen 0)

  ret <vscale x 16 x float> %a
}
; Unmasked vector-scalar widening bf16 FMA (nxv1): scalar bf16 multiplicand comes in fa0.
declare <vscale x 1 x float> @llvm.riscv.vfwmaccbf16.nxv1f32.bf16(
  <vscale x 1 x float>,
  bfloat,
  <vscale x 1 x bfloat>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmaccbf16_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_vf_nxv1f32_bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfwmaccbf16.vf v8, fa0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmaccbf16.nxv1f32.bf16(
    <vscale x 1 x float> %0,
    bfloat %1,
    <vscale x 1 x bfloat> %2,
    iXLen 7, iXLen %3, iXLen 0)

  ret <vscale x 1 x float> %a
}
; Masked vector-scalar widening bf16 FMA (nxv1).
declare <vscale x 1 x float> @llvm.riscv.vfwmaccbf16.mask.nxv1f32.bf16(
  <vscale x 1 x float>,
  bfloat,
  <vscale x 1 x bfloat>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfwmaccbf16_mask_vf_nxv1f32_bf16_nxv1bf16(<vscale x 1 x float> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv1f32_bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vfwmaccbf16.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmaccbf16.mask.nxv1f32.bf16(
    <vscale x 1 x float> %0,
    bfloat %1,
    <vscale x 1 x bfloat> %2,
    <vscale x 1 x i1> %3,
    iXLen 7, iXLen %4, iXLen 0)

  ret <vscale x 1 x float> %a
}
; Unmasked vector-scalar widening bf16 FMA (nxv2).
declare <vscale x 2 x float> @llvm.riscv.vfwmaccbf16.nxv2f32.bf16(
  <vscale x 2 x float>,
  bfloat,
  <vscale x 2 x bfloat>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwmaccbf16_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_vf_nxv2f32_bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vfwmaccbf16.vf v8, fa0, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwmaccbf16.nxv2f32.bf16(
    <vscale x 2 x float> %0,
    bfloat %1,
    <vscale x 2 x bfloat> %2,
    iXLen 7, iXLen %3, iXLen 0)

  ret <vscale x 2 x float> %a
}
; Masked vector-scalar widening bf16 FMA (nxv2).
declare <vscale x 2 x float> @llvm.riscv.vfwmaccbf16.mask.nxv2f32.bf16(
  <vscale x 2 x float>,
  bfloat,
  <vscale x 2 x bfloat>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfwmaccbf16_mask_vf_nxv2f32_bf16_nxv2bf16(<vscale x 2 x float> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv2f32_bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vfwmaccbf16.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfwmaccbf16.mask.nxv2f32.bf16(
    <vscale x 2 x float> %0,
    bfloat %1,
    <vscale x 2 x bfloat> %2,
    <vscale x 2 x i1> %3,
    iXLen 7, iXLen %4, iXLen 0)

  ret <vscale x 2 x float> %a
}
; Unmasked vector-scalar widening bf16 FMA (nxv4).
declare <vscale x 4 x float> @llvm.riscv.vfwmaccbf16.nxv4f32.bf16(
  <vscale x 4 x float>,
  bfloat,
  <vscale x 4 x bfloat>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfwmaccbf16_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_vf_nxv4f32_bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfwmaccbf16.vf v8, fa0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwmaccbf16.nxv4f32.bf16(
    <vscale x 4 x float> %0,
    bfloat %1,
    <vscale x 4 x bfloat> %2,
    iXLen 7, iXLen %3, iXLen 0)

  ret <vscale x 4 x float> %a
}
; Masked vector-scalar widening bf16 FMA (nxv4).
declare <vscale x 4 x float> @llvm.riscv.vfwmaccbf16.mask.nxv4f32.bf16(
  <vscale x 4 x float>,
  bfloat,
  <vscale x 4 x bfloat>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfwmaccbf16_mask_vf_nxv4f32_bf16_nxv4bf16(<vscale x 4 x float> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv4f32_bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vfwmaccbf16.vf v8, fa0, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfwmaccbf16.mask.nxv4f32.bf16(
    <vscale x 4 x float> %0,
    bfloat %1,
    <vscale x 4 x bfloat> %2,
    <vscale x 4 x i1> %3,
    iXLen 7, iXLen %4, iXLen 0)

  ret <vscale x 4 x float> %a
}
; Unmasked vector-scalar widening bf16 FMA (nxv8).
declare <vscale x 8 x float> @llvm.riscv.vfwmaccbf16.nxv8f32.bf16(
  <vscale x 8 x float>,
  bfloat,
  <vscale x 8 x bfloat>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfwmaccbf16_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_vf_nxv8f32_bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vfwmaccbf16.vf v8, fa0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwmaccbf16.nxv8f32.bf16(
    <vscale x 8 x float> %0,
    bfloat %1,
    <vscale x 8 x bfloat> %2,
    iXLen 7, iXLen %3, iXLen 0)

  ret <vscale x 8 x float> %a
}
; Masked vector-scalar widening bf16 FMA (nxv8).
declare <vscale x 8 x float> @llvm.riscv.vfwmaccbf16.mask.nxv8f32.bf16(
  <vscale x 8 x float>,
  bfloat,
  <vscale x 8 x bfloat>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfwmaccbf16_mask_vf_nxv8f32_bf16_nxv8bf16(<vscale x 8 x float> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv8f32_bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vfwmaccbf16.vf v8, fa0, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfwmaccbf16.mask.nxv8f32.bf16(
    <vscale x 8 x float> %0,
    bfloat %1,
    <vscale x 8 x bfloat> %2,
    <vscale x 8 x i1> %3,
    iXLen 7, iXLen %4, iXLen 0)

  ret <vscale x 8 x float> %a
}
; Unmasked vector-scalar widening bf16 FMA (nxv16).
declare <vscale x 16 x float> @llvm.riscv.vfwmaccbf16.nxv16f32.bf16(
  <vscale x 16 x float>,
  bfloat,
  <vscale x 16 x bfloat>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwmaccbf16_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_vf_nxv16f32_bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vfwmaccbf16.vf v8, fa0, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwmaccbf16.nxv16f32.bf16(
    <vscale x 16 x float> %0,
    bfloat %1,
    <vscale x 16 x bfloat> %2,
    iXLen 7, iXLen %3, iXLen 0)

  ret <vscale x 16 x float> %a
}
; Masked vector-scalar widening bf16 FMA (nxv16).
declare <vscale x 16 x float> @llvm.riscv.vfwmaccbf16.mask.nxv16f32.bf16(
  <vscale x 16 x float>,
  bfloat,
  <vscale x 16 x bfloat>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfwmaccbf16_mask_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmaccbf16_mask_vf_nxv16f32_bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vfwmaccbf16.vf v8, fa0, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfwmaccbf16.mask.nxv16f32.bf16(
    <vscale x 16 x float> %0,
    bfloat %1,
    <vscale x 16 x bfloat> %2,
    <vscale x 16 x i1> %3,
    iXLen 7, iXLen %4, iXLen 0)

  ret <vscale x 16 x float> %a
}