; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
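
; These tests exercise the llvm.riscv.vfmul intrinsics that carry an explicit
; rounding-mode operand. Every call passes iXLen 0 (round-to-nearest-even), so
; codegen brackets each vfmul with fsrmi (set frm, saving the old value in a
; scalar register) and fsrm (restore it). The sed in the RUN lines specializes
; iXLen to i32 or i64 so the same body covers both RV32 and RV64.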

declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x half> %a
}
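
; The masked form takes a passthru vector, the two multiplicands, a mask, and
; trailing frm/vl/policy operands (the iXLen 0, %4, 1 below). Policy value 1
; is tail-agnostic, mask-undisturbed, which is why the vsetvli in the masked
; checks uses "ta, mu" rather than "ta, ma".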

declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.nxv2f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.nxv4f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.nxv8f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.nxv16f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.nxv32f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x half> %a
}
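
; For the m8 cases the masked form needs three full LMUL=8 register groups
; plus v0 for the mask, which exhausts the vector argument registers; as the
; checks below show, the third operand is instead passed in memory and
; reloaded with vl8re16.v. The nxv16f32 and nxv8f64 masked tests follow the
; same pattern.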

declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x half> %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x float> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmul.vv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x double> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
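
; The remaining tests cover the vector-scalar form, vfmul.vf, where the
; scalar multiplicand arrives in fa0; otherwise the structure mirrors the
; vector-vector tests above.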

declare <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmul.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmul.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmul.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmul.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmul.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfmul.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfmul.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}