; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zve64d \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64d \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
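
; This file tests code generation for the llvm.riscv.vmul intrinsics, in both
; vector-vector (vmul.vv) and vector-scalar (vmul.vx) form, unmasked and
; masked, across the supported SEW/LMUL combinations on RV32 and RV64.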

declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen, iXLen);

define <vscale x 64 x i8> @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}
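
; The e32 and e64 vector-vector tests below, and the vector-scalar (vmul.vx)
; tests that follow them, repeat the same pattern; for the i64 vector-scalar
; cases on RV32 the 64-bit scalar is first splatted through a stack slot with
; vlse64.v before the multiply.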
611 declare <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32(
617 define <vscale x 1 x i32> @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
618 ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32:
619 ; CHECK: # %bb.0: # %entry
620 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
621 ; CHECK-NEXT: vmul.vv v8, v8, v9
624 %a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32(
625 <vscale x 1 x i32> undef,
626 <vscale x 1 x i32> %0,
627 <vscale x 1 x i32> %1,
630 ret <vscale x 1 x i32> %a
633 declare <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32(
640 define <vscale x 1 x i32> @intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
641 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i32_nxv1i32_nxv1i32:
642 ; CHECK: # %bb.0: # %entry
643 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
644 ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t
647 %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32(
648 <vscale x 1 x i32> %0,
649 <vscale x 1 x i32> %1,
650 <vscale x 1 x i32> %2,
651 <vscale x 1 x i1> %3,
654 ret <vscale x 1 x i32> %a
657 declare <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32(
663 define <vscale x 2 x i32> @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
664 ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32:
665 ; CHECK: # %bb.0: # %entry
666 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
667 ; CHECK-NEXT: vmul.vv v8, v8, v9
670 %a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32(
671 <vscale x 2 x i32> undef,
672 <vscale x 2 x i32> %0,
673 <vscale x 2 x i32> %1,
676 ret <vscale x 2 x i32> %a
679 declare <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32(
686 define <vscale x 2 x i32> @intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
687 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i32_nxv2i32_nxv2i32:
688 ; CHECK: # %bb.0: # %entry
689 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
690 ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t
693 %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32(
694 <vscale x 2 x i32> %0,
695 <vscale x 2 x i32> %1,
696 <vscale x 2 x i32> %2,
697 <vscale x 2 x i1> %3,
700 ret <vscale x 2 x i32> %a
703 declare <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(
709 define <vscale x 4 x i32> @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
710 ; CHECK-LABEL: intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32:
711 ; CHECK: # %bb.0: # %entry
712 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
713 ; CHECK-NEXT: vmul.vv v8, v8, v10
716 %a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(
717 <vscale x 4 x i32> undef,
718 <vscale x 4 x i32> %0,
719 <vscale x 4 x i32> %1,
722 ret <vscale x 4 x i32> %a
725 declare <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32(
732 define <vscale x 4 x i32> @intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
733 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i32_nxv4i32_nxv4i32:
734 ; CHECK: # %bb.0: # %entry
735 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
736 ; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t
739 %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32(
740 <vscale x 4 x i32> %0,
741 <vscale x 4 x i32> %1,
742 <vscale x 4 x i32> %2,
743 <vscale x 4 x i1> %3,
746 ret <vscale x 4 x i32> %a
749 declare <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32(
755 define <vscale x 8 x i32> @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
756 ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32:
757 ; CHECK: # %bb.0: # %entry
758 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
759 ; CHECK-NEXT: vmul.vv v8, v8, v12
762 %a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32(
763 <vscale x 8 x i32> undef,
764 <vscale x 8 x i32> %0,
765 <vscale x 8 x i32> %1,
768 ret <vscale x 8 x i32> %a
771 declare <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32(
778 define <vscale x 8 x i32> @intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
779 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i32_nxv8i32_nxv8i32:
780 ; CHECK: # %bb.0: # %entry
781 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
782 ; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t
785 %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32(
786 <vscale x 8 x i32> %0,
787 <vscale x 8 x i32> %1,
788 <vscale x 8 x i32> %2,
789 <vscale x 8 x i1> %3,
792 ret <vscale x 8 x i32> %a
795 declare <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32(
801 define <vscale x 16 x i32> @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
802 ; CHECK-LABEL: intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32:
803 ; CHECK: # %bb.0: # %entry
804 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
805 ; CHECK-NEXT: vmul.vv v8, v8, v16
808 %a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32(
809 <vscale x 16 x i32> undef,
810 <vscale x 16 x i32> %0,
811 <vscale x 16 x i32> %1,
814 ret <vscale x 16 x i32> %a
817 declare <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
824 define <vscale x 16 x i32> @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
825 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
826 ; CHECK: # %bb.0: # %entry
827 ; CHECK-NEXT: vl8re32.v v24, (a0)
828 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
829 ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
832 %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32(
833 <vscale x 16 x i32> %0,
834 <vscale x 16 x i32> %1,
835 <vscale x 16 x i32> %2,
836 <vscale x 16 x i1> %3,
839 ret <vscale x 16 x i32> %a
842 declare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
848 define <vscale x 1 x i64> @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
849 ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64:
850 ; CHECK: # %bb.0: # %entry
851 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
852 ; CHECK-NEXT: vmul.vv v8, v8, v9
855 %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64(
856 <vscale x 1 x i64> undef,
857 <vscale x 1 x i64> %0,
858 <vscale x 1 x i64> %1,
861 ret <vscale x 1 x i64> %a
864 declare <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(
871 define <vscale x 1 x i64> @intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
872 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
873 ; CHECK: # %bb.0: # %entry
874 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
875 ; CHECK-NEXT: vmul.vv v8, v9, v10, v0.t
878 %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64(
879 <vscale x 1 x i64> %0,
880 <vscale x 1 x i64> %1,
881 <vscale x 1 x i64> %2,
882 <vscale x 1 x i1> %3,
885 ret <vscale x 1 x i64> %a
888 declare <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
894 define <vscale x 2 x i64> @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
895 ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64:
896 ; CHECK: # %bb.0: # %entry
897 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
898 ; CHECK-NEXT: vmul.vv v8, v8, v10
901 %a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64(
902 <vscale x 2 x i64> undef,
903 <vscale x 2 x i64> %0,
904 <vscale x 2 x i64> %1,
907 ret <vscale x 2 x i64> %a
910 declare <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64(
917 define <vscale x 2 x i64> @intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
918 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
919 ; CHECK: # %bb.0: # %entry
920 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
921 ; CHECK-NEXT: vmul.vv v8, v10, v12, v0.t
924 %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64(
925 <vscale x 2 x i64> %0,
926 <vscale x 2 x i64> %1,
927 <vscale x 2 x i64> %2,
928 <vscale x 2 x i1> %3,
931 ret <vscale x 2 x i64> %a
934 declare <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
940 define <vscale x 4 x i64> @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
941 ; CHECK-LABEL: intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64:
942 ; CHECK: # %bb.0: # %entry
943 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
944 ; CHECK-NEXT: vmul.vv v8, v8, v12
947 %a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64(
948 <vscale x 4 x i64> undef,
949 <vscale x 4 x i64> %0,
950 <vscale x 4 x i64> %1,
953 ret <vscale x 4 x i64> %a
956 declare <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64(
963 define <vscale x 4 x i64> @intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
964 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
965 ; CHECK: # %bb.0: # %entry
966 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
967 ; CHECK-NEXT: vmul.vv v8, v12, v16, v0.t
970 %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64(
971 <vscale x 4 x i64> %0,
972 <vscale x 4 x i64> %1,
973 <vscale x 4 x i64> %2,
974 <vscale x 4 x i1> %3,
977 ret <vscale x 4 x i64> %a
980 declare <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
986 define <vscale x 8 x i64> @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
987 ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64:
988 ; CHECK: # %bb.0: # %entry
989 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
990 ; CHECK-NEXT: vmul.vv v8, v8, v16
993 %a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64(
994 <vscale x 8 x i64> undef,
995 <vscale x 8 x i64> %0,
996 <vscale x 8 x i64> %1,
999 ret <vscale x 8 x i64> %a
1002 declare <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64(
1009 define <vscale x 8 x i64> @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1010 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
1011 ; CHECK: # %bb.0: # %entry
1012 ; CHECK-NEXT: vl8re64.v v24, (a0)
1013 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
1014 ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
1017 %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64(
1018 <vscale x 8 x i64> %0,
1019 <vscale x 8 x i64> %1,
1020 <vscale x 8 x i64> %2,
1021 <vscale x 8 x i1> %3,
1024 ret <vscale x 8 x i64> %a
1027 declare <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8(
1033 define <vscale x 1 x i8> @intrinsic_vmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
1034 ; CHECK-LABEL: intrinsic_vmul_vx_nxv1i8_nxv1i8_i8:
1035 ; CHECK: # %bb.0: # %entry
1036 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1037 ; CHECK-NEXT: vmul.vx v8, v8, a0
1040 %a = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8(
1041 <vscale x 1 x i8> undef,
1042 <vscale x 1 x i8> %0,
1046 ret <vscale x 1 x i8> %a
1049 declare <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8(
1056 define <vscale x 1 x i8> @intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1057 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i8_nxv1i8_i8:
1058 ; CHECK: # %bb.0: # %entry
1059 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
1060 ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t
1063 %a = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8(
1064 <vscale x 1 x i8> %0,
1065 <vscale x 1 x i8> %1,
1067 <vscale x 1 x i1> %3,
1070 ret <vscale x 1 x i8> %a
1073 declare <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8(
1079 define <vscale x 2 x i8> @intrinsic_vmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
1080 ; CHECK-LABEL: intrinsic_vmul_vx_nxv2i8_nxv2i8_i8:
1081 ; CHECK: # %bb.0: # %entry
1082 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1083 ; CHECK-NEXT: vmul.vx v8, v8, a0
1086 %a = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8(
1087 <vscale x 2 x i8> undef,
1088 <vscale x 2 x i8> %0,
1092 ret <vscale x 2 x i8> %a
1095 declare <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8(
1102 define <vscale x 2 x i8> @intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1103 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i8_nxv2i8_i8:
1104 ; CHECK: # %bb.0: # %entry
1105 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1106 ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t
1109 %a = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8(
1110 <vscale x 2 x i8> %0,
1111 <vscale x 2 x i8> %1,
1113 <vscale x 2 x i1> %3,
1116 ret <vscale x 2 x i8> %a
1119 declare <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8(
1125 define <vscale x 4 x i8> @intrinsic_vmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
1126 ; CHECK-LABEL: intrinsic_vmul_vx_nxv4i8_nxv4i8_i8:
1127 ; CHECK: # %bb.0: # %entry
1128 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1129 ; CHECK-NEXT: vmul.vx v8, v8, a0
1132 %a = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8(
1133 <vscale x 4 x i8> undef,
1134 <vscale x 4 x i8> %0,
1138 ret <vscale x 4 x i8> %a
1141 declare <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8(
1148 define <vscale x 4 x i8> @intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1149 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i8_nxv4i8_i8:
1150 ; CHECK: # %bb.0: # %entry
1151 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1152 ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t
1155 %a = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8(
1156 <vscale x 4 x i8> %0,
1157 <vscale x 4 x i8> %1,
1159 <vscale x 4 x i1> %3,
1162 ret <vscale x 4 x i8> %a
1165 declare <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8(
1171 define <vscale x 8 x i8> @intrinsic_vmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
1172 ; CHECK-LABEL: intrinsic_vmul_vx_nxv8i8_nxv8i8_i8:
1173 ; CHECK: # %bb.0: # %entry
1174 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1175 ; CHECK-NEXT: vmul.vx v8, v8, a0
1178 %a = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8(
1179 <vscale x 8 x i8> undef,
1180 <vscale x 8 x i8> %0,
1184 ret <vscale x 8 x i8> %a
1187 declare <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8(
1194 define <vscale x 8 x i8> @intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1195 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i8_nxv8i8_i8:
1196 ; CHECK: # %bb.0: # %entry
1197 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1198 ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t
1201 %a = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8(
1202 <vscale x 8 x i8> %0,
1203 <vscale x 8 x i8> %1,
1205 <vscale x 8 x i1> %3,
1208 ret <vscale x 8 x i8> %a
1211 declare <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8(
1217 define <vscale x 16 x i8> @intrinsic_vmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
1218 ; CHECK-LABEL: intrinsic_vmul_vx_nxv16i8_nxv16i8_i8:
1219 ; CHECK: # %bb.0: # %entry
1220 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1221 ; CHECK-NEXT: vmul.vx v8, v8, a0
1224 %a = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8(
1225 <vscale x 16 x i8> undef,
1226 <vscale x 16 x i8> %0,
1230 ret <vscale x 16 x i8> %a
1233 declare <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8(
1240 define <vscale x 16 x i8> @intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1241 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i8_nxv16i8_i8:
1242 ; CHECK: # %bb.0: # %entry
1243 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
1244 ; CHECK-NEXT: vmul.vx v8, v10, a0, v0.t
1247 %a = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8(
1248 <vscale x 16 x i8> %0,
1249 <vscale x 16 x i8> %1,
1251 <vscale x 16 x i1> %3,
1254 ret <vscale x 16 x i8> %a
1257 declare <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8(
1263 define <vscale x 32 x i8> @intrinsic_vmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
1264 ; CHECK-LABEL: intrinsic_vmul_vx_nxv32i8_nxv32i8_i8:
1265 ; CHECK: # %bb.0: # %entry
1266 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1267 ; CHECK-NEXT: vmul.vx v8, v8, a0
1270 %a = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8(
1271 <vscale x 32 x i8> undef,
1272 <vscale x 32 x i8> %0,
1276 ret <vscale x 32 x i8> %a
1279 declare <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8(
1286 define <vscale x 32 x i8> @intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1287 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i8_nxv32i8_i8:
1288 ; CHECK: # %bb.0: # %entry
1289 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
1290 ; CHECK-NEXT: vmul.vx v8, v12, a0, v0.t
1293 %a = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8(
1294 <vscale x 32 x i8> %0,
1295 <vscale x 32 x i8> %1,
1297 <vscale x 32 x i1> %3,
1300 ret <vscale x 32 x i8> %a
1303 declare <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8(
1309 define <vscale x 64 x i8> @intrinsic_vmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
1310 ; CHECK-LABEL: intrinsic_vmul_vx_nxv64i8_nxv64i8_i8:
1311 ; CHECK: # %bb.0: # %entry
1312 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
1313 ; CHECK-NEXT: vmul.vx v8, v8, a0
1316 %a = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8(
1317 <vscale x 64 x i8> undef,
1318 <vscale x 64 x i8> %0,
1322 ret <vscale x 64 x i8> %a
1325 declare <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8(
1332 define <vscale x 64 x i8> @intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
1333 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv64i8_nxv64i8_i8:
1334 ; CHECK: # %bb.0: # %entry
1335 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
1336 ; CHECK-NEXT: vmul.vx v8, v16, a0, v0.t
1339 %a = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8(
1340 <vscale x 64 x i8> %0,
1341 <vscale x 64 x i8> %1,
1343 <vscale x 64 x i1> %3,
1346 ret <vscale x 64 x i8> %a
1349 declare <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16(
1355 define <vscale x 1 x i16> @intrinsic_vmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
1356 ; CHECK-LABEL: intrinsic_vmul_vx_nxv1i16_nxv1i16_i16:
1357 ; CHECK: # %bb.0: # %entry
1358 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1359 ; CHECK-NEXT: vmul.vx v8, v8, a0
1362 %a = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16(
1363 <vscale x 1 x i16> undef,
1364 <vscale x 1 x i16> %0,
1368 ret <vscale x 1 x i16> %a
1371 declare <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16(
1378 define <vscale x 1 x i16> @intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1379 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i16_nxv1i16_i16:
1380 ; CHECK: # %bb.0: # %entry
1381 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1382 ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t
1385 %a = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16(
1386 <vscale x 1 x i16> %0,
1387 <vscale x 1 x i16> %1,
1389 <vscale x 1 x i1> %3,
1392 ret <vscale x 1 x i16> %a
1395 declare <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16(
1401 define <vscale x 2 x i16> @intrinsic_vmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
1402 ; CHECK-LABEL: intrinsic_vmul_vx_nxv2i16_nxv2i16_i16:
1403 ; CHECK: # %bb.0: # %entry
1404 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1405 ; CHECK-NEXT: vmul.vx v8, v8, a0
1408 %a = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16(
1409 <vscale x 2 x i16> undef,
1410 <vscale x 2 x i16> %0,
1414 ret <vscale x 2 x i16> %a
1417 declare <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16(
1424 define <vscale x 2 x i16> @intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1425 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i16_nxv2i16_i16:
1426 ; CHECK: # %bb.0: # %entry
1427 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1428 ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t
1431 %a = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16(
1432 <vscale x 2 x i16> %0,
1433 <vscale x 2 x i16> %1,
1435 <vscale x 2 x i1> %3,
1438 ret <vscale x 2 x i16> %a
1441 declare <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16(
1447 define <vscale x 4 x i16> @intrinsic_vmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
1448 ; CHECK-LABEL: intrinsic_vmul_vx_nxv4i16_nxv4i16_i16:
1449 ; CHECK: # %bb.0: # %entry
1450 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1451 ; CHECK-NEXT: vmul.vx v8, v8, a0
1454 %a = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16(
1455 <vscale x 4 x i16> undef,
1456 <vscale x 4 x i16> %0,
1460 ret <vscale x 4 x i16> %a
1463 declare <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16(
1470 define <vscale x 4 x i16> @intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1471 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i16_nxv4i16_i16:
1472 ; CHECK: # %bb.0: # %entry
1473 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1474 ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t
1477 %a = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16(
1478 <vscale x 4 x i16> %0,
1479 <vscale x 4 x i16> %1,
1481 <vscale x 4 x i1> %3,
1484 ret <vscale x 4 x i16> %a
1487 declare <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16(
1493 define <vscale x 8 x i16> @intrinsic_vmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
1494 ; CHECK-LABEL: intrinsic_vmul_vx_nxv8i16_nxv8i16_i16:
1495 ; CHECK: # %bb.0: # %entry
1496 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1497 ; CHECK-NEXT: vmul.vx v8, v8, a0
1500 %a = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16(
1501 <vscale x 8 x i16> undef,
1502 <vscale x 8 x i16> %0,
1506 ret <vscale x 8 x i16> %a
1509 declare <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16(
1516 define <vscale x 8 x i16> @intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1517 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i16_nxv8i16_i16:
1518 ; CHECK: # %bb.0: # %entry
1519 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1520 ; CHECK-NEXT: vmul.vx v8, v10, a0, v0.t
1523 %a = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16(
1524 <vscale x 8 x i16> %0,
1525 <vscale x 8 x i16> %1,
1527 <vscale x 8 x i1> %3,
1530 ret <vscale x 8 x i16> %a
1533 declare <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16(
1534 <vscale x 16 x i16>,
1535 <vscale x 16 x i16>,
1539 define <vscale x 16 x i16> @intrinsic_vmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
1540 ; CHECK-LABEL: intrinsic_vmul_vx_nxv16i16_nxv16i16_i16:
1541 ; CHECK: # %bb.0: # %entry
1542 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1543 ; CHECK-NEXT: vmul.vx v8, v8, a0
1546 %a = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16(
1547 <vscale x 16 x i16> undef,
1548 <vscale x 16 x i16> %0,
1552 ret <vscale x 16 x i16> %a
1555 declare <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16(
1556 <vscale x 16 x i16>,
1557 <vscale x 16 x i16>,
1562 define <vscale x 16 x i16> @intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1563 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i16_nxv16i16_i16:
1564 ; CHECK: # %bb.0: # %entry
1565 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1566 ; CHECK-NEXT: vmul.vx v8, v12, a0, v0.t
1569 %a = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16(
1570 <vscale x 16 x i16> %0,
1571 <vscale x 16 x i16> %1,
1573 <vscale x 16 x i1> %3,
1576 ret <vscale x 16 x i16> %a
1579 declare <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16(
1580 <vscale x 32 x i16>,
1581 <vscale x 32 x i16>,
1585 define <vscale x 32 x i16> @intrinsic_vmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
1586 ; CHECK-LABEL: intrinsic_vmul_vx_nxv32i16_nxv32i16_i16:
1587 ; CHECK: # %bb.0: # %entry
1588 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1589 ; CHECK-NEXT: vmul.vx v8, v8, a0
1592 %a = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16(
1593 <vscale x 32 x i16> undef,
1594 <vscale x 32 x i16> %0,
1598 ret <vscale x 32 x i16> %a
1601 declare <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16(
1602 <vscale x 32 x i16>,
1603 <vscale x 32 x i16>,
1608 define <vscale x 32 x i16> @intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1609 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv32i16_nxv32i16_i16:
1610 ; CHECK: # %bb.0: # %entry
1611 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
1612 ; CHECK-NEXT: vmul.vx v8, v16, a0, v0.t
1615 %a = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16(
1616 <vscale x 32 x i16> %0,
1617 <vscale x 32 x i16> %1,
1619 <vscale x 32 x i1> %3,
1622 ret <vscale x 32 x i16> %a
1625 declare <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32(
1631 define <vscale x 1 x i32> @intrinsic_vmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
1632 ; CHECK-LABEL: intrinsic_vmul_vx_nxv1i32_nxv1i32_i32:
1633 ; CHECK: # %bb.0: # %entry
1634 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1635 ; CHECK-NEXT: vmul.vx v8, v8, a0
1638 %a = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32(
1639 <vscale x 1 x i32> undef,
1640 <vscale x 1 x i32> %0,
1644 ret <vscale x 1 x i32> %a
1647 declare <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32(
1654 define <vscale x 1 x i32> @intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1655 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv1i32_nxv1i32_i32:
1656 ; CHECK: # %bb.0: # %entry
1657 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1658 ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t
1661 %a = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32(
1662 <vscale x 1 x i32> %0,
1663 <vscale x 1 x i32> %1,
1665 <vscale x 1 x i1> %3,
1668 ret <vscale x 1 x i32> %a
1671 declare <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32(
1677 define <vscale x 2 x i32> @intrinsic_vmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
1678 ; CHECK-LABEL: intrinsic_vmul_vx_nxv2i32_nxv2i32_i32:
1679 ; CHECK: # %bb.0: # %entry
1680 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1681 ; CHECK-NEXT: vmul.vx v8, v8, a0
1684 %a = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32(
1685 <vscale x 2 x i32> undef,
1686 <vscale x 2 x i32> %0,
1690 ret <vscale x 2 x i32> %a
1693 declare <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32(
1700 define <vscale x 2 x i32> @intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1701 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv2i32_nxv2i32_i32:
1702 ; CHECK: # %bb.0: # %entry
1703 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1704 ; CHECK-NEXT: vmul.vx v8, v9, a0, v0.t
1707 %a = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32(
1708 <vscale x 2 x i32> %0,
1709 <vscale x 2 x i32> %1,
1711 <vscale x 2 x i1> %3,
1714 ret <vscale x 2 x i32> %a
1717 declare <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32(
1723 define <vscale x 4 x i32> @intrinsic_vmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
1724 ; CHECK-LABEL: intrinsic_vmul_vx_nxv4i32_nxv4i32_i32:
1725 ; CHECK: # %bb.0: # %entry
1726 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1727 ; CHECK-NEXT: vmul.vx v8, v8, a0
1730 %a = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32(
1731 <vscale x 4 x i32> undef,
1732 <vscale x 4 x i32> %0,
1736 ret <vscale x 4 x i32> %a
1739 declare <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32(
1746 define <vscale x 4 x i32> @intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1747 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv4i32_nxv4i32_i32:
1748 ; CHECK: # %bb.0: # %entry
1749 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1750 ; CHECK-NEXT: vmul.vx v8, v10, a0, v0.t
1753 %a = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32(
1754 <vscale x 4 x i32> %0,
1755 <vscale x 4 x i32> %1,
1757 <vscale x 4 x i1> %3,
1760 ret <vscale x 4 x i32> %a
1763 declare <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32(
1769 define <vscale x 8 x i32> @intrinsic_vmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
1770 ; CHECK-LABEL: intrinsic_vmul_vx_nxv8i32_nxv8i32_i32:
1771 ; CHECK: # %bb.0: # %entry
1772 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1773 ; CHECK-NEXT: vmul.vx v8, v8, a0
1776 %a = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32(
1777 <vscale x 8 x i32> undef,
1778 <vscale x 8 x i32> %0,
1782 ret <vscale x 8 x i32> %a
1785 declare <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32(
1792 define <vscale x 8 x i32> @intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1793 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv8i32_nxv8i32_i32:
1794 ; CHECK: # %bb.0: # %entry
1795 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1796 ; CHECK-NEXT: vmul.vx v8, v12, a0, v0.t
1799 %a = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32(
1800 <vscale x 8 x i32> %0,
1801 <vscale x 8 x i32> %1,
1803 <vscale x 8 x i1> %3,
1806 ret <vscale x 8 x i32> %a
1809 declare <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32(
1810 <vscale x 16 x i32>,
1811 <vscale x 16 x i32>,
1815 define <vscale x 16 x i32> @intrinsic_vmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
1816 ; CHECK-LABEL: intrinsic_vmul_vx_nxv16i32_nxv16i32_i32:
1817 ; CHECK: # %bb.0: # %entry
1818 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1819 ; CHECK-NEXT: vmul.vx v8, v8, a0
1822 %a = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32(
1823 <vscale x 16 x i32> undef,
1824 <vscale x 16 x i32> %0,
1828 ret <vscale x 16 x i32> %a
1831 declare <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32(
1832 <vscale x 16 x i32>,
1833 <vscale x 16 x i32>,
1838 define <vscale x 16 x i32> @intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1839 ; CHECK-LABEL: intrinsic_vmul_mask_vx_nxv16i32_nxv16i32_i32:
1840 ; CHECK: # %bb.0: # %entry
1841 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
1842 ; CHECK-NEXT: vmul.vx v8, v16, a0, v0.t
1845 %a = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32(
1846 <vscale x 16 x i32> %0,
1847 <vscale x 16 x i32> %1,
1849 <vscale x 16 x i1> %3,
1852 ret <vscale x 16 x i32> %a
1855 declare <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
1861 define <vscale x 1 x i64> @intrinsic_vmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
1862 ; RV32-LABEL: intrinsic_vmul_vx_nxv1i64_nxv1i64_i64:
1863 ; RV32: # %bb.0: # %entry
1864 ; RV32-NEXT: addi sp, sp, -16
1865 ; RV32-NEXT: sw a1, 12(sp)
1866 ; RV32-NEXT: sw a0, 8(sp)
1867 ; RV32-NEXT: addi a0, sp, 8
1868 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1869 ; RV32-NEXT: vlse64.v v9, (a0), zero
1870 ; RV32-NEXT: vmul.vv v8, v8, v9
1871 ; RV32-NEXT: addi sp, sp, 16
1874 ; RV64-LABEL: intrinsic_vmul_vx_nxv1i64_nxv1i64_i64:
1875 ; RV64: # %bb.0: # %entry
1876 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1877 ; RV64-NEXT: vmul.vx v8, v8, a0
1880 %a = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64(
1881 <vscale x 1 x i64> undef,
1882 <vscale x 1 x i64> %0,
1886 ret <vscale x 1 x i64> %a
1889 declare <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64(
1896 define <vscale x 1 x i64> @intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1897 ; RV32-LABEL: intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64:
1898 ; RV32: # %bb.0: # %entry
1899 ; RV32-NEXT: addi sp, sp, -16
1900 ; RV32-NEXT: sw a1, 12(sp)
1901 ; RV32-NEXT: sw a0, 8(sp)
1902 ; RV32-NEXT: addi a0, sp, 8
1903 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
1904 ; RV32-NEXT: vlse64.v v10, (a0), zero
1905 ; RV32-NEXT: vmul.vv v8, v9, v10, v0.t
1906 ; RV32-NEXT: addi sp, sp, 16
1909 ; RV64-LABEL: intrinsic_vmul_mask_vx_nxv1i64_nxv1i64_i64:
1910 ; RV64: # %bb.0: # %entry
1911 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1912 ; RV64-NEXT: vmul.vx v8, v9, a0, v0.t
1915 %a = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64(
1916 <vscale x 1 x i64> %0,
1917 <vscale x 1 x i64> %1,
1919 <vscale x 1 x i1> %3,
1922 ret <vscale x 1 x i64> %a
1925 declare <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
1931 define <vscale x 2 x i64> @intrinsic_vmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
1932 ; RV32-LABEL: intrinsic_vmul_vx_nxv2i64_nxv2i64_i64:
1933 ; RV32: # %bb.0: # %entry
1934 ; RV32-NEXT: addi sp, sp, -16
1935 ; RV32-NEXT: sw a1, 12(sp)
1936 ; RV32-NEXT: sw a0, 8(sp)
1937 ; RV32-NEXT: addi a0, sp, 8
1938 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1939 ; RV32-NEXT: vlse64.v v10, (a0), zero
1940 ; RV32-NEXT: vmul.vv v8, v8, v10
1941 ; RV32-NEXT: addi sp, sp, 16
1944 ; RV64-LABEL: intrinsic_vmul_vx_nxv2i64_nxv2i64_i64:
1945 ; RV64: # %bb.0: # %entry
1946 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1947 ; RV64-NEXT: vmul.vx v8, v8, a0
1950 %a = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64(
1951 <vscale x 2 x i64> undef,
1952 <vscale x 2 x i64> %0,
1956 ret <vscale x 2 x i64> %a
1959 declare <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64(
1966 define <vscale x 2 x i64> @intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1967 ; RV32-LABEL: intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64:
1968 ; RV32: # %bb.0: # %entry
1969 ; RV32-NEXT: addi sp, sp, -16
1970 ; RV32-NEXT: sw a1, 12(sp)
1971 ; RV32-NEXT: sw a0, 8(sp)
1972 ; RV32-NEXT: addi a0, sp, 8
1973 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
1974 ; RV32-NEXT: vlse64.v v12, (a0), zero
1975 ; RV32-NEXT: vmul.vv v8, v10, v12, v0.t
1976 ; RV32-NEXT: addi sp, sp, 16
1979 ; RV64-LABEL: intrinsic_vmul_mask_vx_nxv2i64_nxv2i64_i64:
1980 ; RV64: # %bb.0: # %entry
1981 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
1982 ; RV64-NEXT: vmul.vx v8, v10, a0, v0.t
1985 %a = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64(
1986 <vscale x 2 x i64> %0,
1987 <vscale x 2 x i64> %1,
1989 <vscale x 2 x i1> %3,
1992 ret <vscale x 2 x i64> %a
1995 declare <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
2001 define <vscale x 4 x i64> @intrinsic_vmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
2002 ; RV32-LABEL: intrinsic_vmul_vx_nxv4i64_nxv4i64_i64:
2003 ; RV32: # %bb.0: # %entry
2004 ; RV32-NEXT: addi sp, sp, -16
2005 ; RV32-NEXT: sw a1, 12(sp)
2006 ; RV32-NEXT: sw a0, 8(sp)
2007 ; RV32-NEXT: addi a0, sp, 8
2008 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
2009 ; RV32-NEXT: vlse64.v v12, (a0), zero
2010 ; RV32-NEXT: vmul.vv v8, v8, v12
2011 ; RV32-NEXT: addi sp, sp, 16
2014 ; RV64-LABEL: intrinsic_vmul_vx_nxv4i64_nxv4i64_i64:
2015 ; RV64: # %bb.0: # %entry
2016 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
2017 ; RV64-NEXT: vmul.vx v8, v8, a0
2020 %a = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64(
2021 <vscale x 4 x i64> undef,
2022 <vscale x 4 x i64> %0,
2026 ret <vscale x 4 x i64> %a
2029 declare <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
2036 define <vscale x 4 x i64> @intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
2037 ; RV32-LABEL: intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64:
2038 ; RV32: # %bb.0: # %entry
2039 ; RV32-NEXT: addi sp, sp, -16
2040 ; RV32-NEXT: sw a1, 12(sp)
2041 ; RV32-NEXT: sw a0, 8(sp)
2042 ; RV32-NEXT: addi a0, sp, 8
2043 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
2044 ; RV32-NEXT: vlse64.v v16, (a0), zero
2045 ; RV32-NEXT: vmul.vv v8, v12, v16, v0.t
2046 ; RV32-NEXT: addi sp, sp, 16
2049 ; RV64-LABEL: intrinsic_vmul_mask_vx_nxv4i64_nxv4i64_i64:
2050 ; RV64: # %bb.0: # %entry
2051 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
2052 ; RV64-NEXT: vmul.vx v8, v12, a0, v0.t
2055 %a = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64(
2056 <vscale x 4 x i64> %0,
2057 <vscale x 4 x i64> %1,
2059 <vscale x 4 x i1> %3,
2062 ret <vscale x 4 x i64> %a
2065 declare <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
2071 define <vscale x 8 x i64> @intrinsic_vmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
2072 ; RV32-LABEL: intrinsic_vmul_vx_nxv8i64_nxv8i64_i64:
2073 ; RV32: # %bb.0: # %entry
2074 ; RV32-NEXT: addi sp, sp, -16
2075 ; RV32-NEXT: sw a1, 12(sp)
2076 ; RV32-NEXT: sw a0, 8(sp)
2077 ; RV32-NEXT: addi a0, sp, 8
2078 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2079 ; RV32-NEXT: vlse64.v v16, (a0), zero
2080 ; RV32-NEXT: vmul.vv v8, v8, v16
2081 ; RV32-NEXT: addi sp, sp, 16
2084 ; RV64-LABEL: intrinsic_vmul_vx_nxv8i64_nxv8i64_i64:
2085 ; RV64: # %bb.0: # %entry
2086 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
2087 ; RV64-NEXT: vmul.vx v8, v8, a0
2090 %a = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64(
2091 <vscale x 8 x i64> undef,
2092 <vscale x 8 x i64> %0,
2096 ret <vscale x 8 x i64> %a
2099 declare <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
2106 define <vscale x 8 x i64> @intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
2107 ; RV32-LABEL: intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64:
2108 ; RV32: # %bb.0: # %entry
2109 ; RV32-NEXT: addi sp, sp, -16
2110 ; RV32-NEXT: sw a1, 12(sp)
2111 ; RV32-NEXT: sw a0, 8(sp)
2112 ; RV32-NEXT: addi a0, sp, 8
2113 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
2114 ; RV32-NEXT: vlse64.v v24, (a0), zero
2115 ; RV32-NEXT: vmul.vv v8, v16, v24, v0.t
2116 ; RV32-NEXT: addi sp, sp, 16
2119 ; RV64-LABEL: intrinsic_vmul_mask_vx_nxv8i64_nxv8i64_i64:
2120 ; RV64: # %bb.0: # %entry
2121 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
2122 ; RV64-NEXT: vmul.vx v8, v16, a0, v0.t
2125 %a = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64(
2126 <vscale x 8 x i64> %0,
2127 <vscale x 8 x i64> %1,
2129 <vscale x 8 x i1> %3,
2132 ret <vscale x 8 x i64> %a