; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 \
; RUN:   -mattr=+zve64d 2>&1 | FileCheck %s --check-prefixes=ZVE64D

; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vsmul
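
; This file exercises the llvm.riscv.vsmul intrinsics (vector single-width
; fractional multiply with rounding and saturation). Each test checks that
; codegen writes the requested fixed-point rounding mode (0 in these tests)
; to vxrm, emits a vsetvli matching the element width and LMUL of the type,
; and selects vsmul.vv or vsmul.vx, with v0.t masking for the .mask variants.
; The masked calls also pass a policy operand of 1, which lines up with the
; tail-agnostic, mask-undisturbed ("ta, mu") vsetvli seen in those checks.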
11 declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
17 define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
18 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
19 ; CHECK: # %bb.0: # %entry
20 ; CHECK-NEXT: csrwi vxrm, 0
21 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
22 ; CHECK-NEXT: vsmul.vv v8, v8, v9
25 %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
26 <vscale x 1 x i8> undef,
31 ret <vscale x 1 x i8> %a
34 declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
41 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
42 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
43 ; CHECK: # %bb.0: # %entry
44 ; CHECK-NEXT: csrwi vxrm, 0
45 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
46 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
49 %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
54 iXLen 0, iXLen %4, iXLen 1)
56 ret <vscale x 1 x i8> %a
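; Note the register pattern for the masked form: the passthru (%0) arrives in
; v8, the sources in v9 and v10, and the mask in v0, so the result is written
; over the passthru as "vsmul.vv v8, v9, v10, v0.t".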
59 declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
65 define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
66 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8:
67 ; CHECK: # %bb.0: # %entry
68 ; CHECK-NEXT: csrwi vxrm, 0
69 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
70 ; CHECK-NEXT: vsmul.vv v8, v8, v9
73 %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
74 <vscale x 2 x i8> undef,
79 ret <vscale x 2 x i8> %a
82 declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
89 define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
90 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8:
91 ; CHECK: # %bb.0: # %entry
92 ; CHECK-NEXT: csrwi vxrm, 0
93 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
94 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
97 %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
100 <vscale x 2 x i8> %2,
101 <vscale x 2 x i1> %3,
102 iXLen 0, iXLen %4, iXLen 1)
104 ret <vscale x 2 x i8> %a
107 declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
113 define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
114 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8:
115 ; CHECK: # %bb.0: # %entry
116 ; CHECK-NEXT: csrwi vxrm, 0
117 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
118 ; CHECK-NEXT: vsmul.vv v8, v8, v9
121 %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
122 <vscale x 4 x i8> undef,
123 <vscale x 4 x i8> %0,
124 <vscale x 4 x i8> %1,
127 ret <vscale x 4 x i8> %a
130 declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
137 define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
138 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8:
139 ; CHECK: # %bb.0: # %entry
140 ; CHECK-NEXT: csrwi vxrm, 0
141 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
142 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
145 %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
146 <vscale x 4 x i8> %0,
147 <vscale x 4 x i8> %1,
148 <vscale x 4 x i8> %2,
149 <vscale x 4 x i1> %3,
150 iXLen 0, iXLen %4, iXLen 1)
152 ret <vscale x 4 x i8> %a
155 declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
161 define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
162 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8:
163 ; CHECK: # %bb.0: # %entry
164 ; CHECK-NEXT: csrwi vxrm, 0
165 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
166 ; CHECK-NEXT: vsmul.vv v8, v8, v9
169 %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
170 <vscale x 8 x i8> undef,
171 <vscale x 8 x i8> %0,
172 <vscale x 8 x i8> %1,
175 ret <vscale x 8 x i8> %a
178 declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
185 define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
186 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8:
187 ; CHECK: # %bb.0: # %entry
188 ; CHECK-NEXT: csrwi vxrm, 0
189 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
190 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
193 %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
194 <vscale x 8 x i8> %0,
195 <vscale x 8 x i8> %1,
196 <vscale x 8 x i8> %2,
197 <vscale x 8 x i1> %3,
198 iXLen 0, iXLen %4, iXLen 1)
200 ret <vscale x 8 x i8> %a
203 declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
209 define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
210 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8:
211 ; CHECK: # %bb.0: # %entry
212 ; CHECK-NEXT: csrwi vxrm, 0
213 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
214 ; CHECK-NEXT: vsmul.vv v8, v8, v10
217 %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
218 <vscale x 16 x i8> undef,
219 <vscale x 16 x i8> %0,
220 <vscale x 16 x i8> %1,
223 ret <vscale x 16 x i8> %a
226 declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
233 define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
234 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8:
235 ; CHECK: # %bb.0: # %entry
236 ; CHECK-NEXT: csrwi vxrm, 0
237 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
238 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
241 %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
242 <vscale x 16 x i8> %0,
243 <vscale x 16 x i8> %1,
244 <vscale x 16 x i8> %2,
245 <vscale x 16 x i1> %3,
246 iXLen 0, iXLen %4, iXLen 1)
248 ret <vscale x 16 x i8> %a
251 declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
257 define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
258 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8:
259 ; CHECK: # %bb.0: # %entry
260 ; CHECK-NEXT: csrwi vxrm, 0
261 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
262 ; CHECK-NEXT: vsmul.vv v8, v8, v12
265 %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
266 <vscale x 32 x i8> undef,
267 <vscale x 32 x i8> %0,
268 <vscale x 32 x i8> %1,
271 ret <vscale x 32 x i8> %a
274 declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
281 define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
282 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8:
283 ; CHECK: # %bb.0: # %entry
284 ; CHECK-NEXT: csrwi vxrm, 0
285 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
286 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
289 %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
290 <vscale x 32 x i8> %0,
291 <vscale x 32 x i8> %1,
292 <vscale x 32 x i8> %2,
293 <vscale x 32 x i1> %3,
294 iXLen 0, iXLen %4, iXLen 1)
296 ret <vscale x 32 x i8> %a
299 declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
305 define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
306 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8:
307 ; CHECK: # %bb.0: # %entry
308 ; CHECK-NEXT: csrwi vxrm, 0
309 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
310 ; CHECK-NEXT: vsmul.vv v8, v8, v16
313 %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
314 <vscale x 64 x i8> undef,
315 <vscale x 64 x i8> %0,
316 <vscale x 64 x i8> %1,
319 ret <vscale x 64 x i8> %a
322 declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
329 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
330 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
331 ; CHECK: # %bb.0: # %entry
332 ; CHECK-NEXT: vl8r.v v24, (a0)
333 ; CHECK-NEXT: csrwi vxrm, 0
334 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
335 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
338 %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
339 <vscale x 64 x i8> %0,
340 <vscale x 64 x i8> %1,
341 <vscale x 64 x i8> %2,
342 <vscale x 64 x i1> %3,
343 iXLen 0, iXLen %4, iXLen 1)
345 ret <vscale x 64 x i8> %a
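; At LMUL=8 the three vector arguments no longer all fit in the vector
; argument registers v8-v23, so the third operand is passed indirectly and
; reloaded with vl8r.v (vl comes in a1 instead of a0) before the masked
; vsmul.vv.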
348 declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
354 define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
355 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16:
356 ; CHECK: # %bb.0: # %entry
357 ; CHECK-NEXT: csrwi vxrm, 0
358 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
359 ; CHECK-NEXT: vsmul.vv v8, v8, v9
362 %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
363 <vscale x 1 x i16> undef,
364 <vscale x 1 x i16> %0,
365 <vscale x 1 x i16> %1,
368 ret <vscale x 1 x i16> %a
371 declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
378 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
379 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16:
380 ; CHECK: # %bb.0: # %entry
381 ; CHECK-NEXT: csrwi vxrm, 0
382 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
383 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
386 %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
387 <vscale x 1 x i16> %0,
388 <vscale x 1 x i16> %1,
389 <vscale x 1 x i16> %2,
390 <vscale x 1 x i1> %3,
391 iXLen 0, iXLen %4, iXLen 1)
393 ret <vscale x 1 x i16> %a
396 declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
402 define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
403 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16:
404 ; CHECK: # %bb.0: # %entry
405 ; CHECK-NEXT: csrwi vxrm, 0
406 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
407 ; CHECK-NEXT: vsmul.vv v8, v8, v9
410 %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
411 <vscale x 2 x i16> undef,
412 <vscale x 2 x i16> %0,
413 <vscale x 2 x i16> %1,
416 ret <vscale x 2 x i16> %a
419 declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
426 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
427 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16:
428 ; CHECK: # %bb.0: # %entry
429 ; CHECK-NEXT: csrwi vxrm, 0
430 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
431 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
434 %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
435 <vscale x 2 x i16> %0,
436 <vscale x 2 x i16> %1,
437 <vscale x 2 x i16> %2,
438 <vscale x 2 x i1> %3,
439 iXLen 0, iXLen %4, iXLen 1)
441 ret <vscale x 2 x i16> %a
444 declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
450 define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
451 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16:
452 ; CHECK: # %bb.0: # %entry
453 ; CHECK-NEXT: csrwi vxrm, 0
454 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
455 ; CHECK-NEXT: vsmul.vv v8, v8, v9
458 %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
459 <vscale x 4 x i16> undef,
460 <vscale x 4 x i16> %0,
461 <vscale x 4 x i16> %1,
464 ret <vscale x 4 x i16> %a
467 declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
474 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
475 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16:
476 ; CHECK: # %bb.0: # %entry
477 ; CHECK-NEXT: csrwi vxrm, 0
478 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
479 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
482 %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
483 <vscale x 4 x i16> %0,
484 <vscale x 4 x i16> %1,
485 <vscale x 4 x i16> %2,
486 <vscale x 4 x i1> %3,
487 iXLen 0, iXLen %4, iXLen 1)
489 ret <vscale x 4 x i16> %a
492 declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
498 define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
499 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16:
500 ; CHECK: # %bb.0: # %entry
501 ; CHECK-NEXT: csrwi vxrm, 0
502 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
503 ; CHECK-NEXT: vsmul.vv v8, v8, v10
506 %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
507 <vscale x 8 x i16> undef,
508 <vscale x 8 x i16> %0,
509 <vscale x 8 x i16> %1,
512 ret <vscale x 8 x i16> %a
515 declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
522 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
523 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16:
524 ; CHECK: # %bb.0: # %entry
525 ; CHECK-NEXT: csrwi vxrm, 0
526 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
527 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
530 %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
531 <vscale x 8 x i16> %0,
532 <vscale x 8 x i16> %1,
533 <vscale x 8 x i16> %2,
534 <vscale x 8 x i1> %3,
535 iXLen 0, iXLen %4, iXLen 1)
537 ret <vscale x 8 x i16> %a
540 declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
546 define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
547 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16:
548 ; CHECK: # %bb.0: # %entry
549 ; CHECK-NEXT: csrwi vxrm, 0
550 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
551 ; CHECK-NEXT: vsmul.vv v8, v8, v12
554 %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
555 <vscale x 16 x i16> undef,
556 <vscale x 16 x i16> %0,
557 <vscale x 16 x i16> %1,
560 ret <vscale x 16 x i16> %a
563 declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
570 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
571 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16:
572 ; CHECK: # %bb.0: # %entry
573 ; CHECK-NEXT: csrwi vxrm, 0
574 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
575 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
578 %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
579 <vscale x 16 x i16> %0,
580 <vscale x 16 x i16> %1,
581 <vscale x 16 x i16> %2,
582 <vscale x 16 x i1> %3,
583 iXLen 0, iXLen %4, iXLen 1)
585 ret <vscale x 16 x i16> %a
588 declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
594 define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
595 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16:
596 ; CHECK: # %bb.0: # %entry
597 ; CHECK-NEXT: csrwi vxrm, 0
598 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
599 ; CHECK-NEXT: vsmul.vv v8, v8, v16
602 %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
603 <vscale x 32 x i16> undef,
604 <vscale x 32 x i16> %0,
605 <vscale x 32 x i16> %1,
608 ret <vscale x 32 x i16> %a
611 declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
618 define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
619 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
620 ; CHECK: # %bb.0: # %entry
621 ; CHECK-NEXT: vl8re16.v v24, (a0)
622 ; CHECK-NEXT: csrwi vxrm, 0
623 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
624 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
627 %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
628 <vscale x 32 x i16> %0,
629 <vscale x 32 x i16> %1,
630 <vscale x 32 x i16> %2,
631 <vscale x 32 x i1> %3,
632 iXLen 0, iXLen %4, iXLen 1)
634 ret <vscale x 32 x i16> %a
637 declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
643 define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
644 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32:
645 ; CHECK: # %bb.0: # %entry
646 ; CHECK-NEXT: csrwi vxrm, 0
647 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
648 ; CHECK-NEXT: vsmul.vv v8, v8, v9
651 %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
652 <vscale x 1 x i32> undef,
653 <vscale x 1 x i32> %0,
654 <vscale x 1 x i32> %1,
657 ret <vscale x 1 x i32> %a
660 declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
667 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
668 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32:
669 ; CHECK: # %bb.0: # %entry
670 ; CHECK-NEXT: csrwi vxrm, 0
671 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
672 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
675 %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
676 <vscale x 1 x i32> %0,
677 <vscale x 1 x i32> %1,
678 <vscale x 1 x i32> %2,
679 <vscale x 1 x i1> %3,
680 iXLen 0, iXLen %4, iXLen 1)
682 ret <vscale x 1 x i32> %a
685 declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
691 define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
692 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32:
693 ; CHECK: # %bb.0: # %entry
694 ; CHECK-NEXT: csrwi vxrm, 0
695 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
696 ; CHECK-NEXT: vsmul.vv v8, v8, v9
699 %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
700 <vscale x 2 x i32> undef,
701 <vscale x 2 x i32> %0,
702 <vscale x 2 x i32> %1,
705 ret <vscale x 2 x i32> %a
708 declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
715 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
716 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32:
717 ; CHECK: # %bb.0: # %entry
718 ; CHECK-NEXT: csrwi vxrm, 0
719 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
720 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
723 %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
724 <vscale x 2 x i32> %0,
725 <vscale x 2 x i32> %1,
726 <vscale x 2 x i32> %2,
727 <vscale x 2 x i1> %3,
728 iXLen 0, iXLen %4, iXLen 1)
730 ret <vscale x 2 x i32> %a
733 declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
739 define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
740 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32:
741 ; CHECK: # %bb.0: # %entry
742 ; CHECK-NEXT: csrwi vxrm, 0
743 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
744 ; CHECK-NEXT: vsmul.vv v8, v8, v10
747 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
748 <vscale x 4 x i32> undef,
749 <vscale x 4 x i32> %0,
750 <vscale x 4 x i32> %1,
753 ret <vscale x 4 x i32> %a
756 declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
763 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
764 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32:
765 ; CHECK: # %bb.0: # %entry
766 ; CHECK-NEXT: csrwi vxrm, 0
767 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
768 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
771 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
772 <vscale x 4 x i32> %0,
773 <vscale x 4 x i32> %1,
774 <vscale x 4 x i32> %2,
775 <vscale x 4 x i1> %3,
776 iXLen 0, iXLen %4, iXLen 1)
778 ret <vscale x 4 x i32> %a
781 declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
787 define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
788 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32:
789 ; CHECK: # %bb.0: # %entry
790 ; CHECK-NEXT: csrwi vxrm, 0
791 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
792 ; CHECK-NEXT: vsmul.vv v8, v8, v12
795 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
796 <vscale x 8 x i32> undef,
797 <vscale x 8 x i32> %0,
798 <vscale x 8 x i32> %1,
801 ret <vscale x 8 x i32> %a
804 declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
811 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
812 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32:
813 ; CHECK: # %bb.0: # %entry
814 ; CHECK-NEXT: csrwi vxrm, 0
815 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
816 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
819 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
820 <vscale x 8 x i32> %0,
821 <vscale x 8 x i32> %1,
822 <vscale x 8 x i32> %2,
823 <vscale x 8 x i1> %3,
824 iXLen 0, iXLen %4, iXLen 1)
826 ret <vscale x 8 x i32> %a
829 declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
835 define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
836 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32:
837 ; CHECK: # %bb.0: # %entry
838 ; CHECK-NEXT: csrwi vxrm, 0
839 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
840 ; CHECK-NEXT: vsmul.vv v8, v8, v16
843 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
844 <vscale x 16 x i32> undef,
845 <vscale x 16 x i32> %0,
846 <vscale x 16 x i32> %1,
849 ret <vscale x 16 x i32> %a
852 declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
859 define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
860 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
861 ; CHECK: # %bb.0: # %entry
862 ; CHECK-NEXT: vl8re32.v v24, (a0)
863 ; CHECK-NEXT: csrwi vxrm, 0
864 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
865 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
868 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
869 <vscale x 16 x i32> %0,
870 <vscale x 16 x i32> %1,
871 <vscale x 16 x i32> %2,
872 <vscale x 16 x i1> %3,
873 iXLen 0, iXLen %4, iXLen 1)
875 ret <vscale x 16 x i32> %a
878 declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
884 define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
885 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64:
886 ; CHECK: # %bb.0: # %entry
887 ; CHECK-NEXT: csrwi vxrm, 0
888 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
889 ; CHECK-NEXT: vsmul.vv v8, v8, v9
892 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
893 <vscale x 1 x i64> undef,
894 <vscale x 1 x i64> %0,
895 <vscale x 1 x i64> %1,
898 ret <vscale x 1 x i64> %a
901 declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
908 define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
909 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
910 ; CHECK: # %bb.0: # %entry
911 ; CHECK-NEXT: csrwi vxrm, 0
912 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
913 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
916 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
917 <vscale x 1 x i64> %0,
918 <vscale x 1 x i64> %1,
919 <vscale x 1 x i64> %2,
920 <vscale x 1 x i1> %3,
921 iXLen 0, iXLen %4, iXLen 1)
923 ret <vscale x 1 x i64> %a
926 declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
932 define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
933 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64:
934 ; CHECK: # %bb.0: # %entry
935 ; CHECK-NEXT: csrwi vxrm, 0
936 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
937 ; CHECK-NEXT: vsmul.vv v8, v8, v10
940 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
941 <vscale x 2 x i64> undef,
942 <vscale x 2 x i64> %0,
943 <vscale x 2 x i64> %1,
946 ret <vscale x 2 x i64> %a
949 declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
956 define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
957 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
958 ; CHECK: # %bb.0: # %entry
959 ; CHECK-NEXT: csrwi vxrm, 0
960 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
961 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
964 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
965 <vscale x 2 x i64> %0,
966 <vscale x 2 x i64> %1,
967 <vscale x 2 x i64> %2,
968 <vscale x 2 x i1> %3,
969 iXLen 0, iXLen %4, iXLen 1)
971 ret <vscale x 2 x i64> %a
974 declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
980 define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
981 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64:
982 ; CHECK: # %bb.0: # %entry
983 ; CHECK-NEXT: csrwi vxrm, 0
984 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
985 ; CHECK-NEXT: vsmul.vv v8, v8, v12
988 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
989 <vscale x 4 x i64> undef,
990 <vscale x 4 x i64> %0,
991 <vscale x 4 x i64> %1,
994 ret <vscale x 4 x i64> %a
997 declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
1002 iXLen, iXLen, iXLen)
1004 define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1005 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
1006 ; CHECK: # %bb.0: # %entry
1007 ; CHECK-NEXT: csrwi vxrm, 0
1008 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
1009 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
1012 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
1013 <vscale x 4 x i64> %0,
1014 <vscale x 4 x i64> %1,
1015 <vscale x 4 x i64> %2,
1016 <vscale x 4 x i1> %3,
1017 iXLen 0, iXLen %4, iXLen 1)
1019 ret <vscale x 4 x i64> %a
1022 declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
1028 define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
1029 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64:
1030 ; CHECK: # %bb.0: # %entry
1031 ; CHECK-NEXT: csrwi vxrm, 0
1032 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1033 ; CHECK-NEXT: vsmul.vv v8, v8, v16
1036 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
1037 <vscale x 8 x i64> undef,
1038 <vscale x 8 x i64> %0,
1039 <vscale x 8 x i64> %1,
1042 ret <vscale x 8 x i64> %a
1045 declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
1050 iXLen, iXLen, iXLen)
1052 define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1053 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
1054 ; CHECK: # %bb.0: # %entry
1055 ; CHECK-NEXT: vl8re64.v v24, (a0)
1056 ; CHECK-NEXT: csrwi vxrm, 0
1057 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
1058 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
1061 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
1062 <vscale x 8 x i64> %0,
1063 <vscale x 8 x i64> %1,
1064 <vscale x 8 x i64> %2,
1065 <vscale x 8 x i1> %3,
1066 iXLen 0, iXLen %4, iXLen 1)
1068 ret <vscale x 8 x i64> %a
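; The remaining tests cover the .vx form, where the second source operand is
; a scalar passed in a GPR rather than a vector register.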
1071 declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
1077 define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
1078 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8:
1079 ; CHECK: # %bb.0: # %entry
1080 ; CHECK-NEXT: csrwi vxrm, 0
1081 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1082 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1085 %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
1086 <vscale x 1 x i8> undef,
1087 <vscale x 1 x i8> %0,
1091 ret <vscale x 1 x i8> %a
1094 declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
1099 iXLen, iXLen, iXLen)
1101 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1102 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8:
1103 ; CHECK: # %bb.0: # %entry
1104 ; CHECK-NEXT: csrwi vxrm, 0
1105 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
1106 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1109 %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
1110 <vscale x 1 x i8> %0,
1111 <vscale x 1 x i8> %1,
1113 <vscale x 1 x i1> %3,
1114 iXLen 0, iXLen %4, iXLen 1)
1116 ret <vscale x 1 x i8> %a
1119 declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
1125 define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
1126 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8:
1127 ; CHECK: # %bb.0: # %entry
1128 ; CHECK-NEXT: csrwi vxrm, 0
1129 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1130 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1133 %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
1134 <vscale x 2 x i8> undef,
1135 <vscale x 2 x i8> %0,
1139 ret <vscale x 2 x i8> %a
1142 declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
1147 iXLen, iXLen, iXLen)
1149 define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1150 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8:
1151 ; CHECK: # %bb.0: # %entry
1152 ; CHECK-NEXT: csrwi vxrm, 0
1153 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1154 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1157 %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
1158 <vscale x 2 x i8> %0,
1159 <vscale x 2 x i8> %1,
1161 <vscale x 2 x i1> %3,
1162 iXLen 0, iXLen %4, iXLen 1)
1164 ret <vscale x 2 x i8> %a
1167 declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
1173 define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
1174 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8:
1175 ; CHECK: # %bb.0: # %entry
1176 ; CHECK-NEXT: csrwi vxrm, 0
1177 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1178 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1181 %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
1182 <vscale x 4 x i8> undef,
1183 <vscale x 4 x i8> %0,
1187 ret <vscale x 4 x i8> %a
1190 declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
1195 iXLen, iXLen, iXLen)
1197 define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1198 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8:
1199 ; CHECK: # %bb.0: # %entry
1200 ; CHECK-NEXT: csrwi vxrm, 0
1201 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1202 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1205 %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
1206 <vscale x 4 x i8> %0,
1207 <vscale x 4 x i8> %1,
1209 <vscale x 4 x i1> %3,
1210 iXLen 0, iXLen %4, iXLen 1)
1212 ret <vscale x 4 x i8> %a
1215 declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
1221 define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
1222 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8:
1223 ; CHECK: # %bb.0: # %entry
1224 ; CHECK-NEXT: csrwi vxrm, 0
1225 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1226 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1229 %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
1230 <vscale x 8 x i8> undef,
1231 <vscale x 8 x i8> %0,
1235 ret <vscale x 8 x i8> %a
1238 declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
1243 iXLen, iXLen, iXLen)
1245 define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1246 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8:
1247 ; CHECK: # %bb.0: # %entry
1248 ; CHECK-NEXT: csrwi vxrm, 0
1249 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1250 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1253 %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
1254 <vscale x 8 x i8> %0,
1255 <vscale x 8 x i8> %1,
1257 <vscale x 8 x i1> %3,
1258 iXLen 0, iXLen %4, iXLen 1)
1260 ret <vscale x 8 x i8> %a
1263 declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
1269 define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
1270 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8:
1271 ; CHECK: # %bb.0: # %entry
1272 ; CHECK-NEXT: csrwi vxrm, 0
1273 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1274 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1277 %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
1278 <vscale x 16 x i8> undef,
1279 <vscale x 16 x i8> %0,
1283 ret <vscale x 16 x i8> %a
1286 declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
1291 iXLen, iXLen, iXLen)
1293 define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1294 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8:
1295 ; CHECK: # %bb.0: # %entry
1296 ; CHECK-NEXT: csrwi vxrm, 0
1297 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
1298 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
1301 %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
1302 <vscale x 16 x i8> %0,
1303 <vscale x 16 x i8> %1,
1305 <vscale x 16 x i1> %3,
1306 iXLen 0, iXLen %4, iXLen 1)
1308 ret <vscale x 16 x i8> %a
1311 declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
1317 define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
1318 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8:
1319 ; CHECK: # %bb.0: # %entry
1320 ; CHECK-NEXT: csrwi vxrm, 0
1321 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1322 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1325 %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
1326 <vscale x 32 x i8> undef,
1327 <vscale x 32 x i8> %0,
1331 ret <vscale x 32 x i8> %a
1334 declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
1339 iXLen, iXLen, iXLen)
1341 define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1342 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8:
1343 ; CHECK: # %bb.0: # %entry
1344 ; CHECK-NEXT: csrwi vxrm, 0
1345 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
1346 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
1349 %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
1350 <vscale x 32 x i8> %0,
1351 <vscale x 32 x i8> %1,
1353 <vscale x 32 x i1> %3,
1354 iXLen 0, iXLen %4, iXLen 1)
1356 ret <vscale x 32 x i8> %a
1359 declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
1365 define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
1366 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8:
1367 ; CHECK: # %bb.0: # %entry
1368 ; CHECK-NEXT: csrwi vxrm, 0
1369 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
1370 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1373 %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
1374 <vscale x 64 x i8> undef,
1375 <vscale x 64 x i8> %0,
1379 ret <vscale x 64 x i8> %a
1382 declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
1387 iXLen, iXLen, iXLen)
1389 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
1390 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8:
1391 ; CHECK: # %bb.0: # %entry
1392 ; CHECK-NEXT: csrwi vxrm, 0
1393 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
1394 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
1397 %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
1398 <vscale x 64 x i8> %0,
1399 <vscale x 64 x i8> %1,
1401 <vscale x 64 x i1> %3,
1402 iXLen 0, iXLen %4, iXLen 1)
1404 ret <vscale x 64 x i8> %a
1407 declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
1413 define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
1414 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16:
1415 ; CHECK: # %bb.0: # %entry
1416 ; CHECK-NEXT: csrwi vxrm, 0
1417 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1418 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1421 %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
1422 <vscale x 1 x i16> undef,
1423 <vscale x 1 x i16> %0,
1427 ret <vscale x 1 x i16> %a
1430 declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
1435 iXLen, iXLen, iXLen)
1437 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1438 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16:
1439 ; CHECK: # %bb.0: # %entry
1440 ; CHECK-NEXT: csrwi vxrm, 0
1441 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1442 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1445 %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
1446 <vscale x 1 x i16> %0,
1447 <vscale x 1 x i16> %1,
1449 <vscale x 1 x i1> %3,
1450 iXLen 0, iXLen %4, iXLen 1)
1452 ret <vscale x 1 x i16> %a
1455 declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
1461 define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
1462 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16:
1463 ; CHECK: # %bb.0: # %entry
1464 ; CHECK-NEXT: csrwi vxrm, 0
1465 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1466 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1469 %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
1470 <vscale x 2 x i16> undef,
1471 <vscale x 2 x i16> %0,
1475 ret <vscale x 2 x i16> %a
1478 declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
1483 iXLen, iXLen, iXLen)
1485 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1486 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16:
1487 ; CHECK: # %bb.0: # %entry
1488 ; CHECK-NEXT: csrwi vxrm, 0
1489 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1490 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1493 %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
1494 <vscale x 2 x i16> %0,
1495 <vscale x 2 x i16> %1,
1497 <vscale x 2 x i1> %3,
1498 iXLen 0, iXLen %4, iXLen 1)
1500 ret <vscale x 2 x i16> %a
1503 declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
1509 define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
1510 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16:
1511 ; CHECK: # %bb.0: # %entry
1512 ; CHECK-NEXT: csrwi vxrm, 0
1513 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1514 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1517 %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
1518 <vscale x 4 x i16> undef,
1519 <vscale x 4 x i16> %0,
1523 ret <vscale x 4 x i16> %a
1526 declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
1531 iXLen, iXLen, iXLen)
1533 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1534 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16:
1535 ; CHECK: # %bb.0: # %entry
1536 ; CHECK-NEXT: csrwi vxrm, 0
1537 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1538 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1541 %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
1542 <vscale x 4 x i16> %0,
1543 <vscale x 4 x i16> %1,
1545 <vscale x 4 x i1> %3,
1546 iXLen 0, iXLen %4, iXLen 1)
1548 ret <vscale x 4 x i16> %a
1551 declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
1557 define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
1558 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16:
1559 ; CHECK: # %bb.0: # %entry
1560 ; CHECK-NEXT: csrwi vxrm, 0
1561 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1562 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1565 %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
1566 <vscale x 8 x i16> undef,
1567 <vscale x 8 x i16> %0,
1571 ret <vscale x 8 x i16> %a
1574 declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
1579 iXLen, iXLen, iXLen)
1581 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1582 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16:
1583 ; CHECK: # %bb.0: # %entry
1584 ; CHECK-NEXT: csrwi vxrm, 0
1585 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1586 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
1589 %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
1590 <vscale x 8 x i16> %0,
1591 <vscale x 8 x i16> %1,
1593 <vscale x 8 x i1> %3,
1594 iXLen 0, iXLen %4, iXLen 1)
1596 ret <vscale x 8 x i16> %a
1599 declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
1600 <vscale x 16 x i16>,
1601 <vscale x 16 x i16>,
1605 define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
1606 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16:
1607 ; CHECK: # %bb.0: # %entry
1608 ; CHECK-NEXT: csrwi vxrm, 0
1609 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1610 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1613 %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
1614 <vscale x 16 x i16> undef,
1615 <vscale x 16 x i16> %0,
1619 ret <vscale x 16 x i16> %a
1622 declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
1623 <vscale x 16 x i16>,
1624 <vscale x 16 x i16>,
1627 iXLen, iXLen, iXLen)
1629 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1630 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16:
1631 ; CHECK: # %bb.0: # %entry
1632 ; CHECK-NEXT: csrwi vxrm, 0
1633 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1634 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
1637 %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
1638 <vscale x 16 x i16> %0,
1639 <vscale x 16 x i16> %1,
1641 <vscale x 16 x i1> %3,
1642 iXLen 0, iXLen %4, iXLen 1)
1644 ret <vscale x 16 x i16> %a
1647 declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
1648 <vscale x 32 x i16>,
1649 <vscale x 32 x i16>,
1653 define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
1654 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16:
1655 ; CHECK: # %bb.0: # %entry
1656 ; CHECK-NEXT: csrwi vxrm, 0
1657 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1658 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1661 %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
1662 <vscale x 32 x i16> undef,
1663 <vscale x 32 x i16> %0,
1667 ret <vscale x 32 x i16> %a
1670 declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
1671 <vscale x 32 x i16>,
1672 <vscale x 32 x i16>,
1675 iXLen, iXLen, iXLen)
1677 define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1678 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16:
1679 ; CHECK: # %bb.0: # %entry
1680 ; CHECK-NEXT: csrwi vxrm, 0
1681 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
1682 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
1685 %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
1686 <vscale x 32 x i16> %0,
1687 <vscale x 32 x i16> %1,
1689 <vscale x 32 x i1> %3,
1690 iXLen 0, iXLen %4, iXLen 1)
1692 ret <vscale x 32 x i16> %a
1695 declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
1701 define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
1702 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32:
1703 ; CHECK: # %bb.0: # %entry
1704 ; CHECK-NEXT: csrwi vxrm, 0
1705 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1706 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1709 %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
1710 <vscale x 1 x i32> undef,
1711 <vscale x 1 x i32> %0,
1715 ret <vscale x 1 x i32> %a
1718 declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
1723 iXLen, iXLen, iXLen)
1725 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1726 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32:
1727 ; CHECK: # %bb.0: # %entry
1728 ; CHECK-NEXT: csrwi vxrm, 0
1729 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1730 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1733 %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
1734 <vscale x 1 x i32> %0,
1735 <vscale x 1 x i32> %1,
1737 <vscale x 1 x i1> %3,
1738 iXLen 0, iXLen %4, iXLen 1)
1740 ret <vscale x 1 x i32> %a
1743 declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
1749 define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
1750 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32:
1751 ; CHECK: # %bb.0: # %entry
1752 ; CHECK-NEXT: csrwi vxrm, 0
1753 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1754 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1757 %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
1758 <vscale x 2 x i32> undef,
1759 <vscale x 2 x i32> %0,
1763 ret <vscale x 2 x i32> %a
1766 declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
1771 iXLen, iXLen, iXLen)
1773 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1774 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32:
1775 ; CHECK: # %bb.0: # %entry
1776 ; CHECK-NEXT: csrwi vxrm, 0
1777 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1778 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1781 %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
1782 <vscale x 2 x i32> %0,
1783 <vscale x 2 x i32> %1,
1785 <vscale x 2 x i1> %3,
1786 iXLen 0, iXLen %4, iXLen 1)
1788 ret <vscale x 2 x i32> %a
1791 declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
1797 define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
1798 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32:
1799 ; CHECK: # %bb.0: # %entry
1800 ; CHECK-NEXT: csrwi vxrm, 0
1801 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1802 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1805 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
1806 <vscale x 4 x i32> undef,
1807 <vscale x 4 x i32> %0,
1811 ret <vscale x 4 x i32> %a
1814 declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
1819 iXLen, iXLen, iXLen)
1821 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1822 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32:
1823 ; CHECK: # %bb.0: # %entry
1824 ; CHECK-NEXT: csrwi vxrm, 0
1825 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1826 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
1829 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
1830 <vscale x 4 x i32> %0,
1831 <vscale x 4 x i32> %1,
1833 <vscale x 4 x i1> %3,
1834 iXLen 0, iXLen %4, iXLen 1)
1836 ret <vscale x 4 x i32> %a
1839 declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
1845 define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
1846 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32:
1847 ; CHECK: # %bb.0: # %entry
1848 ; CHECK-NEXT: csrwi vxrm, 0
1849 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1850 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1853 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
1854 <vscale x 8 x i32> undef,
1855 <vscale x 8 x i32> %0,
1859 ret <vscale x 8 x i32> %a
1862 declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
1867 iXLen, iXLen, iXLen)
1869 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1870 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32:
1871 ; CHECK: # %bb.0: # %entry
1872 ; CHECK-NEXT: csrwi vxrm, 0
1873 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1874 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
1877 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
1878 <vscale x 8 x i32> %0,
1879 <vscale x 8 x i32> %1,
1881 <vscale x 8 x i1> %3,
1882 iXLen 0, iXLen %4, iXLen 1)
1884 ret <vscale x 8 x i32> %a
1887 declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
1888 <vscale x 16 x i32>,
1889 <vscale x 16 x i32>,
1893 define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
1894 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32:
1895 ; CHECK: # %bb.0: # %entry
1896 ; CHECK-NEXT: csrwi vxrm, 0
1897 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1898 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1901 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
1902 <vscale x 16 x i32> undef,
1903 <vscale x 16 x i32> %0,
1907 ret <vscale x 16 x i32> %a
1910 declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
1911 <vscale x 16 x i32>,
1912 <vscale x 16 x i32>,
1915 iXLen, iXLen, iXLen)
1917 define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1918 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32:
1919 ; CHECK: # %bb.0: # %entry
1920 ; CHECK-NEXT: csrwi vxrm, 0
1921 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
1922 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
1925 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
1926 <vscale x 16 x i32> %0,
1927 <vscale x 16 x i32> %1,
1929 <vscale x 16 x i1> %3,
1930 iXLen 0, iXLen %4, iXLen 1)
1932 ret <vscale x 16 x i32> %a
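; For i64 scalar operands the RV32 and RV64 output diverges: RV64 keeps the
; scalar in a GPR and uses vsmul.vx, while RV32 must spill the 64-bit value
; to the stack, splat it with a zero-stride vlse64.v, and fall back to
; vsmul.vv.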
1935 declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
1941 define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
1942 ; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
1943 ; RV32: # %bb.0: # %entry
1944 ; RV32-NEXT: addi sp, sp, -16
1945 ; RV32-NEXT: sw a0, 8(sp)
1946 ; RV32-NEXT: sw a1, 12(sp)
1947 ; RV32-NEXT: addi a0, sp, 8
1948 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1949 ; RV32-NEXT: vlse64.v v9, (a0), zero
1950 ; RV32-NEXT: csrwi vxrm, 0
1951 ; RV32-NEXT: vsmul.vv v8, v8, v9
1952 ; RV32-NEXT: addi sp, sp, 16
1955 ; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
1956 ; RV64: # %bb.0: # %entry
1957 ; RV64-NEXT: csrwi vxrm, 0
1958 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1959 ; RV64-NEXT: vsmul.vx v8, v8, a0
1962 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
1963 <vscale x 1 x i64> undef,
1964 <vscale x 1 x i64> %0,
1968 ret <vscale x 1 x i64> %a
1971 declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
1976 iXLen, iXLen, iXLen)
1978 define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1979 ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64:
1980 ; RV32: # %bb.0: # %entry
1981 ; RV32-NEXT: addi sp, sp, -16
1982 ; RV32-NEXT: sw a0, 8(sp)
1983 ; RV32-NEXT: sw a1, 12(sp)
1984 ; RV32-NEXT: addi a0, sp, 8
1985 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
1986 ; RV32-NEXT: vlse64.v v10, (a0), zero
1987 ; RV32-NEXT: csrwi vxrm, 0
1988 ; RV32-NEXT: vsmul.vv v8, v9, v10, v0.t
1989 ; RV32-NEXT: addi sp, sp, 16
1992 ; RV64-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64:
1993 ; RV64: # %bb.0: # %entry
1994 ; RV64-NEXT: csrwi vxrm, 0
1995 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1996 ; RV64-NEXT: vsmul.vx v8, v9, a0, v0.t
1999 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
2000 <vscale x 1 x i64> %0,
2001 <vscale x 1 x i64> %1,
2003 <vscale x 1 x i1> %3,
2004 iXLen 0, iXLen %4, iXLen 1)
2006 ret <vscale x 1 x i64> %a
2009 declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
2015 define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
2016 ; RV32-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64:
2017 ; RV32: # %bb.0: # %entry
2018 ; RV32-NEXT: addi sp, sp, -16
2019 ; RV32-NEXT: sw a0, 8(sp)
2020 ; RV32-NEXT: sw a1, 12(sp)
2021 ; RV32-NEXT: addi a0, sp, 8
2022 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
2023 ; RV32-NEXT: vlse64.v v10, (a0), zero
2024 ; RV32-NEXT: csrwi vxrm, 0
2025 ; RV32-NEXT: vsmul.vv v8, v8, v10
2026 ; RV32-NEXT: addi sp, sp, 16
2029 ; RV64-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64:
2030 ; RV64: # %bb.0: # %entry
2031 ; RV64-NEXT: csrwi vxrm, 0
2032 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
2033 ; RV64-NEXT: vsmul.vx v8, v8, a0
2036 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
2037 <vscale x 2 x i64> undef,
2038 <vscale x 2 x i64> %0,
2042 ret <vscale x 2 x i64> %a
2045 declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
2050 iXLen, iXLen, iXLen)
2052 define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
2053 ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64:
2054 ; RV32: # %bb.0: # %entry
2055 ; RV32-NEXT: addi sp, sp, -16
2056 ; RV32-NEXT: sw a0, 8(sp)
2057 ; RV32-NEXT: sw a1, 12(sp)
2058 ; RV32-NEXT: addi a0, sp, 8
2059 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
2060 ; RV32-NEXT: vlse64.v v12, (a0), zero
2061 ; RV32-NEXT: csrwi vxrm, 0
2062 ; RV32-NEXT: vsmul.vv v8, v10, v12, v0.t
2063 ; RV32-NEXT: addi sp, sp, 16
2066 ; RV64-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64:
2067 ; RV64: # %bb.0: # %entry
2068 ; RV64-NEXT: csrwi vxrm, 0
2069 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
2070 ; RV64-NEXT: vsmul.vx v8, v10, a0, v0.t
2073 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
2074 <vscale x 2 x i64> %0,
2075 <vscale x 2 x i64> %1,
2077 <vscale x 2 x i1> %3,
2078 iXLen 0, iXLen %4, iXLen 1)
2080 ret <vscale x 2 x i64> %a
2083 declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
2089 define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
2090 ; RV32-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64:
2091 ; RV32: # %bb.0: # %entry
2092 ; RV32-NEXT: addi sp, sp, -16
2093 ; RV32-NEXT: sw a0, 8(sp)
2094 ; RV32-NEXT: sw a1, 12(sp)
2095 ; RV32-NEXT: addi a0, sp, 8
2096 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
2097 ; RV32-NEXT: vlse64.v v12, (a0), zero
2098 ; RV32-NEXT: csrwi vxrm, 0
2099 ; RV32-NEXT: vsmul.vv v8, v8, v12
2100 ; RV32-NEXT: addi sp, sp, 16
2103 ; RV64-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64:
2104 ; RV64: # %bb.0: # %entry
2105 ; RV64-NEXT: csrwi vxrm, 0
2106 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
2107 ; RV64-NEXT: vsmul.vx v8, v8, a0
2110 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
2111 <vscale x 4 x i64> undef,
2112 <vscale x 4 x i64> %0,
2116 ret <vscale x 4 x i64> %a
2119 declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
2124 iXLen, iXLen, iXLen)
2126 define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
2127 ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64:
2128 ; RV32: # %bb.0: # %entry
2129 ; RV32-NEXT: addi sp, sp, -16
2130 ; RV32-NEXT: sw a0, 8(sp)
2131 ; RV32-NEXT: sw a1, 12(sp)
2132 ; RV32-NEXT: addi a0, sp, 8
2133 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
2134 ; RV32-NEXT: vlse64.v v16, (a0), zero
2135 ; RV32-NEXT: csrwi vxrm, 0
2136 ; RV32-NEXT: vsmul.vv v8, v12, v16, v0.t
2137 ; RV32-NEXT: addi sp, sp, 16
2140 ; RV64-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64:
2141 ; RV64: # %bb.0: # %entry
2142 ; RV64-NEXT: csrwi vxrm, 0
2143 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
2144 ; RV64-NEXT: vsmul.vx v8, v12, a0, v0.t
2147 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
2148 <vscale x 4 x i64> %0,
2149 <vscale x 4 x i64> %1,
2151 <vscale x 4 x i1> %3,
2152 iXLen 0, iXLen %4, iXLen 1)
2154 ret <vscale x 4 x i64> %a
2157 declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
2163 define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
2164 ; RV32-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64:
2165 ; RV32: # %bb.0: # %entry
2166 ; RV32-NEXT: addi sp, sp, -16
2167 ; RV32-NEXT: sw a0, 8(sp)
2168 ; RV32-NEXT: sw a1, 12(sp)
2169 ; RV32-NEXT: addi a0, sp, 8
2170 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2171 ; RV32-NEXT: vlse64.v v16, (a0), zero
2172 ; RV32-NEXT: csrwi vxrm, 0
2173 ; RV32-NEXT: vsmul.vv v8, v8, v16
2174 ; RV32-NEXT: addi sp, sp, 16
2177 ; RV64-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64:
2178 ; RV64: # %bb.0: # %entry
2179 ; RV64-NEXT: csrwi vxrm, 0
2180 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
2181 ; RV64-NEXT: vsmul.vx v8, v8, a0
2184 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
2185 <vscale x 8 x i64> undef,
2186 <vscale x 8 x i64> %0,
2190 ret <vscale x 8 x i64> %a
2193 declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
2198 iXLen, iXLen, iXLen)
2200 define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
2201 ; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64:
2202 ; RV32: # %bb.0: # %entry
2203 ; RV32-NEXT: addi sp, sp, -16
2204 ; RV32-NEXT: sw a0, 8(sp)
2205 ; RV32-NEXT: sw a1, 12(sp)
2206 ; RV32-NEXT: addi a0, sp, 8
2207 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
2208 ; RV32-NEXT: vlse64.v v24, (a0), zero
2209 ; RV32-NEXT: csrwi vxrm, 0
2210 ; RV32-NEXT: vsmul.vv v8, v16, v24, v0.t
2211 ; RV32-NEXT: addi sp, sp, 16
2214 ; RV64-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64:
2215 ; RV64: # %bb.0: # %entry
2216 ; RV64-NEXT: csrwi vxrm, 0
2217 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
2218 ; RV64-NEXT: vsmul.vx v8, v16, a0, v0.t
2221 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
2222 <vscale x 8 x i64> %0,
2223 <vscale x 8 x i64> %1,
2225 <vscale x 8 x i1> %3,
2226 iXLen 0, iXLen %4, iXLen 1)
2228 ret <vscale x 8 x i64> %a