; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
; RUN: not --crash llc -mtriple=riscv32 -mattr=+zve64d 2>&1 \
; RUN:   < %s | FileCheck %s --check-prefixes=ZVE64D

; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vsmul

declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32,
  i32)

define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 0, i32 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32,
  i32, i32)

define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i32 0, i32 %4, i32 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32,
  i32)

define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 0, i32 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32,
  i32, i32)

define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i32 0, i32 %4, i32 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32,
  i32)

define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 0, i32 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32,
  i32, i32)

define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i32 0, i32 %4, i32 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32,
  i32)

define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v9
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 0, i32 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32,
  i32, i32)

define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i32 0, i32 %4, i32 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32,
  i32)

define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v10
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 0, i32 %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i32,
  i32, i32)

define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    i32 0, i32 %4, i32 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32,
  i32)

define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v12
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 0, i32 %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i32,
  i32, i32)

define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    i32 0, i32 %4, i32 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i32,
  i32)

define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v8, v16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i32 0, i32 %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  i32,
  i32, i32)

define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    i32 0, i32 %4, i32 1)

  ret <vscale x 64 x i8> %a
}

346 declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
352 define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
353 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16:
354 ; CHECK: # %bb.0: # %entry
355 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
356 ; CHECK-NEXT: csrwi vxrm, 0
357 ; CHECK-NEXT: vsmul.vv v8, v8, v9
360 %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
361 <vscale x 1 x i16> undef,
362 <vscale x 1 x i16> %0,
363 <vscale x 1 x i16> %1,
366 ret <vscale x 1 x i16> %a
369 declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
376 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
377 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16:
378 ; CHECK: # %bb.0: # %entry
379 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
380 ; CHECK-NEXT: csrwi vxrm, 0
381 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
384 %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
385 <vscale x 1 x i16> %0,
386 <vscale x 1 x i16> %1,
387 <vscale x 1 x i16> %2,
388 <vscale x 1 x i1> %3,
389 i32 0, i32 %4, i32 1)
391 ret <vscale x 1 x i16> %a
394 declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
400 define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
401 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16:
402 ; CHECK: # %bb.0: # %entry
403 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
404 ; CHECK-NEXT: csrwi vxrm, 0
405 ; CHECK-NEXT: vsmul.vv v8, v8, v9
408 %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
409 <vscale x 2 x i16> undef,
410 <vscale x 2 x i16> %0,
411 <vscale x 2 x i16> %1,
414 ret <vscale x 2 x i16> %a
417 declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
424 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
425 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16:
426 ; CHECK: # %bb.0: # %entry
427 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
428 ; CHECK-NEXT: csrwi vxrm, 0
429 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
432 %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
433 <vscale x 2 x i16> %0,
434 <vscale x 2 x i16> %1,
435 <vscale x 2 x i16> %2,
436 <vscale x 2 x i1> %3,
437 i32 0, i32 %4, i32 1)
439 ret <vscale x 2 x i16> %a
442 declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
448 define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
449 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16:
450 ; CHECK: # %bb.0: # %entry
451 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
452 ; CHECK-NEXT: csrwi vxrm, 0
453 ; CHECK-NEXT: vsmul.vv v8, v8, v9
456 %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
457 <vscale x 4 x i16> undef,
458 <vscale x 4 x i16> %0,
459 <vscale x 4 x i16> %1,
462 ret <vscale x 4 x i16> %a
465 declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
472 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
473 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16:
474 ; CHECK: # %bb.0: # %entry
475 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
476 ; CHECK-NEXT: csrwi vxrm, 0
477 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
480 %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
481 <vscale x 4 x i16> %0,
482 <vscale x 4 x i16> %1,
483 <vscale x 4 x i16> %2,
484 <vscale x 4 x i1> %3,
485 i32 0, i32 %4, i32 1)
487 ret <vscale x 4 x i16> %a
490 declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
496 define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
497 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16:
498 ; CHECK: # %bb.0: # %entry
499 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
500 ; CHECK-NEXT: csrwi vxrm, 0
501 ; CHECK-NEXT: vsmul.vv v8, v8, v10
504 %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
505 <vscale x 8 x i16> undef,
506 <vscale x 8 x i16> %0,
507 <vscale x 8 x i16> %1,
510 ret <vscale x 8 x i16> %a
513 declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
520 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
521 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16:
522 ; CHECK: # %bb.0: # %entry
523 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
524 ; CHECK-NEXT: csrwi vxrm, 0
525 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
528 %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
529 <vscale x 8 x i16> %0,
530 <vscale x 8 x i16> %1,
531 <vscale x 8 x i16> %2,
532 <vscale x 8 x i1> %3,
533 i32 0, i32 %4, i32 1)
535 ret <vscale x 8 x i16> %a
538 declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
544 define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
545 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16:
546 ; CHECK: # %bb.0: # %entry
547 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
548 ; CHECK-NEXT: csrwi vxrm, 0
549 ; CHECK-NEXT: vsmul.vv v8, v8, v12
552 %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
553 <vscale x 16 x i16> undef,
554 <vscale x 16 x i16> %0,
555 <vscale x 16 x i16> %1,
558 ret <vscale x 16 x i16> %a
561 declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
568 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
569 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16:
570 ; CHECK: # %bb.0: # %entry
571 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
572 ; CHECK-NEXT: csrwi vxrm, 0
573 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
576 %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
577 <vscale x 16 x i16> %0,
578 <vscale x 16 x i16> %1,
579 <vscale x 16 x i16> %2,
580 <vscale x 16 x i1> %3,
581 i32 0, i32 %4, i32 1)
583 ret <vscale x 16 x i16> %a
586 declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
592 define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
593 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16:
594 ; CHECK: # %bb.0: # %entry
595 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
596 ; CHECK-NEXT: csrwi vxrm, 0
597 ; CHECK-NEXT: vsmul.vv v8, v8, v16
600 %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
601 <vscale x 32 x i16> undef,
602 <vscale x 32 x i16> %0,
603 <vscale x 32 x i16> %1,
606 ret <vscale x 32 x i16> %a
609 declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
616 define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
617 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
618 ; CHECK: # %bb.0: # %entry
619 ; CHECK-NEXT: vl8re16.v v24, (a0)
620 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
621 ; CHECK-NEXT: csrwi vxrm, 0
622 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
625 %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
626 <vscale x 32 x i16> %0,
627 <vscale x 32 x i16> %1,
628 <vscale x 32 x i16> %2,
629 <vscale x 32 x i1> %3,
630 i32 0, i32 %4, i32 1)
632 ret <vscale x 32 x i16> %a
635 declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
641 define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
642 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32:
643 ; CHECK: # %bb.0: # %entry
644 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
645 ; CHECK-NEXT: csrwi vxrm, 0
646 ; CHECK-NEXT: vsmul.vv v8, v8, v9
649 %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
650 <vscale x 1 x i32> undef,
651 <vscale x 1 x i32> %0,
652 <vscale x 1 x i32> %1,
655 ret <vscale x 1 x i32> %a
658 declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
665 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
666 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32:
667 ; CHECK: # %bb.0: # %entry
668 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
669 ; CHECK-NEXT: csrwi vxrm, 0
670 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
673 %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
674 <vscale x 1 x i32> %0,
675 <vscale x 1 x i32> %1,
676 <vscale x 1 x i32> %2,
677 <vscale x 1 x i1> %3,
678 i32 0, i32 %4, i32 1)
680 ret <vscale x 1 x i32> %a
683 declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
689 define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
690 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32:
691 ; CHECK: # %bb.0: # %entry
692 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
693 ; CHECK-NEXT: csrwi vxrm, 0
694 ; CHECK-NEXT: vsmul.vv v8, v8, v9
697 %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
698 <vscale x 2 x i32> undef,
699 <vscale x 2 x i32> %0,
700 <vscale x 2 x i32> %1,
703 ret <vscale x 2 x i32> %a
706 declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
713 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
714 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32:
715 ; CHECK: # %bb.0: # %entry
716 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
717 ; CHECK-NEXT: csrwi vxrm, 0
718 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
721 %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
722 <vscale x 2 x i32> %0,
723 <vscale x 2 x i32> %1,
724 <vscale x 2 x i32> %2,
725 <vscale x 2 x i1> %3,
726 i32 0, i32 %4, i32 1)
728 ret <vscale x 2 x i32> %a
731 declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
737 define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
738 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32:
739 ; CHECK: # %bb.0: # %entry
740 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
741 ; CHECK-NEXT: csrwi vxrm, 0
742 ; CHECK-NEXT: vsmul.vv v8, v8, v10
745 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
746 <vscale x 4 x i32> undef,
747 <vscale x 4 x i32> %0,
748 <vscale x 4 x i32> %1,
751 ret <vscale x 4 x i32> %a
754 declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
761 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
762 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32:
763 ; CHECK: # %bb.0: # %entry
764 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
765 ; CHECK-NEXT: csrwi vxrm, 0
766 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
769 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
770 <vscale x 4 x i32> %0,
771 <vscale x 4 x i32> %1,
772 <vscale x 4 x i32> %2,
773 <vscale x 4 x i1> %3,
774 i32 0, i32 %4, i32 1)
776 ret <vscale x 4 x i32> %a
779 declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
785 define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
786 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32:
787 ; CHECK: # %bb.0: # %entry
788 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
789 ; CHECK-NEXT: csrwi vxrm, 0
790 ; CHECK-NEXT: vsmul.vv v8, v8, v12
793 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
794 <vscale x 8 x i32> undef,
795 <vscale x 8 x i32> %0,
796 <vscale x 8 x i32> %1,
799 ret <vscale x 8 x i32> %a
802 declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
809 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
810 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32:
811 ; CHECK: # %bb.0: # %entry
812 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
813 ; CHECK-NEXT: csrwi vxrm, 0
814 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
817 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
818 <vscale x 8 x i32> %0,
819 <vscale x 8 x i32> %1,
820 <vscale x 8 x i32> %2,
821 <vscale x 8 x i1> %3,
822 i32 0, i32 %4, i32 1)
824 ret <vscale x 8 x i32> %a
827 declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
833 define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
834 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32:
835 ; CHECK: # %bb.0: # %entry
836 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
837 ; CHECK-NEXT: csrwi vxrm, 0
838 ; CHECK-NEXT: vsmul.vv v8, v8, v16
841 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
842 <vscale x 16 x i32> undef,
843 <vscale x 16 x i32> %0,
844 <vscale x 16 x i32> %1,
847 ret <vscale x 16 x i32> %a
850 declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
857 define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
858 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
859 ; CHECK: # %bb.0: # %entry
860 ; CHECK-NEXT: vl8re32.v v24, (a0)
861 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
862 ; CHECK-NEXT: csrwi vxrm, 0
863 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
866 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
867 <vscale x 16 x i32> %0,
868 <vscale x 16 x i32> %1,
869 <vscale x 16 x i32> %2,
870 <vscale x 16 x i1> %3,
871 i32 0, i32 %4, i32 1)
873 ret <vscale x 16 x i32> %a
876 declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
882 define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
883 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64:
884 ; CHECK: # %bb.0: # %entry
885 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
886 ; CHECK-NEXT: csrwi vxrm, 0
887 ; CHECK-NEXT: vsmul.vv v8, v8, v9
890 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
891 <vscale x 1 x i64> undef,
892 <vscale x 1 x i64> %0,
893 <vscale x 1 x i64> %1,
896 ret <vscale x 1 x i64> %a
899 declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
906 define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
907 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
908 ; CHECK: # %bb.0: # %entry
909 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
910 ; CHECK-NEXT: csrwi vxrm, 0
911 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
914 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
915 <vscale x 1 x i64> %0,
916 <vscale x 1 x i64> %1,
917 <vscale x 1 x i64> %2,
918 <vscale x 1 x i1> %3,
919 i32 0, i32 %4, i32 1)
921 ret <vscale x 1 x i64> %a
924 declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
930 define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
931 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64:
932 ; CHECK: # %bb.0: # %entry
933 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
934 ; CHECK-NEXT: csrwi vxrm, 0
935 ; CHECK-NEXT: vsmul.vv v8, v8, v10
938 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
939 <vscale x 2 x i64> undef,
940 <vscale x 2 x i64> %0,
941 <vscale x 2 x i64> %1,
944 ret <vscale x 2 x i64> %a
947 declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
954 define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
955 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
956 ; CHECK: # %bb.0: # %entry
957 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
958 ; CHECK-NEXT: csrwi vxrm, 0
959 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
962 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
963 <vscale x 2 x i64> %0,
964 <vscale x 2 x i64> %1,
965 <vscale x 2 x i64> %2,
966 <vscale x 2 x i1> %3,
967 i32 0, i32 %4, i32 1)
969 ret <vscale x 2 x i64> %a
972 declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
978 define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
979 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64:
980 ; CHECK: # %bb.0: # %entry
981 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
982 ; CHECK-NEXT: csrwi vxrm, 0
983 ; CHECK-NEXT: vsmul.vv v8, v8, v12
986 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
987 <vscale x 4 x i64> undef,
988 <vscale x 4 x i64> %0,
989 <vscale x 4 x i64> %1,
992 ret <vscale x 4 x i64> %a
995 declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
1002 define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
1003 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
1004 ; CHECK: # %bb.0: # %entry
1005 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
1006 ; CHECK-NEXT: csrwi vxrm, 0
1007 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
1010 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
1011 <vscale x 4 x i64> %0,
1012 <vscale x 4 x i64> %1,
1013 <vscale x 4 x i64> %2,
1014 <vscale x 4 x i1> %3,
1015 i32 0, i32 %4, i32 1)
1017 ret <vscale x 4 x i64> %a
1020 declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
1026 define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
1027 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64:
1028 ; CHECK: # %bb.0: # %entry
1029 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1030 ; CHECK-NEXT: csrwi vxrm, 0
1031 ; CHECK-NEXT: vsmul.vv v8, v8, v16
1034 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
1035 <vscale x 8 x i64> undef,
1036 <vscale x 8 x i64> %0,
1037 <vscale x 8 x i64> %1,
1040 ret <vscale x 8 x i64> %a
1043 declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
1050 define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
1051 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
1052 ; CHECK: # %bb.0: # %entry
1053 ; CHECK-NEXT: vl8re64.v v24, (a0)
1054 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
1055 ; CHECK-NEXT: csrwi vxrm, 0
1056 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
1059 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
1060 <vscale x 8 x i64> %0,
1061 <vscale x 8 x i64> %1,
1062 <vscale x 8 x i64> %2,
1063 <vscale x 8 x i1> %3,
1064 i32 0, i32 %4, i32 1)
1066 ret <vscale x 8 x i64> %a
declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  i32,
  i32)

define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    i32 0, i32 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  i32,
  i32, i32)

define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    i32 0, i32 %4, i32 1)

  ret <vscale x 1 x i8> %a
}

1117 declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
1123 define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
1124 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8:
1125 ; CHECK: # %bb.0: # %entry
1126 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1127 ; CHECK-NEXT: csrwi vxrm, 0
1128 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1131 %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
1132 <vscale x 2 x i8> undef,
1133 <vscale x 2 x i8> %0,
1137 ret <vscale x 2 x i8> %a
1140 declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
1147 define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
1148 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8:
1149 ; CHECK: # %bb.0: # %entry
1150 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1151 ; CHECK-NEXT: csrwi vxrm, 0
1152 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1155 %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
1156 <vscale x 2 x i8> %0,
1157 <vscale x 2 x i8> %1,
1159 <vscale x 2 x i1> %3,
1160 i32 0, i32 %4, i32 1)
1162 ret <vscale x 2 x i8> %a
1165 declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
1171 define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
1172 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8:
1173 ; CHECK: # %bb.0: # %entry
1174 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1175 ; CHECK-NEXT: csrwi vxrm, 0
1176 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1179 %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
1180 <vscale x 4 x i8> undef,
1181 <vscale x 4 x i8> %0,
1185 ret <vscale x 4 x i8> %a
1188 declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
1195 define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
1196 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8:
1197 ; CHECK: # %bb.0: # %entry
1198 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1199 ; CHECK-NEXT: csrwi vxrm, 0
1200 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1203 %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
1204 <vscale x 4 x i8> %0,
1205 <vscale x 4 x i8> %1,
1207 <vscale x 4 x i1> %3,
1208 i32 0, i32 %4, i32 1)
1210 ret <vscale x 4 x i8> %a
1213 declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
1219 define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
1220 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8:
1221 ; CHECK: # %bb.0: # %entry
1222 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1223 ; CHECK-NEXT: csrwi vxrm, 0
1224 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1227 %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
1228 <vscale x 8 x i8> undef,
1229 <vscale x 8 x i8> %0,
1233 ret <vscale x 8 x i8> %a
1236 declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
1243 define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
1244 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8:
1245 ; CHECK: # %bb.0: # %entry
1246 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1247 ; CHECK-NEXT: csrwi vxrm, 0
1248 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1251 %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
1252 <vscale x 8 x i8> %0,
1253 <vscale x 8 x i8> %1,
1255 <vscale x 8 x i1> %3,
1256 i32 0, i32 %4, i32 1)
1258 ret <vscale x 8 x i8> %a
1261 declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
1267 define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
1268 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8:
1269 ; CHECK: # %bb.0: # %entry
1270 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1271 ; CHECK-NEXT: csrwi vxrm, 0
1272 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1275 %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
1276 <vscale x 16 x i8> undef,
1277 <vscale x 16 x i8> %0,
1281 ret <vscale x 16 x i8> %a
1284 declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
1291 define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
1292 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8:
1293 ; CHECK: # %bb.0: # %entry
1294 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
1295 ; CHECK-NEXT: csrwi vxrm, 0
1296 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
1299 %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
1300 <vscale x 16 x i8> %0,
1301 <vscale x 16 x i8> %1,
1303 <vscale x 16 x i1> %3,
1304 i32 0, i32 %4, i32 1)
1306 ret <vscale x 16 x i8> %a
1309 declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
1315 define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
1316 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8:
1317 ; CHECK: # %bb.0: # %entry
1318 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1319 ; CHECK-NEXT: csrwi vxrm, 0
1320 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1323 %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
1324 <vscale x 32 x i8> undef,
1325 <vscale x 32 x i8> %0,
1329 ret <vscale x 32 x i8> %a
1332 declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
1339 define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
1340 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8:
1341 ; CHECK: # %bb.0: # %entry
1342 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
1343 ; CHECK-NEXT: csrwi vxrm, 0
1344 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
1347 %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
1348 <vscale x 32 x i8> %0,
1349 <vscale x 32 x i8> %1,
1351 <vscale x 32 x i1> %3,
1352 i32 0, i32 %4, i32 1)
1354 ret <vscale x 32 x i8> %a
1357 declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
1363 define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
1364 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8:
1365 ; CHECK: # %bb.0: # %entry
1366 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
1367 ; CHECK-NEXT: csrwi vxrm, 0
1368 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1371 %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
1372 <vscale x 64 x i8> undef,
1373 <vscale x 64 x i8> %0,
1377 ret <vscale x 64 x i8> %a
1380 declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
1387 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
1388 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8:
1389 ; CHECK: # %bb.0: # %entry
1390 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
1391 ; CHECK-NEXT: csrwi vxrm, 0
1392 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
1395 %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
1396 <vscale x 64 x i8> %0,
1397 <vscale x 64 x i8> %1,
1399 <vscale x 64 x i1> %3,
1400 i32 0, i32 %4, i32 1)
1402 ret <vscale x 64 x i8> %a
1405 declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
1411 define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
1412 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16:
1413 ; CHECK: # %bb.0: # %entry
1414 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1415 ; CHECK-NEXT: csrwi vxrm, 0
1416 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1419 %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
1420 <vscale x 1 x i16> undef,
1421 <vscale x 1 x i16> %0,
1425 ret <vscale x 1 x i16> %a
1428 declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
1435 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
1436 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16:
1437 ; CHECK: # %bb.0: # %entry
1438 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1439 ; CHECK-NEXT: csrwi vxrm, 0
1440 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1443 %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
1444 <vscale x 1 x i16> %0,
1445 <vscale x 1 x i16> %1,
1447 <vscale x 1 x i1> %3,
1448 i32 0, i32 %4, i32 1)
1450 ret <vscale x 1 x i16> %a
1453 declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
1459 define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
1460 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16:
1461 ; CHECK: # %bb.0: # %entry
1462 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1463 ; CHECK-NEXT: csrwi vxrm, 0
1464 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1467 %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
1468 <vscale x 2 x i16> undef,
1469 <vscale x 2 x i16> %0,
1473 ret <vscale x 2 x i16> %a
1476 declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
1483 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
1484 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16:
1485 ; CHECK: # %bb.0: # %entry
1486 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1487 ; CHECK-NEXT: csrwi vxrm, 0
1488 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1491 %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
1492 <vscale x 2 x i16> %0,
1493 <vscale x 2 x i16> %1,
1495 <vscale x 2 x i1> %3,
1496 i32 0, i32 %4, i32 1)
1498 ret <vscale x 2 x i16> %a
1501 declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
1507 define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
1508 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16:
1509 ; CHECK: # %bb.0: # %entry
1510 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1511 ; CHECK-NEXT: csrwi vxrm, 0
1512 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1515 %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
1516 <vscale x 4 x i16> undef,
1517 <vscale x 4 x i16> %0,
1521 ret <vscale x 4 x i16> %a
1524 declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
1531 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
1532 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16:
1533 ; CHECK: # %bb.0: # %entry
1534 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1535 ; CHECK-NEXT: csrwi vxrm, 0
1536 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1539 %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
1540 <vscale x 4 x i16> %0,
1541 <vscale x 4 x i16> %1,
1543 <vscale x 4 x i1> %3,
1544 i32 0, i32 %4, i32 1)
1546 ret <vscale x 4 x i16> %a
1549 declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
1555 define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
1556 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16:
1557 ; CHECK: # %bb.0: # %entry
1558 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1559 ; CHECK-NEXT: csrwi vxrm, 0
1560 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1563 %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
1564 <vscale x 8 x i16> undef,
1565 <vscale x 8 x i16> %0,
1569 ret <vscale x 8 x i16> %a
1572 declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
1579 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
1580 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16:
1581 ; CHECK: # %bb.0: # %entry
1582 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1583 ; CHECK-NEXT: csrwi vxrm, 0
1584 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
1587 %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
1588 <vscale x 8 x i16> %0,
1589 <vscale x 8 x i16> %1,
1591 <vscale x 8 x i1> %3,
1592 i32 0, i32 %4, i32 1)
1594 ret <vscale x 8 x i16> %a
1597 declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
1598 <vscale x 16 x i16>,
1599 <vscale x 16 x i16>,
1603 define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
1604 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16:
1605 ; CHECK: # %bb.0: # %entry
1606 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1607 ; CHECK-NEXT: csrwi vxrm, 0
1608 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1611 %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
1612 <vscale x 16 x i16> undef,
1613 <vscale x 16 x i16> %0,
1617 ret <vscale x 16 x i16> %a
1620 declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
1621 <vscale x 16 x i16>,
1622 <vscale x 16 x i16>,
1627 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
1628 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16:
1629 ; CHECK: # %bb.0: # %entry
1630 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1631 ; CHECK-NEXT: csrwi vxrm, 0
1632 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
1635 %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
1636 <vscale x 16 x i16> %0,
1637 <vscale x 16 x i16> %1,
1639 <vscale x 16 x i1> %3,
1640 i32 0, i32 %4, i32 1)
1642 ret <vscale x 16 x i16> %a
1645 declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
1646 <vscale x 32 x i16>,
1647 <vscale x 32 x i16>,
1651 define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
1652 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16:
1653 ; CHECK: # %bb.0: # %entry
1654 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1655 ; CHECK-NEXT: csrwi vxrm, 0
1656 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1659 %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
1660 <vscale x 32 x i16> undef,
1661 <vscale x 32 x i16> %0,
1665 ret <vscale x 32 x i16> %a
1668 declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
1669 <vscale x 32 x i16>,
1670 <vscale x 32 x i16>,
1675 define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
1676 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16:
1677 ; CHECK: # %bb.0: # %entry
1678 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
1679 ; CHECK-NEXT: csrwi vxrm, 0
1680 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
1683 %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
1684 <vscale x 32 x i16> %0,
1685 <vscale x 32 x i16> %1,
1687 <vscale x 32 x i1> %3,
1688 i32 0, i32 %4, i32 1)
1690 ret <vscale x 32 x i16> %a
1693 declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
1698 define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
1699 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32:
1700 ; CHECK: # %bb.0: # %entry
1701 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1702 ; CHECK-NEXT: csrwi vxrm, 0
1703 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1706 %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
1707 <vscale x 1 x i32> undef,
1708 <vscale x 1 x i32> %0,
1712 ret <vscale x 1 x i32> %a
1715 declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
1722 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
1723 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32:
1724 ; CHECK: # %bb.0: # %entry
1725 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1726 ; CHECK-NEXT: csrwi vxrm, 0
1727 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1730 %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
1731 <vscale x 1 x i32> %0,
1732 <vscale x 1 x i32> %1,
1734 <vscale x 1 x i1> %3,
1735 i32 0, i32 %4, i32 1)
1737 ret <vscale x 1 x i32> %a
1740 declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
1745 define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
1746 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32:
1747 ; CHECK: # %bb.0: # %entry
1748 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1749 ; CHECK-NEXT: csrwi vxrm, 0
1750 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1753 %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
1754 <vscale x 2 x i32> undef,
1755 <vscale x 2 x i32> %0,
1759 ret <vscale x 2 x i32> %a
1762 declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
1769 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
1770 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32:
1771 ; CHECK: # %bb.0: # %entry
1772 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1773 ; CHECK-NEXT: csrwi vxrm, 0
1774 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1777 %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
1778 <vscale x 2 x i32> %0,
1779 <vscale x 2 x i32> %1,
1781 <vscale x 2 x i1> %3,
1782 i32 0, i32 %4, i32 1)
1784 ret <vscale x 2 x i32> %a
1787 declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
1792 define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
1793 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32:
1794 ; CHECK: # %bb.0: # %entry
1795 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1796 ; CHECK-NEXT: csrwi vxrm, 0
1797 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1800 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
1801 <vscale x 4 x i32> undef,
1802 <vscale x 4 x i32> %0,
1806 ret <vscale x 4 x i32> %a
1809 declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
1816 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
1817 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32:
1818 ; CHECK: # %bb.0: # %entry
1819 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1820 ; CHECK-NEXT: csrwi vxrm, 0
1821 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
1824 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
1825 <vscale x 4 x i32> %0,
1826 <vscale x 4 x i32> %1,
1828 <vscale x 4 x i1> %3,
1829 i32 0, i32 %4, i32 1)
1831 ret <vscale x 4 x i32> %a
1834 declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
1839 define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
1840 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32:
1841 ; CHECK: # %bb.0: # %entry
1842 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1843 ; CHECK-NEXT: csrwi vxrm, 0
1844 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1847 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
1848 <vscale x 8 x i32> undef,
1849 <vscale x 8 x i32> %0,
1853 ret <vscale x 8 x i32> %a
1856 declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
1863 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
1864 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32:
1865 ; CHECK: # %bb.0: # %entry
1866 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1867 ; CHECK-NEXT: csrwi vxrm, 0
1868 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
1871 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
1872 <vscale x 8 x i32> %0,
1873 <vscale x 8 x i32> %1,
1875 <vscale x 8 x i1> %3,
1876 i32 0, i32 %4, i32 1)
1878 ret <vscale x 8 x i32> %a
1881 declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
1882 <vscale x 16 x i32>,
1883 <vscale x 16 x i32>,
1886 define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
1887 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32:
1888 ; CHECK: # %bb.0: # %entry
1889 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1890 ; CHECK-NEXT: csrwi vxrm, 0
1891 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1894 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
1895 <vscale x 16 x i32> undef,
1896 <vscale x 16 x i32> %0,
1900 ret <vscale x 16 x i32> %a
1903 declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
1904 <vscale x 16 x i32>,
1905 <vscale x 16 x i32>,
1910 define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
1911 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32:
1912 ; CHECK: # %bb.0: # %entry
1913 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
1914 ; CHECK-NEXT: csrwi vxrm, 0
1915 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
1918 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
1919 <vscale x 16 x i32> %0,
1920 <vscale x 16 x i32> %1,
1922 <vscale x 16 x i1> %3,
1923 i32 0, i32 %4, i32 1)
1925 ret <vscale x 16 x i32> %a
1928 declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
1934 define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
1935 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
1936 ; CHECK: # %bb.0: # %entry
1937 ; CHECK-NEXT: addi sp, sp, -16
1938 ; CHECK-NEXT: sw a1, 12(sp)
1939 ; CHECK-NEXT: sw a0, 8(sp)
1940 ; CHECK-NEXT: addi a0, sp, 8
1941 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1942 ; CHECK-NEXT: vlse64.v v9, (a0), zero
1943 ; CHECK-NEXT: csrwi vxrm, 0
1944 ; CHECK-NEXT: vsmul.vv v8, v8, v9
1945 ; CHECK-NEXT: addi sp, sp, 16
1948 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
1949 <vscale x 1 x i64> undef,
1950 <vscale x 1 x i64> %0,
1954 ret <vscale x 1 x i64> %a
1957 declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
1964 define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
1965 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64:
1966 ; CHECK: # %bb.0: # %entry
1967 ; CHECK-NEXT: addi sp, sp, -16
1968 ; CHECK-NEXT: sw a1, 12(sp)
1969 ; CHECK-NEXT: sw a0, 8(sp)
1970 ; CHECK-NEXT: addi a0, sp, 8
1971 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
1972 ; CHECK-NEXT: vlse64.v v10, (a0), zero
1973 ; CHECK-NEXT: csrwi vxrm, 0
1974 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
1975 ; CHECK-NEXT: addi sp, sp, 16
1978 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
1979 <vscale x 1 x i64> %0,
1980 <vscale x 1 x i64> %1,
1982 <vscale x 1 x i1> %3,
1983 i32 0, i32 %4, i32 1)
1985 ret <vscale x 1 x i64> %a
1988 declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
1994 define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
1995 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64:
1996 ; CHECK: # %bb.0: # %entry
1997 ; CHECK-NEXT: addi sp, sp, -16
1998 ; CHECK-NEXT: sw a1, 12(sp)
1999 ; CHECK-NEXT: sw a0, 8(sp)
2000 ; CHECK-NEXT: addi a0, sp, 8
2001 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
2002 ; CHECK-NEXT: vlse64.v v10, (a0), zero
2003 ; CHECK-NEXT: csrwi vxrm, 0
2004 ; CHECK-NEXT: vsmul.vv v8, v8, v10
2005 ; CHECK-NEXT: addi sp, sp, 16
2008 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
2009 <vscale x 2 x i64> undef,
2010 <vscale x 2 x i64> %0,
2014 ret <vscale x 2 x i64> %a
2017 declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
2024 define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
2025 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64:
2026 ; CHECK: # %bb.0: # %entry
2027 ; CHECK-NEXT: addi sp, sp, -16
2028 ; CHECK-NEXT: sw a1, 12(sp)
2029 ; CHECK-NEXT: sw a0, 8(sp)
2030 ; CHECK-NEXT: addi a0, sp, 8
2031 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
2032 ; CHECK-NEXT: vlse64.v v12, (a0), zero
2033 ; CHECK-NEXT: csrwi vxrm, 0
2034 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
2035 ; CHECK-NEXT: addi sp, sp, 16
2038 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
2039 <vscale x 2 x i64> %0,
2040 <vscale x 2 x i64> %1,
2042 <vscale x 2 x i1> %3,
2043 i32 0, i32 %4, i32 1)
2045 ret <vscale x 2 x i64> %a
2048 declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
2054 define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
2055 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64:
2056 ; CHECK: # %bb.0: # %entry
2057 ; CHECK-NEXT: addi sp, sp, -16
2058 ; CHECK-NEXT: sw a1, 12(sp)
2059 ; CHECK-NEXT: sw a0, 8(sp)
2060 ; CHECK-NEXT: addi a0, sp, 8
2061 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
2062 ; CHECK-NEXT: vlse64.v v12, (a0), zero
2063 ; CHECK-NEXT: csrwi vxrm, 0
2064 ; CHECK-NEXT: vsmul.vv v8, v8, v12
2065 ; CHECK-NEXT: addi sp, sp, 16
2068 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
2069 <vscale x 4 x i64> undef,
2070 <vscale x 4 x i64> %0,
2074 ret <vscale x 4 x i64> %a
2077 declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
2084 define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
2085 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64:
2086 ; CHECK: # %bb.0: # %entry
2087 ; CHECK-NEXT: addi sp, sp, -16
2088 ; CHECK-NEXT: sw a1, 12(sp)
2089 ; CHECK-NEXT: sw a0, 8(sp)
2090 ; CHECK-NEXT: addi a0, sp, 8
2091 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
2092 ; CHECK-NEXT: vlse64.v v16, (a0), zero
2093 ; CHECK-NEXT: csrwi vxrm, 0
2094 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
2095 ; CHECK-NEXT: addi sp, sp, 16
2098 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
2099 <vscale x 4 x i64> %0,
2100 <vscale x 4 x i64> %1,
2102 <vscale x 4 x i1> %3,
2103 i32 0, i32 %4, i32 1)
2105 ret <vscale x 4 x i64> %a
2108 declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
2114 define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
2115 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64:
2116 ; CHECK: # %bb.0: # %entry
2117 ; CHECK-NEXT: addi sp, sp, -16
2118 ; CHECK-NEXT: sw a1, 12(sp)
2119 ; CHECK-NEXT: sw a0, 8(sp)
2120 ; CHECK-NEXT: addi a0, sp, 8
2121 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2122 ; CHECK-NEXT: vlse64.v v16, (a0), zero
2123 ; CHECK-NEXT: csrwi vxrm, 0
2124 ; CHECK-NEXT: vsmul.vv v8, v8, v16
2125 ; CHECK-NEXT: addi sp, sp, 16
2128 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
2129 <vscale x 8 x i64> undef,
2130 <vscale x 8 x i64> %0,
2134 ret <vscale x 8 x i64> %a
2137 declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
2144 define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
2145 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64:
2146 ; CHECK: # %bb.0: # %entry
2147 ; CHECK-NEXT: addi sp, sp, -16
2148 ; CHECK-NEXT: sw a1, 12(sp)
2149 ; CHECK-NEXT: sw a0, 8(sp)
2150 ; CHECK-NEXT: addi a0, sp, 8
2151 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
2152 ; CHECK-NEXT: vlse64.v v24, (a0), zero
2153 ; CHECK-NEXT: csrwi vxrm, 0
2154 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
2155 ; CHECK-NEXT: addi sp, sp, 16
2158 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
2159 <vscale x 8 x i64> %0,
2160 <vscale x 8 x i64> %1,
2162 <vscale x 8 x i1> %3,
2163 i32 0, i32 %4, i32 1)
2165 ret <vscale x 8 x i64> %a