1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
3 ; RUN: < %s | FileCheck %s
4 ; RUN: not --crash llc -mtriple=riscv64 -mattr=+zve64d 2>&1 \
5 ; RUN: < %s | FileCheck %s --check-prefixes=ZVE64D
7 ; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vsmul
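; The tests below exercise the @llvm.riscv.vsmul intrinsics in unmasked and
; masked vector-vector (vsmul.vv) and vector-scalar (vsmul.vx) forms across
; SEW=8..64 and LMUL=mf8..m8. As exercised here, the unmasked operands are
; (passthru, op1, op2, rounding mode, vl) and the masked operands are
; (passthru, op1, op2, mask, rounding mode, vl, policy); the rounding-mode
; argument of 0 is expected to materialize as "csrwi vxrm, 0" before each
; vsmul, and the policy argument of 1 matches the "ta, mu" configuration in
; the expected vsetvli. The zve64d RUN line verifies that these intrinsics
; fail to select when only Zve64d is enabled.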
9 declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
15 define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
16 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
17 ; CHECK: # %bb.0: # %entry
18 ; CHECK-NEXT: csrwi vxrm, 0
19 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
20 ; CHECK-NEXT: vsmul.vv v8, v8, v9
23 %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
24 <vscale x 1 x i8> undef,
29 ret <vscale x 1 x i8> %a
32 declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
39 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
40 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
41 ; CHECK: # %bb.0: # %entry
42 ; CHECK-NEXT: csrwi vxrm, 0
43 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
44 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
47 %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
54 ret <vscale x 1 x i8> %a
57 declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
63 define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
64 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8:
65 ; CHECK: # %bb.0: # %entry
66 ; CHECK-NEXT: csrwi vxrm, 0
67 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
68 ; CHECK-NEXT: vsmul.vv v8, v8, v9
71 %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
72 <vscale x 2 x i8> undef,
77 ret <vscale x 2 x i8> %a
80 declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
87 define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
88 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8:
89 ; CHECK: # %bb.0: # %entry
90 ; CHECK-NEXT: csrwi vxrm, 0
91 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
92 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
95 %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
100 i64 0, i64 %4, i64 1)
102 ret <vscale x 2 x i8> %a
105 declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
111 define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
112 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8:
113 ; CHECK: # %bb.0: # %entry
114 ; CHECK-NEXT: csrwi vxrm, 0
115 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
116 ; CHECK-NEXT: vsmul.vv v8, v8, v9
119 %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
120 <vscale x 4 x i8> undef,
121 <vscale x 4 x i8> %0,
122 <vscale x 4 x i8> %1,
125 ret <vscale x 4 x i8> %a
128 declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
135 define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
136 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8:
137 ; CHECK: # %bb.0: # %entry
138 ; CHECK-NEXT: csrwi vxrm, 0
139 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
140 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
143 %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
144 <vscale x 4 x i8> %0,
145 <vscale x 4 x i8> %1,
146 <vscale x 4 x i8> %2,
147 <vscale x 4 x i1> %3,
148 i64 0, i64 %4, i64 1)
150 ret <vscale x 4 x i8> %a
153 declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
159 define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
160 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8:
161 ; CHECK: # %bb.0: # %entry
162 ; CHECK-NEXT: csrwi vxrm, 0
163 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
164 ; CHECK-NEXT: vsmul.vv v8, v8, v9
167 %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
168 <vscale x 8 x i8> undef,
169 <vscale x 8 x i8> %0,
170 <vscale x 8 x i8> %1,
173 ret <vscale x 8 x i8> %a
176 declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
183 define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
184 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8:
185 ; CHECK: # %bb.0: # %entry
186 ; CHECK-NEXT: csrwi vxrm, 0
187 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
188 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
191 %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
192 <vscale x 8 x i8> %0,
193 <vscale x 8 x i8> %1,
194 <vscale x 8 x i8> %2,
195 <vscale x 8 x i1> %3,
196 i64 0, i64 %4, i64 1)
198 ret <vscale x 8 x i8> %a
201 declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
207 define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
208 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8:
209 ; CHECK: # %bb.0: # %entry
210 ; CHECK-NEXT: csrwi vxrm, 0
211 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
212 ; CHECK-NEXT: vsmul.vv v8, v8, v10
215 %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
216 <vscale x 16 x i8> undef,
217 <vscale x 16 x i8> %0,
218 <vscale x 16 x i8> %1,
221 ret <vscale x 16 x i8> %a
224 declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
231 define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
232 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8:
233 ; CHECK: # %bb.0: # %entry
234 ; CHECK-NEXT: csrwi vxrm, 0
235 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
236 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
239 %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
240 <vscale x 16 x i8> %0,
241 <vscale x 16 x i8> %1,
242 <vscale x 16 x i8> %2,
243 <vscale x 16 x i1> %3,
244 i64 0, i64 %4, i64 1)
246 ret <vscale x 16 x i8> %a
249 declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
255 define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
256 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8:
257 ; CHECK: # %bb.0: # %entry
258 ; CHECK-NEXT: csrwi vxrm, 0
259 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
260 ; CHECK-NEXT: vsmul.vv v8, v8, v12
263 %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
264 <vscale x 32 x i8> undef,
265 <vscale x 32 x i8> %0,
266 <vscale x 32 x i8> %1,
269 ret <vscale x 32 x i8> %a
272 declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
279 define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
280 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8:
281 ; CHECK: # %bb.0: # %entry
282 ; CHECK-NEXT: csrwi vxrm, 0
283 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
284 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
287 %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
288 <vscale x 32 x i8> %0,
289 <vscale x 32 x i8> %1,
290 <vscale x 32 x i8> %2,
291 <vscale x 32 x i1> %3,
292 i64 0, i64 %4, i64 1)
294 ret <vscale x 32 x i8> %a
297 declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
303 define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
304 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8:
305 ; CHECK: # %bb.0: # %entry
306 ; CHECK-NEXT: csrwi vxrm, 0
307 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
308 ; CHECK-NEXT: vsmul.vv v8, v8, v16
311 %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
312 <vscale x 64 x i8> undef,
313 <vscale x 64 x i8> %0,
314 <vscale x 64 x i8> %1,
317 ret <vscale x 64 x i8> %a
320 declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
327 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
328 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
329 ; CHECK: # %bb.0: # %entry
330 ; CHECK-NEXT: vl8r.v v24, (a0)
331 ; CHECK-NEXT: csrwi vxrm, 0
332 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
333 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
336 %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
337 <vscale x 64 x i8> %0,
338 <vscale x 64 x i8> %1,
339 <vscale x 64 x i8> %2,
340 <vscale x 64 x i1> %3,
341 i64 0, i64 %4, i64 1)
343 ret <vscale x 64 x i8> %a
346 declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
352 define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
353 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16:
354 ; CHECK: # %bb.0: # %entry
355 ; CHECK-NEXT: csrwi vxrm, 0
356 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
357 ; CHECK-NEXT: vsmul.vv v8, v8, v9
360 %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
361 <vscale x 1 x i16> undef,
362 <vscale x 1 x i16> %0,
363 <vscale x 1 x i16> %1,
366 ret <vscale x 1 x i16> %a
369 declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
376 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
377 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16:
378 ; CHECK: # %bb.0: # %entry
379 ; CHECK-NEXT: csrwi vxrm, 0
380 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
381 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
384 %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
385 <vscale x 1 x i16> %0,
386 <vscale x 1 x i16> %1,
387 <vscale x 1 x i16> %2,
388 <vscale x 1 x i1> %3,
389 i64 0, i64 %4, i64 1)
391 ret <vscale x 1 x i16> %a
394 declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
400 define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
401 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16:
402 ; CHECK: # %bb.0: # %entry
403 ; CHECK-NEXT: csrwi vxrm, 0
404 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
405 ; CHECK-NEXT: vsmul.vv v8, v8, v9
408 %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
409 <vscale x 2 x i16> undef,
410 <vscale x 2 x i16> %0,
411 <vscale x 2 x i16> %1,
414 ret <vscale x 2 x i16> %a
417 declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
424 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
425 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16:
426 ; CHECK: # %bb.0: # %entry
427 ; CHECK-NEXT: csrwi vxrm, 0
428 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
429 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
432 %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
433 <vscale x 2 x i16> %0,
434 <vscale x 2 x i16> %1,
435 <vscale x 2 x i16> %2,
436 <vscale x 2 x i1> %3,
437 i64 0, i64 %4, i64 1)
439 ret <vscale x 2 x i16> %a
442 declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
448 define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
449 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16:
450 ; CHECK: # %bb.0: # %entry
451 ; CHECK-NEXT: csrwi vxrm, 0
452 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
453 ; CHECK-NEXT: vsmul.vv v8, v8, v9
456 %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
457 <vscale x 4 x i16> undef,
458 <vscale x 4 x i16> %0,
459 <vscale x 4 x i16> %1,
462 ret <vscale x 4 x i16> %a
465 declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
472 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
473 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16:
474 ; CHECK: # %bb.0: # %entry
475 ; CHECK-NEXT: csrwi vxrm, 0
476 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
477 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
480 %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
481 <vscale x 4 x i16> %0,
482 <vscale x 4 x i16> %1,
483 <vscale x 4 x i16> %2,
484 <vscale x 4 x i1> %3,
485 i64 0, i64 %4, i64 1)
487 ret <vscale x 4 x i16> %a
490 declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
496 define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
497 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16:
498 ; CHECK: # %bb.0: # %entry
499 ; CHECK-NEXT: csrwi vxrm, 0
500 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
501 ; CHECK-NEXT: vsmul.vv v8, v8, v10
504 %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
505 <vscale x 8 x i16> undef,
506 <vscale x 8 x i16> %0,
507 <vscale x 8 x i16> %1,
510 ret <vscale x 8 x i16> %a
513 declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
520 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
521 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16:
522 ; CHECK: # %bb.0: # %entry
523 ; CHECK-NEXT: csrwi vxrm, 0
524 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
525 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
528 %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
529 <vscale x 8 x i16> %0,
530 <vscale x 8 x i16> %1,
531 <vscale x 8 x i16> %2,
532 <vscale x 8 x i1> %3,
533 i64 0, i64 %4, i64 1)
535 ret <vscale x 8 x i16> %a
538 declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
544 define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
545 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16:
546 ; CHECK: # %bb.0: # %entry
547 ; CHECK-NEXT: csrwi vxrm, 0
548 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
549 ; CHECK-NEXT: vsmul.vv v8, v8, v12
552 %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
553 <vscale x 16 x i16> undef,
554 <vscale x 16 x i16> %0,
555 <vscale x 16 x i16> %1,
558 ret <vscale x 16 x i16> %a
561 declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
568 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
569 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16:
570 ; CHECK: # %bb.0: # %entry
571 ; CHECK-NEXT: csrwi vxrm, 0
572 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
573 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
576 %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
577 <vscale x 16 x i16> %0,
578 <vscale x 16 x i16> %1,
579 <vscale x 16 x i16> %2,
580 <vscale x 16 x i1> %3,
581 i64 0, i64 %4, i64 1)
583 ret <vscale x 16 x i16> %a
586 declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
592 define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
593 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16:
594 ; CHECK: # %bb.0: # %entry
595 ; CHECK-NEXT: csrwi vxrm, 0
596 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
597 ; CHECK-NEXT: vsmul.vv v8, v8, v16
600 %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
601 <vscale x 32 x i16> undef,
602 <vscale x 32 x i16> %0,
603 <vscale x 32 x i16> %1,
606 ret <vscale x 32 x i16> %a
609 declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
616 define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
617 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
618 ; CHECK: # %bb.0: # %entry
619 ; CHECK-NEXT: vl8re16.v v24, (a0)
620 ; CHECK-NEXT: csrwi vxrm, 0
621 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
622 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
625 %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
626 <vscale x 32 x i16> %0,
627 <vscale x 32 x i16> %1,
628 <vscale x 32 x i16> %2,
629 <vscale x 32 x i1> %3,
630 i64 0, i64 %4, i64 1)
632 ret <vscale x 32 x i16> %a
635 declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
641 define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
642 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32:
643 ; CHECK: # %bb.0: # %entry
644 ; CHECK-NEXT: csrwi vxrm, 0
645 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
646 ; CHECK-NEXT: vsmul.vv v8, v8, v9
649 %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
650 <vscale x 1 x i32> undef,
651 <vscale x 1 x i32> %0,
652 <vscale x 1 x i32> %1,
655 ret <vscale x 1 x i32> %a
658 declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
665 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
666 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32:
667 ; CHECK: # %bb.0: # %entry
668 ; CHECK-NEXT: csrwi vxrm, 0
669 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
670 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
673 %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
674 <vscale x 1 x i32> %0,
675 <vscale x 1 x i32> %1,
676 <vscale x 1 x i32> %2,
677 <vscale x 1 x i1> %3,
678 i64 0, i64 %4, i64 1)
680 ret <vscale x 1 x i32> %a
683 declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
689 define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
690 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32:
691 ; CHECK: # %bb.0: # %entry
692 ; CHECK-NEXT: csrwi vxrm, 0
693 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
694 ; CHECK-NEXT: vsmul.vv v8, v8, v9
697 %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
698 <vscale x 2 x i32> undef,
699 <vscale x 2 x i32> %0,
700 <vscale x 2 x i32> %1,
703 ret <vscale x 2 x i32> %a
706 declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
713 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
714 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32:
715 ; CHECK: # %bb.0: # %entry
716 ; CHECK-NEXT: csrwi vxrm, 0
717 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
718 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
721 %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
722 <vscale x 2 x i32> %0,
723 <vscale x 2 x i32> %1,
724 <vscale x 2 x i32> %2,
725 <vscale x 2 x i1> %3,
726 i64 0, i64 %4, i64 1)
728 ret <vscale x 2 x i32> %a
731 declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
737 define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
738 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32:
739 ; CHECK: # %bb.0: # %entry
740 ; CHECK-NEXT: csrwi vxrm, 0
741 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
742 ; CHECK-NEXT: vsmul.vv v8, v8, v10
745 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
746 <vscale x 4 x i32> undef,
747 <vscale x 4 x i32> %0,
748 <vscale x 4 x i32> %1,
751 ret <vscale x 4 x i32> %a
754 declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
761 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
762 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32:
763 ; CHECK: # %bb.0: # %entry
764 ; CHECK-NEXT: csrwi vxrm, 0
765 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
766 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
769 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
770 <vscale x 4 x i32> %0,
771 <vscale x 4 x i32> %1,
772 <vscale x 4 x i32> %2,
773 <vscale x 4 x i1> %3,
774 i64 0, i64 %4, i64 1)
776 ret <vscale x 4 x i32> %a
779 declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
785 define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
786 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32:
787 ; CHECK: # %bb.0: # %entry
788 ; CHECK-NEXT: csrwi vxrm, 0
789 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
790 ; CHECK-NEXT: vsmul.vv v8, v8, v12
793 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
794 <vscale x 8 x i32> undef,
795 <vscale x 8 x i32> %0,
796 <vscale x 8 x i32> %1,
799 ret <vscale x 8 x i32> %a
802 declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
809 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
810 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32:
811 ; CHECK: # %bb.0: # %entry
812 ; CHECK-NEXT: csrwi vxrm, 0
813 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
814 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
817 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
818 <vscale x 8 x i32> %0,
819 <vscale x 8 x i32> %1,
820 <vscale x 8 x i32> %2,
821 <vscale x 8 x i1> %3,
822 i64 0, i64 %4, i64 1)
824 ret <vscale x 8 x i32> %a
827 declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
833 define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
834 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32:
835 ; CHECK: # %bb.0: # %entry
836 ; CHECK-NEXT: csrwi vxrm, 0
837 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
838 ; CHECK-NEXT: vsmul.vv v8, v8, v16
841 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
842 <vscale x 16 x i32> undef,
843 <vscale x 16 x i32> %0,
844 <vscale x 16 x i32> %1,
847 ret <vscale x 16 x i32> %a
850 declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
857 define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
858 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
859 ; CHECK: # %bb.0: # %entry
860 ; CHECK-NEXT: vl8re32.v v24, (a0)
861 ; CHECK-NEXT: csrwi vxrm, 0
862 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
863 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
866 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
867 <vscale x 16 x i32> %0,
868 <vscale x 16 x i32> %1,
869 <vscale x 16 x i32> %2,
870 <vscale x 16 x i1> %3,
871 i64 0, i64 %4, i64 1)
873 ret <vscale x 16 x i32> %a
876 declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
882 define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
883 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64:
884 ; CHECK: # %bb.0: # %entry
885 ; CHECK-NEXT: csrwi vxrm, 0
886 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
887 ; CHECK-NEXT: vsmul.vv v8, v8, v9
890 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
891 <vscale x 1 x i64> undef,
892 <vscale x 1 x i64> %0,
893 <vscale x 1 x i64> %1,
896 ret <vscale x 1 x i64> %a
899 declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
906 define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
907 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
908 ; CHECK: # %bb.0: # %entry
909 ; CHECK-NEXT: csrwi vxrm, 0
910 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
911 ; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
914 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
915 <vscale x 1 x i64> %0,
916 <vscale x 1 x i64> %1,
917 <vscale x 1 x i64> %2,
918 <vscale x 1 x i1> %3,
919 i64 0, i64 %4, i64 1)
921 ret <vscale x 1 x i64> %a
924 declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
930 define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
931 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64:
932 ; CHECK: # %bb.0: # %entry
933 ; CHECK-NEXT: csrwi vxrm, 0
934 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
935 ; CHECK-NEXT: vsmul.vv v8, v8, v10
938 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
939 <vscale x 2 x i64> undef,
940 <vscale x 2 x i64> %0,
941 <vscale x 2 x i64> %1,
944 ret <vscale x 2 x i64> %a
947 declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
954 define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
955 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
956 ; CHECK: # %bb.0: # %entry
957 ; CHECK-NEXT: csrwi vxrm, 0
958 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
959 ; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
962 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
963 <vscale x 2 x i64> %0,
964 <vscale x 2 x i64> %1,
965 <vscale x 2 x i64> %2,
966 <vscale x 2 x i1> %3,
967 i64 0, i64 %4, i64 1)
969 ret <vscale x 2 x i64> %a
972 declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
978 define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
979 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64:
980 ; CHECK: # %bb.0: # %entry
981 ; CHECK-NEXT: csrwi vxrm, 0
982 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
983 ; CHECK-NEXT: vsmul.vv v8, v8, v12
986 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
987 <vscale x 4 x i64> undef,
988 <vscale x 4 x i64> %0,
989 <vscale x 4 x i64> %1,
992 ret <vscale x 4 x i64> %a
995 declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
1002 define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
1003 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
1004 ; CHECK: # %bb.0: # %entry
1005 ; CHECK-NEXT: csrwi vxrm, 0
1006 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
1007 ; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
1010 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
1011 <vscale x 4 x i64> %0,
1012 <vscale x 4 x i64> %1,
1013 <vscale x 4 x i64> %2,
1014 <vscale x 4 x i1> %3,
1015 i64 0, i64 %4, i64 1)
1017 ret <vscale x 4 x i64> %a
1020 declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
1026 define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
1027 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64:
1028 ; CHECK: # %bb.0: # %entry
1029 ; CHECK-NEXT: csrwi vxrm, 0
1030 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1031 ; CHECK-NEXT: vsmul.vv v8, v8, v16
1034 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
1035 <vscale x 8 x i64> undef,
1036 <vscale x 8 x i64> %0,
1037 <vscale x 8 x i64> %1,
1040 ret <vscale x 8 x i64> %a
1043 declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
1050 define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
1051 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
1052 ; CHECK: # %bb.0: # %entry
1053 ; CHECK-NEXT: vl8re64.v v24, (a0)
1054 ; CHECK-NEXT: csrwi vxrm, 0
1055 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
1056 ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
1059 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
1060 <vscale x 8 x i64> %0,
1061 <vscale x 8 x i64> %1,
1062 <vscale x 8 x i64> %2,
1063 <vscale x 8 x i1> %3,
1064 i64 0, i64 %4, i64 1)
1066 ret <vscale x 8 x i64> %a
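; The tests below switch to the vector-scalar form (vsmul.vx): the scalar
; operand arrives in a0 and the requested vector length in a1, so the expected
; vsetvli uses a1 rather than a0.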
1069 declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
1075 define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
1076 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8:
1077 ; CHECK: # %bb.0: # %entry
1078 ; CHECK-NEXT: csrwi vxrm, 0
1079 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1080 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1083 %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
1084 <vscale x 1 x i8> undef,
1085 <vscale x 1 x i8> %0,
1089 ret <vscale x 1 x i8> %a
1092 declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
1099 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
1100 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8:
1101 ; CHECK: # %bb.0: # %entry
1102 ; CHECK-NEXT: csrwi vxrm, 0
1103 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
1104 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1107 %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
1108 <vscale x 1 x i8> %0,
1109 <vscale x 1 x i8> %1,
1111 <vscale x 1 x i1> %3,
1112 i64 0, i64 %4, i64 1)
1114 ret <vscale x 1 x i8> %a
1117 declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
1123 define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
1124 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8:
1125 ; CHECK: # %bb.0: # %entry
1126 ; CHECK-NEXT: csrwi vxrm, 0
1127 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1128 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1131 %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
1132 <vscale x 2 x i8> undef,
1133 <vscale x 2 x i8> %0,
1137 ret <vscale x 2 x i8> %a
1140 declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
1147 define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
1148 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8:
1149 ; CHECK: # %bb.0: # %entry
1150 ; CHECK-NEXT: csrwi vxrm, 0
1151 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1152 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1155 %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
1156 <vscale x 2 x i8> %0,
1157 <vscale x 2 x i8> %1,
1159 <vscale x 2 x i1> %3,
1160 i64 0, i64 %4, i64 1)
1162 ret <vscale x 2 x i8> %a
1165 declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
1171 define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
1172 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8:
1173 ; CHECK: # %bb.0: # %entry
1174 ; CHECK-NEXT: csrwi vxrm, 0
1175 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1176 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1179 %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
1180 <vscale x 4 x i8> undef,
1181 <vscale x 4 x i8> %0,
1185 ret <vscale x 4 x i8> %a
1188 declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
1195 define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
1196 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8:
1197 ; CHECK: # %bb.0: # %entry
1198 ; CHECK-NEXT: csrwi vxrm, 0
1199 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1200 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1203 %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
1204 <vscale x 4 x i8> %0,
1205 <vscale x 4 x i8> %1,
1207 <vscale x 4 x i1> %3,
1208 i64 0, i64 %4, i64 1)
1210 ret <vscale x 4 x i8> %a
1213 declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
1219 define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
1220 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8:
1221 ; CHECK: # %bb.0: # %entry
1222 ; CHECK-NEXT: csrwi vxrm, 0
1223 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1224 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1227 %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
1228 <vscale x 8 x i8> undef,
1229 <vscale x 8 x i8> %0,
1233 ret <vscale x 8 x i8> %a
1236 declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
1243 define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
1244 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8:
1245 ; CHECK: # %bb.0: # %entry
1246 ; CHECK-NEXT: csrwi vxrm, 0
1247 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1248 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1251 %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
1252 <vscale x 8 x i8> %0,
1253 <vscale x 8 x i8> %1,
1255 <vscale x 8 x i1> %3,
1256 i64 0, i64 %4, i64 1)
1258 ret <vscale x 8 x i8> %a
1261 declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
1267 define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
1268 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8:
1269 ; CHECK: # %bb.0: # %entry
1270 ; CHECK-NEXT: csrwi vxrm, 0
1271 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1272 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1275 %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
1276 <vscale x 16 x i8> undef,
1277 <vscale x 16 x i8> %0,
1281 ret <vscale x 16 x i8> %a
1284 declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
1291 define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
1292 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8:
1293 ; CHECK: # %bb.0: # %entry
1294 ; CHECK-NEXT: csrwi vxrm, 0
1295 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
1296 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
1299 %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
1300 <vscale x 16 x i8> %0,
1301 <vscale x 16 x i8> %1,
1303 <vscale x 16 x i1> %3,
1304 i64 0, i64 %4, i64 1)
1306 ret <vscale x 16 x i8> %a
1309 declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
1315 define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
1316 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8:
1317 ; CHECK: # %bb.0: # %entry
1318 ; CHECK-NEXT: csrwi vxrm, 0
1319 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1320 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1323 %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
1324 <vscale x 32 x i8> undef,
1325 <vscale x 32 x i8> %0,
1329 ret <vscale x 32 x i8> %a
1332 declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
1339 define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
1340 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8:
1341 ; CHECK: # %bb.0: # %entry
1342 ; CHECK-NEXT: csrwi vxrm, 0
1343 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
1344 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
1347 %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
1348 <vscale x 32 x i8> %0,
1349 <vscale x 32 x i8> %1,
1351 <vscale x 32 x i1> %3,
1352 i64 0, i64 %4, i64 1)
1354 ret <vscale x 32 x i8> %a
1357 declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
1363 define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
1364 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8:
1365 ; CHECK: # %bb.0: # %entry
1366 ; CHECK-NEXT: csrwi vxrm, 0
1367 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
1368 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1371 %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
1372 <vscale x 64 x i8> undef,
1373 <vscale x 64 x i8> %0,
1377 ret <vscale x 64 x i8> %a
1380 declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
1387 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
1388 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8:
1389 ; CHECK: # %bb.0: # %entry
1390 ; CHECK-NEXT: csrwi vxrm, 0
1391 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
1392 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
1395 %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
1396 <vscale x 64 x i8> %0,
1397 <vscale x 64 x i8> %1,
1399 <vscale x 64 x i1> %3,
1400 i64 0, i64 %4, i64 1)
1402 ret <vscale x 64 x i8> %a
1405 declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
1411 define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
1412 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16:
1413 ; CHECK: # %bb.0: # %entry
1414 ; CHECK-NEXT: csrwi vxrm, 0
1415 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1416 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1419 %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
1420 <vscale x 1 x i16> undef,
1421 <vscale x 1 x i16> %0,
1425 ret <vscale x 1 x i16> %a
1428 declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
1435 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
1436 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16:
1437 ; CHECK: # %bb.0: # %entry
1438 ; CHECK-NEXT: csrwi vxrm, 0
1439 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1440 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1443 %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
1444 <vscale x 1 x i16> %0,
1445 <vscale x 1 x i16> %1,
1447 <vscale x 1 x i1> %3,
1448 i64 0, i64 %4, i64 1)
1450 ret <vscale x 1 x i16> %a
1453 declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
1459 define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
1460 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16:
1461 ; CHECK: # %bb.0: # %entry
1462 ; CHECK-NEXT: csrwi vxrm, 0
1463 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1464 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1467 %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
1468 <vscale x 2 x i16> undef,
1469 <vscale x 2 x i16> %0,
1473 ret <vscale x 2 x i16> %a
1476 declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
1483 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
1484 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16:
1485 ; CHECK: # %bb.0: # %entry
1486 ; CHECK-NEXT: csrwi vxrm, 0
1487 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1488 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1491 %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
1492 <vscale x 2 x i16> %0,
1493 <vscale x 2 x i16> %1,
1495 <vscale x 2 x i1> %3,
1496 i64 0, i64 %4, i64 1)
1498 ret <vscale x 2 x i16> %a
1501 declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
1507 define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
1508 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16:
1509 ; CHECK: # %bb.0: # %entry
1510 ; CHECK-NEXT: csrwi vxrm, 0
1511 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1512 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1515 %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
1516 <vscale x 4 x i16> undef,
1517 <vscale x 4 x i16> %0,
1521 ret <vscale x 4 x i16> %a
1524 declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
1531 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
1532 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16:
1533 ; CHECK: # %bb.0: # %entry
1534 ; CHECK-NEXT: csrwi vxrm, 0
1535 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1536 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1539 %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
1540 <vscale x 4 x i16> %0,
1541 <vscale x 4 x i16> %1,
1543 <vscale x 4 x i1> %3,
1544 i64 0, i64 %4, i64 1)
1546 ret <vscale x 4 x i16> %a
1549 declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
1555 define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
1556 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16:
1557 ; CHECK: # %bb.0: # %entry
1558 ; CHECK-NEXT: csrwi vxrm, 0
1559 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1560 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1563 %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
1564 <vscale x 8 x i16> undef,
1565 <vscale x 8 x i16> %0,
1569 ret <vscale x 8 x i16> %a
1572 declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
1579 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
1580 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16:
1581 ; CHECK: # %bb.0: # %entry
1582 ; CHECK-NEXT: csrwi vxrm, 0
1583 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1584 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
1587 %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
1588 <vscale x 8 x i16> %0,
1589 <vscale x 8 x i16> %1,
1591 <vscale x 8 x i1> %3,
1592 i64 0, i64 %4, i64 1)
1594 ret <vscale x 8 x i16> %a
1597 declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
1598 <vscale x 16 x i16>,
1599 <vscale x 16 x i16>,
1603 define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
1604 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16:
1605 ; CHECK: # %bb.0: # %entry
1606 ; CHECK-NEXT: csrwi vxrm, 0
1607 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1608 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1611 %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
1612 <vscale x 16 x i16> undef,
1613 <vscale x 16 x i16> %0,
1617 ret <vscale x 16 x i16> %a
1620 declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
1621 <vscale x 16 x i16>,
1622 <vscale x 16 x i16>,
1627 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
1628 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16:
1629 ; CHECK: # %bb.0: # %entry
1630 ; CHECK-NEXT: csrwi vxrm, 0
1631 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1632 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
1635 %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
1636 <vscale x 16 x i16> %0,
1637 <vscale x 16 x i16> %1,
1639 <vscale x 16 x i1> %3,
1640 i64 0, i64 %4, i64 1)
1642 ret <vscale x 16 x i16> %a
1645 declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
1646 <vscale x 32 x i16>,
1647 <vscale x 32 x i16>,
1651 define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
1652 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16:
1653 ; CHECK: # %bb.0: # %entry
1654 ; CHECK-NEXT: csrwi vxrm, 0
1655 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1656 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1659 %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
1660 <vscale x 32 x i16> undef,
1661 <vscale x 32 x i16> %0,
1665 ret <vscale x 32 x i16> %a
1668 declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
1669 <vscale x 32 x i16>,
1670 <vscale x 32 x i16>,
1675 define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
1676 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16:
1677 ; CHECK: # %bb.0: # %entry
1678 ; CHECK-NEXT: csrwi vxrm, 0
1679 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
1680 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
1683 %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
1684 <vscale x 32 x i16> %0,
1685 <vscale x 32 x i16> %1,
1687 <vscale x 32 x i1> %3,
1688 i64 0, i64 %4, i64 1)
1690 ret <vscale x 32 x i16> %a
1693 declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
1699 define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
1700 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32:
1701 ; CHECK: # %bb.0: # %entry
1702 ; CHECK-NEXT: csrwi vxrm, 0
1703 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1704 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1707 %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
1708 <vscale x 1 x i32> undef,
1709 <vscale x 1 x i32> %0,
1713 ret <vscale x 1 x i32> %a
1716 declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
1723 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
1724 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32:
1725 ; CHECK: # %bb.0: # %entry
1726 ; CHECK-NEXT: csrwi vxrm, 0
1727 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1728 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1731 %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
1732 <vscale x 1 x i32> %0,
1733 <vscale x 1 x i32> %1,
1735 <vscale x 1 x i1> %3,
1736 i64 0, i64 %4, i64 1)
1738 ret <vscale x 1 x i32> %a
1741 declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
1747 define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
1748 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32:
1749 ; CHECK: # %bb.0: # %entry
1750 ; CHECK-NEXT: csrwi vxrm, 0
1751 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1752 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1755 %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
1756 <vscale x 2 x i32> undef,
1757 <vscale x 2 x i32> %0,
1761 ret <vscale x 2 x i32> %a
1764 declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
1771 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
1772 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32:
1773 ; CHECK: # %bb.0: # %entry
1774 ; CHECK-NEXT: csrwi vxrm, 0
1775 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1776 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1779 %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
1780 <vscale x 2 x i32> %0,
1781 <vscale x 2 x i32> %1,
1783 <vscale x 2 x i1> %3,
1784 i64 0, i64 %4, i64 1)
1786 ret <vscale x 2 x i32> %a
1789 declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
1795 define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
1796 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32:
1797 ; CHECK: # %bb.0: # %entry
1798 ; CHECK-NEXT: csrwi vxrm, 0
1799 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1800 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1803 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
1804 <vscale x 4 x i32> undef,
1805 <vscale x 4 x i32> %0,
1809 ret <vscale x 4 x i32> %a
1812 declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
1819 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
1820 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32:
1821 ; CHECK: # %bb.0: # %entry
1822 ; CHECK-NEXT: csrwi vxrm, 0
1823 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1824 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
1827 %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
1828 <vscale x 4 x i32> %0,
1829 <vscale x 4 x i32> %1,
1831 <vscale x 4 x i1> %3,
1832 i64 0, i64 %4, i64 1)
1834 ret <vscale x 4 x i32> %a
1837 declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
1843 define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
1844 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32:
1845 ; CHECK: # %bb.0: # %entry
1846 ; CHECK-NEXT: csrwi vxrm, 0
1847 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1848 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1851 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
1852 <vscale x 8 x i32> undef,
1853 <vscale x 8 x i32> %0,
1857 ret <vscale x 8 x i32> %a
1860 declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
1867 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
1868 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32:
1869 ; CHECK: # %bb.0: # %entry
1870 ; CHECK-NEXT: csrwi vxrm, 0
1871 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1872 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
1875 %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
1876 <vscale x 8 x i32> %0,
1877 <vscale x 8 x i32> %1,
1879 <vscale x 8 x i1> %3,
1880 i64 0, i64 %4, i64 1)
1882 ret <vscale x 8 x i32> %a
1885 declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
1886 <vscale x 16 x i32>,
1887 <vscale x 16 x i32>,
1891 define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
1892 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32:
1893 ; CHECK: # %bb.0: # %entry
1894 ; CHECK-NEXT: csrwi vxrm, 0
1895 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1896 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1899 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
1900 <vscale x 16 x i32> undef,
1901 <vscale x 16 x i32> %0,
1905 ret <vscale x 16 x i32> %a
1908 declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
1909 <vscale x 16 x i32>,
1910 <vscale x 16 x i32>,
1915 define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
1916 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32:
1917 ; CHECK: # %bb.0: # %entry
1918 ; CHECK-NEXT: csrwi vxrm, 0
1919 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
1920 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
1923 %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
1924 <vscale x 16 x i32> %0,
1925 <vscale x 16 x i32> %1,
1927 <vscale x 16 x i1> %3,
1928 i64 0, i64 %4, i64 1)
1930 ret <vscale x 16 x i32> %a
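; For the i64 vector-scalar tests that follow, the 64-bit scalar operand fits
; in a single GPR on this rv64 target and is passed directly to vsmul.vx in a0.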
1933 declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
1938 define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
1939 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
1940 ; CHECK: # %bb.0: # %entry
1941 ; CHECK-NEXT: csrwi vxrm, 0
1942 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1943 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1946 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
1947 <vscale x 1 x i64> undef,
1948 <vscale x 1 x i64> %0,
1952 ret <vscale x 1 x i64> %a
1955 declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
1962 define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
1963 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64:
1964 ; CHECK: # %bb.0: # %entry
1965 ; CHECK-NEXT: csrwi vxrm, 0
1966 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1967 ; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
1970 %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
1971 <vscale x 1 x i64> %0,
1972 <vscale x 1 x i64> %1,
1974 <vscale x 1 x i1> %3,
1975 i64 0, i64 %4, i64 1)
1977 ret <vscale x 1 x i64> %a
1980 declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
1985 define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
1986 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64:
1987 ; CHECK: # %bb.0: # %entry
1988 ; CHECK-NEXT: csrwi vxrm, 0
1989 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1990 ; CHECK-NEXT: vsmul.vx v8, v8, a0
1993 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
1994 <vscale x 2 x i64> undef,
1995 <vscale x 2 x i64> %0,
1999 ret <vscale x 2 x i64> %a
2002 declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
2009 define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
2010 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64:
2011 ; CHECK: # %bb.0: # %entry
2012 ; CHECK-NEXT: csrwi vxrm, 0
2013 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
2014 ; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
2017 %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
2018 <vscale x 2 x i64> %0,
2019 <vscale x 2 x i64> %1,
2021 <vscale x 2 x i1> %3,
2022 i64 0, i64 %4, i64 1)
2024 ret <vscale x 2 x i64> %a
2027 declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
2032 define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
2033 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64:
2034 ; CHECK: # %bb.0: # %entry
2035 ; CHECK-NEXT: csrwi vxrm, 0
2036 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
2037 ; CHECK-NEXT: vsmul.vx v8, v8, a0
2040 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
2041 <vscale x 4 x i64> undef,
2042 <vscale x 4 x i64> %0,
2046 ret <vscale x 4 x i64> %a
2049 declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
2056 define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
2057 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64:
2058 ; CHECK: # %bb.0: # %entry
2059 ; CHECK-NEXT: csrwi vxrm, 0
2060 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
2061 ; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
2064 %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
2065 <vscale x 4 x i64> %0,
2066 <vscale x 4 x i64> %1,
2068 <vscale x 4 x i1> %3,
2069 i64 0, i64 %4, i64 1)
2071 ret <vscale x 4 x i64> %a
2074 declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
2079 define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
2080 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64:
2081 ; CHECK: # %bb.0: # %entry
2082 ; CHECK-NEXT: csrwi vxrm, 0
2083 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
2084 ; CHECK-NEXT: vsmul.vx v8, v8, a0
2087 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
2088 <vscale x 8 x i64> undef,
2089 <vscale x 8 x i64> %0,
2093 ret <vscale x 8 x i64> %a
2096 declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
2103 define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
2104 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64:
2105 ; CHECK: # %bb.0: # %entry
2106 ; CHECK-NEXT: csrwi vxrm, 0
2107 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
2108 ; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
2111 %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
2112 <vscale x 8 x i64> %0,
2113 <vscale x 8 x i64> %1,
2115 <vscale x 8 x i1> %3,
2116 i64 0, i64 %4, i64 1)
2118 ret <vscale x 8 x i64> %a