; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
; RUN: sed 's/iXLen/i32/g' %s | not --crash llc -mtriple=riscv32 \
; RUN:   -mattr=+zve64d 2>&1 | FileCheck %s --check-prefixes=ZVE64D
; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 \
; RUN:   -mattr=+zve64d 2>&1 | FileCheck %s --check-prefixes=ZVE64D

; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vmulh
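
; vmulh returns the high SEW bits of the signed SEW*SEW product. The tests
; below cover the vector-vector (vv) and vector-scalar (vx) forms, unmasked
; and masked, across the supported SEW/LMUL combinations.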
declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v9
%a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8(
<vscale x 1 x i8> undef,
ret <vscale x 1 x i8> %a
declare <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
define <vscale x 1 x i8> @intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t
%a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8(
ret <vscale x 1 x i8> %a
declare <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
define <vscale x 2 x i8> @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v9
%a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8(
<vscale x 2 x i8> undef,
ret <vscale x 2 x i8> %a
declare <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
define <vscale x 2 x i8> @intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t
%a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8(
ret <vscale x 2 x i8> %a
declare <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
define <vscale x 4 x i8> @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v9
%a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8(
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
ret <vscale x 4 x i8> %a
declare <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8(
define <vscale x 4 x i8> @intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t
%a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
ret <vscale x 4 x i8> %a
declare <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8(
define <vscale x 8 x i8> @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v9
%a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8(
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
ret <vscale x 8 x i8> %a
declare <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8(
define <vscale x 8 x i8> @intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
ret <vscale x 8 x i8> %a
declare <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8(
define <vscale x 16 x i8> @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v10
%a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8(
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
ret <vscale x 16 x i8> %a
declare <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8(
define <vscale x 16 x i8> @intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t
%a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
ret <vscale x 16 x i8> %a
declare <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8(
define <vscale x 32 x i8> @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v12
%a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8(
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
ret <vscale x 32 x i8> %a
declare <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8(
define <vscale x 32 x i8> @intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t
%a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
ret <vscale x 32 x i8> %a
declare <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v16
%a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8(
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
ret <vscale x 64 x i8> %a
declare <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
define <vscale x 64 x i8> @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
%a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8(
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
ret <vscale x 64 x i8> %a
declare <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16(
define <vscale x 1 x i16> @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v9
%a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16(
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
ret <vscale x 1 x i16> %a
declare <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16(
define <vscale x 1 x i16> @intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
ret <vscale x 1 x i16> %a
declare <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16(
define <vscale x 2 x i16> @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v9
%a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16(
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
ret <vscale x 2 x i16> %a
declare <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16(
define <vscale x 2 x i16> @intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
ret <vscale x 2 x i16> %a
declare <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16(
define <vscale x 4 x i16> @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v9
%a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16(
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
ret <vscale x 4 x i16> %a
declare <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16(
define <vscale x 4 x i16> @intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
ret <vscale x 4 x i16> %a
declare <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16(
define <vscale x 8 x i16> @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v10
%a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16(
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
ret <vscale x 8 x i16> %a
declare <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16(
define <vscale x 8 x i16> @intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
ret <vscale x 8 x i16> %a
declare <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16(
define <vscale x 16 x i16> @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v12
%a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16(
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
ret <vscale x 16 x i16> %a
declare <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16(
define <vscale x 16 x i16> @intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
ret <vscale x 16 x i16> %a
declare <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v16
%a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16(
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
ret <vscale x 32 x i16> %a
declare <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
define <vscale x 32 x i16> @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
ret <vscale x 32 x i16> %a
declare <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32(
define <vscale x 1 x i32> @intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v9
%a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32(
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
ret <vscale x 1 x i32> %a
declare <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32(
define <vscale x 1 x i32> @intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
ret <vscale x 1 x i32> %a
declare <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32(
define <vscale x 2 x i32> @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v9
%a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32(
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
ret <vscale x 2 x i32> %a
declare <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32(
define <vscale x 2 x i32> @intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
ret <vscale x 2 x i32> %a
declare <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(
define <vscale x 4 x i32> @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v10
%a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32(
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
ret <vscale x 4 x i32> %a
declare <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32(
define <vscale x 4 x i32> @intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
ret <vscale x 4 x i32> %a
declare <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32(
define <vscale x 8 x i32> @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v12
%a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32(
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
ret <vscale x 8 x i32> %a
declare <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32(
define <vscale x 8 x i32> @intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
ret <vscale x 8 x i32> %a
declare <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v16
%a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32(
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
ret <vscale x 16 x i32> %a
declare <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
define <vscale x 16 x i32> @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
ret <vscale x 16 x i32> %a
declare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
define <vscale x 1 x i64> @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v9
%a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
ret <vscale x 1 x i64> %a
declare <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(
define <vscale x 1 x i64> @intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmulh.vv v8, v9, v10, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
ret <vscale x 1 x i64> %a
declare <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
define <vscale x 2 x i64> @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v10
%a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
ret <vscale x 2 x i64> %a
declare <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64(
define <vscale x 2 x i64> @intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmulh.vv v8, v10, v12, v0.t
%a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
ret <vscale x 2 x i64> %a
declare <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
define <vscale x 4 x i64> @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v12
%a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
ret <vscale x 4 x i64> %a
declare <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64(
define <vscale x 4 x i64> @intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmulh.vv v8, v12, v16, v0.t
%a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
ret <vscale x 4 x i64> %a
declare <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmulh.vv v8, v8, v16
%a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
ret <vscale x 8 x i64> %a
declare <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64(
define <vscale x 8 x i64> @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
%a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
ret <vscale x 8 x i64> %a
declare <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
define <vscale x 1 x i8> @intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8(
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
ret <vscale x 1 x i8> %a
declare <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8(
define <vscale x 1 x i8> @intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t
%a = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i1> %3,
ret <vscale x 1 x i8> %a
declare <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8(
define <vscale x 2 x i8> @intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8(
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
ret <vscale x 2 x i8> %a
declare <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8(
define <vscale x 2 x i8> @intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t
%a = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i1> %3,
ret <vscale x 2 x i8> %a
declare <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8(
define <vscale x 4 x i8> @intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8(
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
ret <vscale x 4 x i8> %a
declare <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8(
define <vscale x 4 x i8> @intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t
%a = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i1> %3,
ret <vscale x 4 x i8> %a
declare <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8(
define <vscale x 8 x i8> @intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8(
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
ret <vscale x 8 x i8> %a
declare <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8(
define <vscale x 8 x i8> @intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i1> %3,
ret <vscale x 8 x i8> %a
declare <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8(
define <vscale x 16 x i8> @intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8(
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
ret <vscale x 16 x i8> %a
declare <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8(
define <vscale x 16 x i8> @intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmulh.vx v8, v10, a0, v0.t
%a = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i1> %3,
ret <vscale x 16 x i8> %a
declare <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8(
define <vscale x 32 x i8> @intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8(
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
ret <vscale x 32 x i8> %a
declare <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8(
define <vscale x 32 x i8> @intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmulh.vx v8, v12, a0, v0.t
%a = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i1> %3,
ret <vscale x 32 x i8> %a
declare <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8(
define <vscale x 64 x i8> @intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8(
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
ret <vscale x 64 x i8> %a
declare <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8(
define <vscale x 64 x i8> @intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vmulh.vx v8, v16, a0, v0.t
%a = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8(
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
<vscale x 64 x i1> %3,
ret <vscale x 64 x i8> %a
declare <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16(
define <vscale x 1 x i16> @intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16(
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
ret <vscale x 1 x i16> %a
declare <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16(
define <vscale x 1 x i16> @intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i1> %3,
ret <vscale x 1 x i16> %a
declare <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16(
define <vscale x 2 x i16> @intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16(
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
ret <vscale x 2 x i16> %a
declare <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16(
define <vscale x 2 x i16> @intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i1> %3,
ret <vscale x 2 x i16> %a
declare <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16(
define <vscale x 4 x i16> @intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16(
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
ret <vscale x 4 x i16> %a
declare <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16(
define <vscale x 4 x i16> @intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i1> %3,
ret <vscale x 4 x i16> %a
declare <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16(
define <vscale x 8 x i16> @intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16(
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
ret <vscale x 8 x i16> %a
declare <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16(
define <vscale x 8 x i16> @intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmulh.vx v8, v10, a0, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i1> %3,
ret <vscale x 8 x i16> %a
declare <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
define <vscale x 16 x i16> @intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16(
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
ret <vscale x 16 x i16> %a
declare <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
define <vscale x 16 x i16> @intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmulh.vx v8, v12, a0, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i1> %3,
ret <vscale x 16 x i16> %a
declare <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
define <vscale x 32 x i16> @intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16(
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
ret <vscale x 32 x i16> %a
declare <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
define <vscale x 32 x i16> @intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vmulh.vx v8, v16, a0, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 32 x i1> %3,
ret <vscale x 32 x i16> %a
declare <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32(
define <vscale x 1 x i32> @intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32(
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
ret <vscale x 1 x i32> %a
declare <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32(
define <vscale x 1 x i32> @intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i1> %3,
ret <vscale x 1 x i32> %a
declare <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32(
define <vscale x 2 x i32> @intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32(
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
ret <vscale x 2 x i32> %a
declare <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32(
define <vscale x 2 x i32> @intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmulh.vx v8, v9, a0, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i1> %3,
ret <vscale x 2 x i32> %a
declare <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32(
define <vscale x 4 x i32> @intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32(
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
ret <vscale x 4 x i32> %a
declare <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32(
define <vscale x 4 x i32> @intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmulh.vx v8, v10, a0, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i1> %3,
ret <vscale x 4 x i32> %a
declare <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32(
define <vscale x 8 x i32> @intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32(
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
ret <vscale x 8 x i32> %a
declare <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32(
define <vscale x 8 x i32> @intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmulh.vx v8, v12, a0, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i1> %3,
ret <vscale x 8 x i32> %a
declare <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
define <vscale x 16 x i32> @intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32(
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
ret <vscale x 16 x i32> %a
declare <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
define <vscale x 16 x i32> @intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmulh_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vmulh.vx v8, v16, a0, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 16 x i1> %3,
ret <vscale x 16 x i32> %a
declare <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
define <vscale x 1 x i64> @intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmulh.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
ret <vscale x 1 x i64> %a
declare <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64(
define <vscale x 1 x i64> @intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmulh.vv v8, v9, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vmulh_mask_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmulh.vx v8, v9, a0, v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i1> %3,
ret <vscale x 1 x i64> %a
declare <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
define <vscale x 2 x i64> @intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmulh.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
ret <vscale x 2 x i64> %a
declare <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64(
define <vscale x 2 x i64> @intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmulh.vv v8, v10, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vmulh_mask_vx_nxv2i64_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmulh.vx v8, v10, a0, v0.t
%a = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i1> %3,
ret <vscale x 2 x i64> %a
declare <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
define <vscale x 4 x i64> @intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmulh.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
ret <vscale x 4 x i64> %a
declare <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64(
define <vscale x 4 x i64> @intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmulh.vv v8, v12, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vmulh_mask_vx_nxv4i64_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmulh.vx v8, v12, a0, v0.t
%a = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i1> %3,
ret <vscale x 4 x i64> %a
declare <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
define <vscale x 8 x i64> @intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmulh.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmulh.vx v8, v8, a0
%a = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
ret <vscale x 8 x i64> %a
declare <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64(
define <vscale x 8 x i64> @intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmulh.vv v8, v16, v24, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vmulh_mask_vx_nxv8i64_nxv8i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT: vmulh.vx v8, v16, a0, v0.t
%a = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
<vscale x 8 x i1> %3,
ret <vscale x 8 x i64> %a