; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vwsub.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i8,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i8,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i8,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i8,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i8,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i8,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i8,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i8,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i8,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i8,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i8,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i8,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i16,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i16,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i16,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i16,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i16,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i16,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i16,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i16,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i16,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i16,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i32,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vwsub.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i32,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vwsub.wv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}

define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %0,
    i8 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %0,
    i8 %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %0,
    i8 %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %0,
    i8 %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %0,
    i8 %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %0,
    i8 %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %0,
    i16 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %0,
    i16 %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %0,
    i16 %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %0,
    i16 %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %0,
    i16 %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %0,
    i32 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %0,
    i32 %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %0,
    i32 %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %0,
    i32 %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}

define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwsub.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %0,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vwsub.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %0,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vwsub.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %0,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vwsub.wv v12, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %0,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vwsub.wv v16, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %0,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vwsub.wv v24, v16, v8
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %0,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vwsub.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %0,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vwsub.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %0,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vwsub.wv v12, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %0,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vwsub.wv v16, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %0,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwsub.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %0,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vwsub.wv v12, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %0,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vwsub.wv v16, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %0,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vwsub.wv v24, v16, v8
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %0,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}