1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
3 ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
5 ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
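; This file tests the llvm.riscv.vsub intrinsics: unmasked and masked
; vector-vector (.vv), vector-scalar (.vx), and immediate forms across the
; supported element widths (e8-e64) and register group sizes (mf8-m8).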
7 declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
13 define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
14 ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8:
15 ; CHECK: # %bb.0: # %entry
16 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
17 ; CHECK-NEXT: vsub.vv v8, v8, v9
20 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
21 <vscale x 1 x i8> undef,
26 ret <vscale x 1 x i8> %a
29 declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
36 define <vscale x 1 x i8> @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
37 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8:
38 ; CHECK: # %bb.0: # %entry
39 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
40 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
43 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
50 ret <vscale x 1 x i8> %a
53 declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
59 define <vscale x 2 x i8> @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
60 ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8:
61 ; CHECK: # %bb.0: # %entry
62 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
63 ; CHECK-NEXT: vsub.vv v8, v8, v9
66 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
67 <vscale x 2 x i8> undef,
72 ret <vscale x 2 x i8> %a
75 declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
82 define <vscale x 2 x i8> @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
83 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8:
84 ; CHECK: # %bb.0: # %entry
85 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
86 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
89 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
96 ret <vscale x 2 x i8> %a
99 declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
105 define <vscale x 4 x i8> @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
106 ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8:
107 ; CHECK: # %bb.0: # %entry
108 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
109 ; CHECK-NEXT: vsub.vv v8, v8, v9
112 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
113 <vscale x 4 x i8> undef,
114 <vscale x 4 x i8> %0,
115 <vscale x 4 x i8> %1,
118 ret <vscale x 4 x i8> %a
121 declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
128 define <vscale x 4 x i8> @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
129 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8:
130 ; CHECK: # %bb.0: # %entry
131 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
132 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
135 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
136 <vscale x 4 x i8> %0,
137 <vscale x 4 x i8> %1,
138 <vscale x 4 x i8> %2,
139 <vscale x 4 x i1> %3,
142 ret <vscale x 4 x i8> %a
145 declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
151 define <vscale x 8 x i8> @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
152 ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8:
153 ; CHECK: # %bb.0: # %entry
154 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
155 ; CHECK-NEXT: vsub.vv v8, v8, v9
158 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
159 <vscale x 8 x i8> undef,
160 <vscale x 8 x i8> %0,
161 <vscale x 8 x i8> %1,
164 ret <vscale x 8 x i8> %a
167 declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
174 define <vscale x 8 x i8> @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
175 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8:
176 ; CHECK: # %bb.0: # %entry
177 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
178 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
181 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
182 <vscale x 8 x i8> %0,
183 <vscale x 8 x i8> %1,
184 <vscale x 8 x i8> %2,
185 <vscale x 8 x i1> %3,
188 ret <vscale x 8 x i8> %a
191 declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
197 define <vscale x 16 x i8> @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
198 ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8:
199 ; CHECK: # %bb.0: # %entry
200 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
201 ; CHECK-NEXT: vsub.vv v8, v8, v10
204 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
205 <vscale x 16 x i8> undef,
206 <vscale x 16 x i8> %0,
207 <vscale x 16 x i8> %1,
210 ret <vscale x 16 x i8> %a
213 declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
220 define <vscale x 16 x i8> @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
221 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8:
222 ; CHECK: # %bb.0: # %entry
223 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
224 ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t
227 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
228 <vscale x 16 x i8> %0,
229 <vscale x 16 x i8> %1,
230 <vscale x 16 x i8> %2,
231 <vscale x 16 x i1> %3,
234 ret <vscale x 16 x i8> %a
237 declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
243 define <vscale x 32 x i8> @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
244 ; CHECK-LABEL: intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8:
245 ; CHECK: # %bb.0: # %entry
246 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
247 ; CHECK-NEXT: vsub.vv v8, v8, v12
250 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
251 <vscale x 32 x i8> undef,
252 <vscale x 32 x i8> %0,
253 <vscale x 32 x i8> %1,
256 ret <vscale x 32 x i8> %a
259 declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
266 define <vscale x 32 x i8> @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
267 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
268 ; CHECK: # %bb.0: # %entry
269 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
270 ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t
273 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
274 <vscale x 32 x i8> %0,
275 <vscale x 32 x i8> %1,
276 <vscale x 32 x i8> %2,
277 <vscale x 32 x i1> %3,
280 ret <vscale x 32 x i8> %a
283 declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
289 define <vscale x 64 x i8> @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
290 ; CHECK-LABEL: intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8:
291 ; CHECK: # %bb.0: # %entry
292 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
293 ; CHECK-NEXT: vsub.vv v8, v8, v16
296 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
297 <vscale x 64 x i8> undef,
298 <vscale x 64 x i8> %0,
299 <vscale x 64 x i8> %1,
302 ret <vscale x 64 x i8> %a
305 declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
312 define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
313 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
314 ; CHECK: # %bb.0: # %entry
315 ; CHECK-NEXT: vl8r.v v24, (a0)
316 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
317 ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
320 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
321 <vscale x 64 x i8> %0,
322 <vscale x 64 x i8> %1,
323 <vscale x 64 x i8> %2,
324 <vscale x 64 x i1> %3,
327 ret <vscale x 64 x i8> %a
330 declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
336 define <vscale x 1 x i16> @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
337 ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16:
338 ; CHECK: # %bb.0: # %entry
339 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
340 ; CHECK-NEXT: vsub.vv v8, v8, v9
343 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
344 <vscale x 1 x i16> undef,
345 <vscale x 1 x i16> %0,
346 <vscale x 1 x i16> %1,
349 ret <vscale x 1 x i16> %a
352 declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
359 define <vscale x 1 x i16> @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
360 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
361 ; CHECK: # %bb.0: # %entry
362 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
363 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
366 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
367 <vscale x 1 x i16> %0,
368 <vscale x 1 x i16> %1,
369 <vscale x 1 x i16> %2,
370 <vscale x 1 x i1> %3,
373 ret <vscale x 1 x i16> %a
376 declare <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
382 define <vscale x 2 x i16> @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
383 ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16:
384 ; CHECK: # %bb.0: # %entry
385 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
386 ; CHECK-NEXT: vsub.vv v8, v8, v9
389 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
390 <vscale x 2 x i16> undef,
391 <vscale x 2 x i16> %0,
392 <vscale x 2 x i16> %1,
395 ret <vscale x 2 x i16> %a
398 declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
405 define <vscale x 2 x i16> @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
406 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16:
407 ; CHECK: # %bb.0: # %entry
408 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
409 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
412 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
413 <vscale x 2 x i16> %0,
414 <vscale x 2 x i16> %1,
415 <vscale x 2 x i16> %2,
416 <vscale x 2 x i1> %3,
419 ret <vscale x 2 x i16> %a
422 declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
428 define <vscale x 4 x i16> @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
429 ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16:
430 ; CHECK: # %bb.0: # %entry
431 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
432 ; CHECK-NEXT: vsub.vv v8, v8, v9
435 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
436 <vscale x 4 x i16> undef,
437 <vscale x 4 x i16> %0,
438 <vscale x 4 x i16> %1,
441 ret <vscale x 4 x i16> %a
444 declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
451 define <vscale x 4 x i16> @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
452 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16:
453 ; CHECK: # %bb.0: # %entry
454 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
455 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
458 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
459 <vscale x 4 x i16> %0,
460 <vscale x 4 x i16> %1,
461 <vscale x 4 x i16> %2,
462 <vscale x 4 x i1> %3,
465 ret <vscale x 4 x i16> %a
468 declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
474 define <vscale x 8 x i16> @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
475 ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16:
476 ; CHECK: # %bb.0: # %entry
477 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
478 ; CHECK-NEXT: vsub.vv v8, v8, v10
481 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
482 <vscale x 8 x i16> undef,
483 <vscale x 8 x i16> %0,
484 <vscale x 8 x i16> %1,
487 ret <vscale x 8 x i16> %a
490 declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
497 define <vscale x 8 x i16> @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
498 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16:
499 ; CHECK: # %bb.0: # %entry
500 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
501 ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t
504 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
505 <vscale x 8 x i16> %0,
506 <vscale x 8 x i16> %1,
507 <vscale x 8 x i16> %2,
508 <vscale x 8 x i1> %3,
511 ret <vscale x 8 x i16> %a
514 declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
520 define <vscale x 16 x i16> @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
521 ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16:
522 ; CHECK: # %bb.0: # %entry
523 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
524 ; CHECK-NEXT: vsub.vv v8, v8, v12
527 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
528 <vscale x 16 x i16> undef,
529 <vscale x 16 x i16> %0,
530 <vscale x 16 x i16> %1,
533 ret <vscale x 16 x i16> %a
536 declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
543 define <vscale x 16 x i16> @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
544 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16:
545 ; CHECK: # %bb.0: # %entry
546 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
547 ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t
550 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
551 <vscale x 16 x i16> %0,
552 <vscale x 16 x i16> %1,
553 <vscale x 16 x i16> %2,
554 <vscale x 16 x i1> %3,
557 ret <vscale x 16 x i16> %a
560 declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
566 define <vscale x 32 x i16> @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
567 ; CHECK-LABEL: intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16:
568 ; CHECK: # %bb.0: # %entry
569 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
570 ; CHECK-NEXT: vsub.vv v8, v8, v16
573 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
574 <vscale x 32 x i16> undef,
575 <vscale x 32 x i16> %0,
576 <vscale x 32 x i16> %1,
579 ret <vscale x 32 x i16> %a
582 declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
589 define <vscale x 32 x i16> @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
590 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
591 ; CHECK: # %bb.0: # %entry
592 ; CHECK-NEXT: vl8re16.v v24, (a0)
593 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
594 ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
597 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
598 <vscale x 32 x i16> %0,
599 <vscale x 32 x i16> %1,
600 <vscale x 32 x i16> %2,
601 <vscale x 32 x i1> %3,
604 ret <vscale x 32 x i16> %a
607 declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
613 define <vscale x 1 x i32> @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
614 ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32:
615 ; CHECK: # %bb.0: # %entry
616 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
617 ; CHECK-NEXT: vsub.vv v8, v8, v9
620 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
621 <vscale x 1 x i32> undef,
622 <vscale x 1 x i32> %0,
623 <vscale x 1 x i32> %1,
626 ret <vscale x 1 x i32> %a
629 declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
636 define <vscale x 1 x i32> @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
637 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32:
638 ; CHECK: # %bb.0: # %entry
639 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
640 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
643 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
644 <vscale x 1 x i32> %0,
645 <vscale x 1 x i32> %1,
646 <vscale x 1 x i32> %2,
647 <vscale x 1 x i1> %3,
650 ret <vscale x 1 x i32> %a
653 declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
659 define <vscale x 2 x i32> @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
660 ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32:
661 ; CHECK: # %bb.0: # %entry
662 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
663 ; CHECK-NEXT: vsub.vv v8, v8, v9
666 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
667 <vscale x 2 x i32> undef,
668 <vscale x 2 x i32> %0,
669 <vscale x 2 x i32> %1,
672 ret <vscale x 2 x i32> %a
675 declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
682 define <vscale x 2 x i32> @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
683 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32:
684 ; CHECK: # %bb.0: # %entry
685 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
686 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
689 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
690 <vscale x 2 x i32> %0,
691 <vscale x 2 x i32> %1,
692 <vscale x 2 x i32> %2,
693 <vscale x 2 x i1> %3,
696 ret <vscale x 2 x i32> %a
699 declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
705 define <vscale x 4 x i32> @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
706 ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32:
707 ; CHECK: # %bb.0: # %entry
708 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
709 ; CHECK-NEXT: vsub.vv v8, v8, v10
712 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
713 <vscale x 4 x i32> undef,
714 <vscale x 4 x i32> %0,
715 <vscale x 4 x i32> %1,
718 ret <vscale x 4 x i32> %a
721 declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
728 define <vscale x 4 x i32> @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
729 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32:
730 ; CHECK: # %bb.0: # %entry
731 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
732 ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t
735 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
736 <vscale x 4 x i32> %0,
737 <vscale x 4 x i32> %1,
738 <vscale x 4 x i32> %2,
739 <vscale x 4 x i1> %3,
742 ret <vscale x 4 x i32> %a
745 declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
751 define <vscale x 8 x i32> @intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
752 ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32:
753 ; CHECK: # %bb.0: # %entry
754 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
755 ; CHECK-NEXT: vsub.vv v8, v8, v12
758 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
759 <vscale x 8 x i32> undef,
760 <vscale x 8 x i32> %0,
761 <vscale x 8 x i32> %1,
764 ret <vscale x 8 x i32> %a
767 declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
774 define <vscale x 8 x i32> @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
775 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32:
776 ; CHECK: # %bb.0: # %entry
777 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
778 ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t
781 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
782 <vscale x 8 x i32> %0,
783 <vscale x 8 x i32> %1,
784 <vscale x 8 x i32> %2,
785 <vscale x 8 x i1> %3,
788 ret <vscale x 8 x i32> %a
791 declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
797 define <vscale x 16 x i32> @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
798 ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32:
799 ; CHECK: # %bb.0: # %entry
800 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
801 ; CHECK-NEXT: vsub.vv v8, v8, v16
804 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
805 <vscale x 16 x i32> undef,
806 <vscale x 16 x i32> %0,
807 <vscale x 16 x i32> %1,
810 ret <vscale x 16 x i32> %a
813 declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
820 define <vscale x 16 x i32> @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
821 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
822 ; CHECK: # %bb.0: # %entry
823 ; CHECK-NEXT: vl8re32.v v24, (a0)
824 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
825 ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
828 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
829 <vscale x 16 x i32> %0,
830 <vscale x 16 x i32> %1,
831 <vscale x 16 x i32> %2,
832 <vscale x 16 x i1> %3,
835 ret <vscale x 16 x i32> %a
838 declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
844 define <vscale x 1 x i64> @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
845 ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64:
846 ; CHECK: # %bb.0: # %entry
847 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
848 ; CHECK-NEXT: vsub.vv v8, v8, v9
851 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
852 <vscale x 1 x i64> undef,
853 <vscale x 1 x i64> %0,
854 <vscale x 1 x i64> %1,
857 ret <vscale x 1 x i64> %a
860 declare <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
867 define <vscale x 1 x i64> @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
868 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
869 ; CHECK: # %bb.0: # %entry
870 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
871 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
874 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
875 <vscale x 1 x i64> %0,
876 <vscale x 1 x i64> %1,
877 <vscale x 1 x i64> %2,
878 <vscale x 1 x i1> %3,
881 ret <vscale x 1 x i64> %a
884 declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
890 define <vscale x 2 x i64> @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
891 ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64:
892 ; CHECK: # %bb.0: # %entry
893 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
894 ; CHECK-NEXT: vsub.vv v8, v8, v10
897 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
898 <vscale x 2 x i64> undef,
899 <vscale x 2 x i64> %0,
900 <vscale x 2 x i64> %1,
903 ret <vscale x 2 x i64> %a
906 declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
913 define <vscale x 2 x i64> @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
914 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
915 ; CHECK: # %bb.0: # %entry
916 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
917 ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t
920 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
921 <vscale x 2 x i64> %0,
922 <vscale x 2 x i64> %1,
923 <vscale x 2 x i64> %2,
924 <vscale x 2 x i1> %3,
927 ret <vscale x 2 x i64> %a
930 declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
936 define <vscale x 4 x i64> @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
937 ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64:
938 ; CHECK: # %bb.0: # %entry
939 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
940 ; CHECK-NEXT: vsub.vv v8, v8, v12
943 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
944 <vscale x 4 x i64> undef,
945 <vscale x 4 x i64> %0,
946 <vscale x 4 x i64> %1,
949 ret <vscale x 4 x i64> %a
952 declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
959 define <vscale x 4 x i64> @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
960 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
961 ; CHECK: # %bb.0: # %entry
962 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
963 ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t
966 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
967 <vscale x 4 x i64> %0,
968 <vscale x 4 x i64> %1,
969 <vscale x 4 x i64> %2,
970 <vscale x 4 x i1> %3,
973 ret <vscale x 4 x i64> %a
976 declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
982 define <vscale x 8 x i64> @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
983 ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64:
984 ; CHECK: # %bb.0: # %entry
985 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
986 ; CHECK-NEXT: vsub.vv v8, v8, v16
989 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
990 <vscale x 8 x i64> undef,
991 <vscale x 8 x i64> %0,
992 <vscale x 8 x i64> %1,
995 ret <vscale x 8 x i64> %a
998 declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
1005 define <vscale x 8 x i64> @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1006 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64:
1007 ; CHECK: # %bb.0: # %entry
1008 ; CHECK-NEXT: vl8re64.v v24, (a0)
1009 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
1010 ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
1013 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
1014 <vscale x 8 x i64> %0,
1015 <vscale x 8 x i64> %1,
1016 <vscale x 8 x i64> %2,
1017 <vscale x 8 x i1> %3,
1020 ret <vscale x 8 x i64> %a
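; Vector-scalar (vsub.vx) tests: the scalar operand arrives in a GPR and is
; subtracted from every element selected by vl and, for the masked forms, v0.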
1023 declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
1029 define <vscale x 1 x i8> @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
1030 ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i8_nxv1i8_i8:
1031 ; CHECK: # %bb.0: # %entry
1032 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1033 ; CHECK-NEXT: vsub.vx v8, v8, a0
1036 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
1037 <vscale x 1 x i8> undef,
1038 <vscale x 1 x i8> %0,
1042 ret <vscale x 1 x i8> %a
1045 declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
1052 define <vscale x 1 x i8> @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1053 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8:
1054 ; CHECK: # %bb.0: # %entry
1055 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
1056 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1059 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
1060 <vscale x 1 x i8> %0,
1061 <vscale x 1 x i8> %1,
1063 <vscale x 1 x i1> %3,
1066 ret <vscale x 1 x i8> %a
1069 declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
1075 define <vscale x 2 x i8> @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
1076 ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i8_nxv2i8_i8:
1077 ; CHECK: # %bb.0: # %entry
1078 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1079 ; CHECK-NEXT: vsub.vx v8, v8, a0
1082 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
1083 <vscale x 2 x i8> undef,
1084 <vscale x 2 x i8> %0,
1088 ret <vscale x 2 x i8> %a
1091 declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
1098 define <vscale x 2 x i8> @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1099 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8:
1100 ; CHECK: # %bb.0: # %entry
1101 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1102 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1105 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
1106 <vscale x 2 x i8> %0,
1107 <vscale x 2 x i8> %1,
1109 <vscale x 2 x i1> %3,
1112 ret <vscale x 2 x i8> %a
1115 declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
1121 define <vscale x 4 x i8> @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
1122 ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i8_nxv4i8_i8:
1123 ; CHECK: # %bb.0: # %entry
1124 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1125 ; CHECK-NEXT: vsub.vx v8, v8, a0
1128 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
1129 <vscale x 4 x i8> undef,
1130 <vscale x 4 x i8> %0,
1134 ret <vscale x 4 x i8> %a
1137 declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
1144 define <vscale x 4 x i8> @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1145 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8:
1146 ; CHECK: # %bb.0: # %entry
1147 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1148 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1151 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
1152 <vscale x 4 x i8> %0,
1153 <vscale x 4 x i8> %1,
1155 <vscale x 4 x i1> %3,
1158 ret <vscale x 4 x i8> %a
1161 declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
1167 define <vscale x 8 x i8> @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
1168 ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i8_nxv8i8_i8:
1169 ; CHECK: # %bb.0: # %entry
1170 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1171 ; CHECK-NEXT: vsub.vx v8, v8, a0
1174 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
1175 <vscale x 8 x i8> undef,
1176 <vscale x 8 x i8> %0,
1180 ret <vscale x 8 x i8> %a
1183 declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
1190 define <vscale x 8 x i8> @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1191 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8:
1192 ; CHECK: # %bb.0: # %entry
1193 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1194 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1197 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
1198 <vscale x 8 x i8> %0,
1199 <vscale x 8 x i8> %1,
1201 <vscale x 8 x i1> %3,
1204 ret <vscale x 8 x i8> %a
1207 declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
1213 define <vscale x 16 x i8> @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
1214 ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i8_nxv16i8_i8:
1215 ; CHECK: # %bb.0: # %entry
1216 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1217 ; CHECK-NEXT: vsub.vx v8, v8, a0
1220 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
1221 <vscale x 16 x i8> undef,
1222 <vscale x 16 x i8> %0,
1226 ret <vscale x 16 x i8> %a
1229 declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
1236 define <vscale x 16 x i8> @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1237 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8:
1238 ; CHECK: # %bb.0: # %entry
1239 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
1240 ; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t
1243 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
1244 <vscale x 16 x i8> %0,
1245 <vscale x 16 x i8> %1,
1247 <vscale x 16 x i1> %3,
1250 ret <vscale x 16 x i8> %a
1253 declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
1259 define <vscale x 32 x i8> @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
1260 ; CHECK-LABEL: intrinsic_vsub_vx_nxv32i8_nxv32i8_i8:
1261 ; CHECK: # %bb.0: # %entry
1262 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1263 ; CHECK-NEXT: vsub.vx v8, v8, a0
1266 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
1267 <vscale x 32 x i8> undef,
1268 <vscale x 32 x i8> %0,
1272 ret <vscale x 32 x i8> %a
1275 declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
1282 define <vscale x 32 x i8> @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1283 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8:
1284 ; CHECK: # %bb.0: # %entry
1285 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
1286 ; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t
1289 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
1290 <vscale x 32 x i8> %0,
1291 <vscale x 32 x i8> %1,
1293 <vscale x 32 x i1> %3,
1296 ret <vscale x 32 x i8> %a
1299 declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
1305 define <vscale x 64 x i8> @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
1306 ; CHECK-LABEL: intrinsic_vsub_vx_nxv64i8_nxv64i8_i8:
1307 ; CHECK: # %bb.0: # %entry
1308 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
1309 ; CHECK-NEXT: vsub.vx v8, v8, a0
1312 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
1313 <vscale x 64 x i8> undef,
1314 <vscale x 64 x i8> %0,
1318 ret <vscale x 64 x i8> %a
1321 declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
1328 define <vscale x 64 x i8> @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
1329 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8:
1330 ; CHECK: # %bb.0: # %entry
1331 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
1332 ; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t
1335 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
1336 <vscale x 64 x i8> %0,
1337 <vscale x 64 x i8> %1,
1339 <vscale x 64 x i1> %3,
1342 ret <vscale x 64 x i8> %a
1345 declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
1351 define <vscale x 1 x i16> @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
1352 ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i16_nxv1i16_i16:
1353 ; CHECK: # %bb.0: # %entry
1354 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1355 ; CHECK-NEXT: vsub.vx v8, v8, a0
1358 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
1359 <vscale x 1 x i16> undef,
1360 <vscale x 1 x i16> %0,
1364 ret <vscale x 1 x i16> %a
1367 declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
1374 define <vscale x 1 x i16> @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1375 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16:
1376 ; CHECK: # %bb.0: # %entry
1377 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1378 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1381 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
1382 <vscale x 1 x i16> %0,
1383 <vscale x 1 x i16> %1,
1385 <vscale x 1 x i1> %3,
1388 ret <vscale x 1 x i16> %a
1391 declare <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
1397 define <vscale x 2 x i16> @intrinsic_vsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
1398 ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i16_nxv2i16_i16:
1399 ; CHECK: # %bb.0: # %entry
1400 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1401 ; CHECK-NEXT: vsub.vx v8, v8, a0
1404 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
1405 <vscale x 2 x i16> undef,
1406 <vscale x 2 x i16> %0,
1410 ret <vscale x 2 x i16> %a
1413 declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
1420 define <vscale x 2 x i16> @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1421 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16:
1422 ; CHECK: # %bb.0: # %entry
1423 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1424 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1427 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
1428 <vscale x 2 x i16> %0,
1429 <vscale x 2 x i16> %1,
1431 <vscale x 2 x i1> %3,
1434 ret <vscale x 2 x i16> %a
1437 declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
1443 define <vscale x 4 x i16> @intrinsic_vsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
1444 ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i16_nxv4i16_i16:
1445 ; CHECK: # %bb.0: # %entry
1446 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1447 ; CHECK-NEXT: vsub.vx v8, v8, a0
1450 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
1451 <vscale x 4 x i16> undef,
1452 <vscale x 4 x i16> %0,
1456 ret <vscale x 4 x i16> %a
1459 declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
1466 define <vscale x 4 x i16> @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1467 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16:
1468 ; CHECK: # %bb.0: # %entry
1469 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1470 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1473 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
1474 <vscale x 4 x i16> %0,
1475 <vscale x 4 x i16> %1,
1477 <vscale x 4 x i1> %3,
1480 ret <vscale x 4 x i16> %a
1483 declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
1489 define <vscale x 8 x i16> @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
1490 ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i16_nxv8i16_i16:
1491 ; CHECK: # %bb.0: # %entry
1492 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1493 ; CHECK-NEXT: vsub.vx v8, v8, a0
1496 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
1497 <vscale x 8 x i16> undef,
1498 <vscale x 8 x i16> %0,
1502 ret <vscale x 8 x i16> %a
1505 declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
1512 define <vscale x 8 x i16> @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1513 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16:
1514 ; CHECK: # %bb.0: # %entry
1515 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1516 ; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t
1519 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
1520 <vscale x 8 x i16> %0,
1521 <vscale x 8 x i16> %1,
1523 <vscale x 8 x i1> %3,
1526 ret <vscale x 8 x i16> %a
1529 declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
1530 <vscale x 16 x i16>,
1531 <vscale x 16 x i16>,
1535 define <vscale x 16 x i16> @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
1536 ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i16_nxv16i16_i16:
1537 ; CHECK: # %bb.0: # %entry
1538 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1539 ; CHECK-NEXT: vsub.vx v8, v8, a0
1542 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
1543 <vscale x 16 x i16> undef,
1544 <vscale x 16 x i16> %0,
1548 ret <vscale x 16 x i16> %a
1551 declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
1552 <vscale x 16 x i16>,
1553 <vscale x 16 x i16>,
1558 define <vscale x 16 x i16> @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1559 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16:
1560 ; CHECK: # %bb.0: # %entry
1561 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1562 ; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t
1565 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
1566 <vscale x 16 x i16> %0,
1567 <vscale x 16 x i16> %1,
1569 <vscale x 16 x i1> %3,
1572 ret <vscale x 16 x i16> %a
1575 declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
1576 <vscale x 32 x i16>,
1577 <vscale x 32 x i16>,
1581 define <vscale x 32 x i16> @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
1582 ; CHECK-LABEL: intrinsic_vsub_vx_nxv32i16_nxv32i16_i16:
1583 ; CHECK: # %bb.0: # %entry
1584 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1585 ; CHECK-NEXT: vsub.vx v8, v8, a0
1588 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
1589 <vscale x 32 x i16> undef,
1590 <vscale x 32 x i16> %0,
1594 ret <vscale x 32 x i16> %a
1597 declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
1598 <vscale x 32 x i16>,
1599 <vscale x 32 x i16>,
1604 define <vscale x 32 x i16> @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1605 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16:
1606 ; CHECK: # %bb.0: # %entry
1607 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
1608 ; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t
1611 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
1612 <vscale x 32 x i16> %0,
1613 <vscale x 32 x i16> %1,
1615 <vscale x 32 x i1> %3,
1618 ret <vscale x 32 x i16> %a
1621 declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
1627 define <vscale x 1 x i32> @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
1628 ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i32_nxv1i32_i32:
1629 ; CHECK: # %bb.0: # %entry
1630 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1631 ; CHECK-NEXT: vsub.vx v8, v8, a0
1634 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
1635 <vscale x 1 x i32> undef,
1636 <vscale x 1 x i32> %0,
1640 ret <vscale x 1 x i32> %a
1643 declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
1650 define <vscale x 1 x i32> @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1651 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32:
1652 ; CHECK: # %bb.0: # %entry
1653 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1654 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1657 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
1658 <vscale x 1 x i32> %0,
1659 <vscale x 1 x i32> %1,
1661 <vscale x 1 x i1> %3,
1664 ret <vscale x 1 x i32> %a
1667 declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
1673 define <vscale x 2 x i32> @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
1674 ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i32_nxv2i32_i32:
1675 ; CHECK: # %bb.0: # %entry
1676 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1677 ; CHECK-NEXT: vsub.vx v8, v8, a0
1680 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
1681 <vscale x 2 x i32> undef,
1682 <vscale x 2 x i32> %0,
1686 ret <vscale x 2 x i32> %a
1689 declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
1696 define <vscale x 2 x i32> @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1697 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32:
1698 ; CHECK: # %bb.0: # %entry
1699 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1700 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1703 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
1704 <vscale x 2 x i32> %0,
1705 <vscale x 2 x i32> %1,
1707 <vscale x 2 x i1> %3,
1710 ret <vscale x 2 x i32> %a
1713 declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
1719 define <vscale x 4 x i32> @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
1720 ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i32_nxv4i32_i32:
1721 ; CHECK: # %bb.0: # %entry
1722 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1723 ; CHECK-NEXT: vsub.vx v8, v8, a0
1726 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
1727 <vscale x 4 x i32> undef,
1728 <vscale x 4 x i32> %0,
1732 ret <vscale x 4 x i32> %a
1735 declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
1742 define <vscale x 4 x i32> @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1743 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32:
1744 ; CHECK: # %bb.0: # %entry
1745 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1746 ; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t
1749 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
1750 <vscale x 4 x i32> %0,
1751 <vscale x 4 x i32> %1,
1753 <vscale x 4 x i1> %3,
1756 ret <vscale x 4 x i32> %a
1759 declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
1765 define <vscale x 8 x i32> @intrinsic_vsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
1766 ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i32_nxv8i32_i32:
1767 ; CHECK: # %bb.0: # %entry
1768 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1769 ; CHECK-NEXT: vsub.vx v8, v8, a0
1772 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
1773 <vscale x 8 x i32> undef,
1774 <vscale x 8 x i32> %0,
1778 ret <vscale x 8 x i32> %a
1781 declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
1788 define <vscale x 8 x i32> @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1789 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32:
1790 ; CHECK: # %bb.0: # %entry
1791 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1792 ; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t
1795 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
1796 <vscale x 8 x i32> %0,
1797 <vscale x 8 x i32> %1,
1799 <vscale x 8 x i1> %3,
1802 ret <vscale x 8 x i32> %a
1805 declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
1806 <vscale x 16 x i32>,
1807 <vscale x 16 x i32>,
1811 define <vscale x 16 x i32> @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
1812 ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i32_nxv16i32_i32:
1813 ; CHECK: # %bb.0: # %entry
1814 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1815 ; CHECK-NEXT: vsub.vx v8, v8, a0
1818 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
1819 <vscale x 16 x i32> undef,
1820 <vscale x 16 x i32> %0,
1824 ret <vscale x 16 x i32> %a
1827 declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
1828 <vscale x 16 x i32>,
1829 <vscale x 16 x i32>,
1834 define <vscale x 16 x i32> @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1835 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32:
1836 ; CHECK: # %bb.0: # %entry
1837 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
1838 ; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t
1841 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
1842 <vscale x 16 x i32> %0,
1843 <vscale x 16 x i32> %1,
1845 <vscale x 16 x i1> %3,
1848 ret <vscale x 16 x i32> %a
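; For i64 scalar operands RV32 has no 64-bit GPR, so the scalar is spilled to
; the stack and splatted with a zero-stride vlse64.v before a vector-vector
; subtract; RV64 passes the scalar directly to vsub.vx.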
1851 declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
1857 define <vscale x 1 x i64> @intrinsic_vsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
1858 ; RV32-LABEL: intrinsic_vsub_vx_nxv1i64_nxv1i64_i64:
1859 ; RV32: # %bb.0: # %entry
1860 ; RV32-NEXT: addi sp, sp, -16
1861 ; RV32-NEXT: sw a1, 12(sp)
1862 ; RV32-NEXT: sw a0, 8(sp)
1863 ; RV32-NEXT: addi a0, sp, 8
1864 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1865 ; RV32-NEXT: vlse64.v v9, (a0), zero
1866 ; RV32-NEXT: vsub.vv v8, v8, v9
1867 ; RV32-NEXT: addi sp, sp, 16
1870 ; RV64-LABEL: intrinsic_vsub_vx_nxv1i64_nxv1i64_i64:
1871 ; RV64: # %bb.0: # %entry
1872 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1873 ; RV64-NEXT: vsub.vx v8, v8, a0
1876 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
1877 <vscale x 1 x i64> undef,
1878 <vscale x 1 x i64> %0,
1882 ret <vscale x 1 x i64> %a
1885 declare <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
1892 define <vscale x 1 x i64> @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1893 ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64:
1894 ; RV32: # %bb.0: # %entry
1895 ; RV32-NEXT: addi sp, sp, -16
1896 ; RV32-NEXT: sw a1, 12(sp)
1897 ; RV32-NEXT: sw a0, 8(sp)
1898 ; RV32-NEXT: addi a0, sp, 8
1899 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
1900 ; RV32-NEXT: vlse64.v v10, (a0), zero
1901 ; RV32-NEXT: vsub.vv v8, v9, v10, v0.t
1902 ; RV32-NEXT: addi sp, sp, 16
1905 ; RV64-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64:
1906 ; RV64: # %bb.0: # %entry
1907 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1908 ; RV64-NEXT: vsub.vx v8, v9, a0, v0.t
1911 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
1912 <vscale x 1 x i64> %0,
1913 <vscale x 1 x i64> %1,
1915 <vscale x 1 x i1> %3,
1918 ret <vscale x 1 x i64> %a
1921 declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
1927 define <vscale x 2 x i64> @intrinsic_vsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
1928 ; RV32-LABEL: intrinsic_vsub_vx_nxv2i64_nxv2i64_i64:
1929 ; RV32: # %bb.0: # %entry
1930 ; RV32-NEXT: addi sp, sp, -16
1931 ; RV32-NEXT: sw a1, 12(sp)
1932 ; RV32-NEXT: sw a0, 8(sp)
1933 ; RV32-NEXT: addi a0, sp, 8
1934 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1935 ; RV32-NEXT: vlse64.v v10, (a0), zero
1936 ; RV32-NEXT: vsub.vv v8, v8, v10
1937 ; RV32-NEXT: addi sp, sp, 16
1940 ; RV64-LABEL: intrinsic_vsub_vx_nxv2i64_nxv2i64_i64:
1941 ; RV64: # %bb.0: # %entry
1942 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1943 ; RV64-NEXT: vsub.vx v8, v8, a0
1946 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
1947 <vscale x 2 x i64> undef,
1948 <vscale x 2 x i64> %0,
1952 ret <vscale x 2 x i64> %a
1955 declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
1962 define <vscale x 2 x i64> @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1963 ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64:
1964 ; RV32: # %bb.0: # %entry
1965 ; RV32-NEXT: addi sp, sp, -16
1966 ; RV32-NEXT: sw a1, 12(sp)
1967 ; RV32-NEXT: sw a0, 8(sp)
1968 ; RV32-NEXT: addi a0, sp, 8
1969 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
1970 ; RV32-NEXT: vlse64.v v12, (a0), zero
1971 ; RV32-NEXT: vsub.vv v8, v10, v12, v0.t
1972 ; RV32-NEXT: addi sp, sp, 16
1975 ; RV64-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64:
1976 ; RV64: # %bb.0: # %entry
1977 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
1978 ; RV64-NEXT: vsub.vx v8, v10, a0, v0.t
1981 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
1982 <vscale x 2 x i64> %0,
1983 <vscale x 2 x i64> %1,
1985 <vscale x 2 x i1> %3,
1988 ret <vscale x 2 x i64> %a
1991 declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
1997 define <vscale x 4 x i64> @intrinsic_vsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
1998 ; RV32-LABEL: intrinsic_vsub_vx_nxv4i64_nxv4i64_i64:
1999 ; RV32: # %bb.0: # %entry
2000 ; RV32-NEXT: addi sp, sp, -16
2001 ; RV32-NEXT: sw a1, 12(sp)
2002 ; RV32-NEXT: sw a0, 8(sp)
2003 ; RV32-NEXT: addi a0, sp, 8
2004 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
2005 ; RV32-NEXT: vlse64.v v12, (a0), zero
2006 ; RV32-NEXT: vsub.vv v8, v8, v12
2007 ; RV32-NEXT: addi sp, sp, 16
2010 ; RV64-LABEL: intrinsic_vsub_vx_nxv4i64_nxv4i64_i64:
2011 ; RV64: # %bb.0: # %entry
2012 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
2013 ; RV64-NEXT: vsub.vx v8, v8, a0
2016 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
2017 <vscale x 4 x i64> undef,
2018 <vscale x 4 x i64> %0,
2022 ret <vscale x 4 x i64> %a
2025 declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
2032 define <vscale x 4 x i64> @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
2033 ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64:
2034 ; RV32: # %bb.0: # %entry
2035 ; RV32-NEXT: addi sp, sp, -16
2036 ; RV32-NEXT: sw a1, 12(sp)
2037 ; RV32-NEXT: sw a0, 8(sp)
2038 ; RV32-NEXT: addi a0, sp, 8
2039 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
2040 ; RV32-NEXT: vlse64.v v16, (a0), zero
2041 ; RV32-NEXT: vsub.vv v8, v12, v16, v0.t
2042 ; RV32-NEXT: addi sp, sp, 16
2045 ; RV64-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64:
2046 ; RV64: # %bb.0: # %entry
2047 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
2048 ; RV64-NEXT: vsub.vx v8, v12, a0, v0.t
2051 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
2052 <vscale x 4 x i64> %0,
2053 <vscale x 4 x i64> %1,
2055 <vscale x 4 x i1> %3,
2058 ret <vscale x 4 x i64> %a
2061 declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
2067 define <vscale x 8 x i64> @intrinsic_vsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
2068 ; RV32-LABEL: intrinsic_vsub_vx_nxv8i64_nxv8i64_i64:
2069 ; RV32: # %bb.0: # %entry
2070 ; RV32-NEXT: addi sp, sp, -16
2071 ; RV32-NEXT: sw a1, 12(sp)
2072 ; RV32-NEXT: sw a0, 8(sp)
2073 ; RV32-NEXT: addi a0, sp, 8
2074 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2075 ; RV32-NEXT: vlse64.v v16, (a0), zero
2076 ; RV32-NEXT: vsub.vv v8, v8, v16
2077 ; RV32-NEXT: addi sp, sp, 16
2080 ; RV64-LABEL: intrinsic_vsub_vx_nxv8i64_nxv8i64_i64:
2081 ; RV64: # %bb.0: # %entry
2082 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
2083 ; RV64-NEXT: vsub.vx v8, v8, a0
2086 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
2087 <vscale x 8 x i64> undef,
2088 <vscale x 8 x i64> %0,
2092 ret <vscale x 8 x i64> %a
2095 declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
2102 define <vscale x 8 x i64> @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
2103 ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64:
2104 ; RV32: # %bb.0: # %entry
2105 ; RV32-NEXT: addi sp, sp, -16
2106 ; RV32-NEXT: sw a1, 12(sp)
2107 ; RV32-NEXT: sw a0, 8(sp)
2108 ; RV32-NEXT: addi a0, sp, 8
2109 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
2110 ; RV32-NEXT: vlse64.v v24, (a0), zero
2111 ; RV32-NEXT: vsub.vv v8, v16, v24, v0.t
2112 ; RV32-NEXT: addi sp, sp, 16
2115 ; RV64-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64:
2116 ; RV64: # %bb.0: # %entry
2117 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
2118 ; RV64-NEXT: vsub.vx v8, v16, a0, v0.t
2121 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
2122 <vscale x 8 x i64> %0,
2123 <vscale x 8 x i64> %1,
2125 <vscale x 8 x i1> %3,
2128 ret <vscale x 8 x i64> %a
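; Immediate tests: vsub with an immediate operand is emitted as vadd.vi with
; the negated immediate. The _tu variants check tail-undisturbed (tu) policy,
; and the masked variants check the ta, mu configuration with v0.t.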
2131 define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
2132 ; CHECK-LABEL: intrinsic_vsub_vi_nxv1i8_nxv1i8_i8:
2133 ; CHECK: # %bb.0: # %entry
2134 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
2135 ; CHECK-NEXT: vadd.vi v8, v8, -9
2138 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
2139 <vscale x 1 x i8> undef,
2140 <vscale x 1 x i8> %0,
2144 ret <vscale x 1 x i8> %a
2147 define <vscale x 1 x i8> @intrinsic_vsub_vi_tu_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
2148 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv1i8_nxv1i8_i8:
2149 ; CHECK: # %bb.0: # %entry
2150 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
2151 ; CHECK-NEXT: vadd.vi v8, v9, -9
2154 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
2155 <vscale x 1 x i8> %0,
2156 <vscale x 1 x i8> %1,
2160 ret <vscale x 1 x i8> %a
2163 define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2164 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
2165 ; CHECK: # %bb.0: # %entry
2166 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
2167 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2170 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
2171 <vscale x 1 x i8> %0,
2172 <vscale x 1 x i8> %1,
2174 <vscale x 1 x i1> %2,
2177 ret <vscale x 1 x i8> %a
2180 define <vscale x 2 x i8> @intrinsic_vsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
2181 ; CHECK-LABEL: intrinsic_vsub_vi_nxv2i8_nxv2i8_i8:
2182 ; CHECK: # %bb.0: # %entry
2183 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
2184 ; CHECK-NEXT: vadd.vi v8, v8, -9
2187 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
2188 <vscale x 2 x i8> undef,
2189 <vscale x 2 x i8> %0,
2193 ret <vscale x 2 x i8> %a
2196 define <vscale x 2 x i8> @intrinsic_vsub_vi_tu_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
2197 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv2i8_nxv2i8_i8:
2198 ; CHECK: # %bb.0: # %entry
2199 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
2200 ; CHECK-NEXT: vadd.vi v8, v9, -9
2203 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
2204 <vscale x 2 x i8> %0,
2205 <vscale x 2 x i8> %1,
2209 ret <vscale x 2 x i8> %a
2212 define <vscale x 2 x i8> @intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
2213 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8:
2214 ; CHECK: # %bb.0: # %entry
2215 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
2216 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2219 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
2220 <vscale x 2 x i8> %0,
2221 <vscale x 2 x i8> %1,
2223 <vscale x 2 x i1> %2,
2226 ret <vscale x 2 x i8> %a
2229 define <vscale x 4 x i8> @intrinsic_vsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
2230 ; CHECK-LABEL: intrinsic_vsub_vi_nxv4i8_nxv4i8_i8:
2231 ; CHECK: # %bb.0: # %entry
2232 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
2233 ; CHECK-NEXT: vadd.vi v8, v8, -9
2236 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
2237 <vscale x 4 x i8> undef,
2238 <vscale x 4 x i8> %0,
2242 ret <vscale x 4 x i8> %a
2245 define <vscale x 4 x i8> @intrinsic_vsub_vi_tu_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
2246 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv4i8_nxv4i8_i8:
2247 ; CHECK: # %bb.0: # %entry
2248 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
2249 ; CHECK-NEXT: vadd.vi v8, v9, -9
2252 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
2253 <vscale x 4 x i8> %0,
2254 <vscale x 4 x i8> %1,
2258 ret <vscale x 4 x i8> %a
2261 define <vscale x 4 x i8> @intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
2262 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8:
2263 ; CHECK: # %bb.0: # %entry
2264 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
2265 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2268 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
2269 <vscale x 4 x i8> %0,
2270 <vscale x 4 x i8> %1,
2272 <vscale x 4 x i1> %2,
2275 ret <vscale x 4 x i8> %a
2278 define <vscale x 8 x i8> @intrinsic_vsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
2279 ; CHECK-LABEL: intrinsic_vsub_vi_nxv8i8_nxv8i8_i8:
2280 ; CHECK: # %bb.0: # %entry
2281 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
2282 ; CHECK-NEXT: vadd.vi v8, v8, -9
2285 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
2286 <vscale x 8 x i8> undef,
2287 <vscale x 8 x i8> %0,
2291 ret <vscale x 8 x i8> %a
2294 define <vscale x 8 x i8> @intrinsic_vsub_vi_tu_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
2295 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv8i8_nxv8i8_i8:
2296 ; CHECK: # %bb.0: # %entry
2297 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
2298 ; CHECK-NEXT: vadd.vi v8, v9, -9
2301 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
2302 <vscale x 8 x i8> %0,
2303 <vscale x 8 x i8> %1,
2307 ret <vscale x 8 x i8> %a
2310 define <vscale x 8 x i8> @intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2311 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8:
2312 ; CHECK: # %bb.0: # %entry
2313 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
2314 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2317 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
2318 <vscale x 8 x i8> %0,
2319 <vscale x 8 x i8> %1,
2321 <vscale x 8 x i1> %2,
2324 ret <vscale x 8 x i8> %a
2327 define <vscale x 16 x i8> @intrinsic_vsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
2328 ; CHECK-LABEL: intrinsic_vsub_vi_nxv16i8_nxv16i8_i8:
2329 ; CHECK: # %bb.0: # %entry
2330 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
2331 ; CHECK-NEXT: vadd.vi v8, v8, -9
2334 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
2335 <vscale x 16 x i8> undef,
2336 <vscale x 16 x i8> %0,
2340 ret <vscale x 16 x i8> %a
2343 define <vscale x 16 x i8> @intrinsic_vsub_vi_tu_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
2344 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv16i8_nxv16i8_i8:
2345 ; CHECK: # %bb.0: # %entry
2346 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
2347 ; CHECK-NEXT: vadd.vi v8, v10, -9
2350 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
2351 <vscale x 16 x i8> %0,
2352 <vscale x 16 x i8> %1,
2356 ret <vscale x 16 x i8> %a
2359 define <vscale x 16 x i8> @intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
2360 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8:
2361 ; CHECK: # %bb.0: # %entry
2362 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
2363 ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
2366 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
2367 <vscale x 16 x i8> %0,
2368 <vscale x 16 x i8> %1,
2370 <vscale x 16 x i1> %2,
2373 ret <vscale x 16 x i8> %a
2376 define <vscale x 32 x i8> @intrinsic_vsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
2377 ; CHECK-LABEL: intrinsic_vsub_vi_nxv32i8_nxv32i8_i8:
2378 ; CHECK: # %bb.0: # %entry
2379 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
2380 ; CHECK-NEXT: vadd.vi v8, v8, -9
2383 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
2384 <vscale x 32 x i8> undef,
2385 <vscale x 32 x i8> %0,
2389 ret <vscale x 32 x i8> %a
2392 define <vscale x 32 x i8> @intrinsic_vsub_vi_tu_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
2393 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv32i8_nxv32i8_i8:
2394 ; CHECK: # %bb.0: # %entry
2395 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
2396 ; CHECK-NEXT: vadd.vi v8, v12, -9
2399 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
2400 <vscale x 32 x i8> %0,
2401 <vscale x 32 x i8> %1,
2405 ret <vscale x 32 x i8> %a
2408 define <vscale x 32 x i8> @intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
2409 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8:
2410 ; CHECK: # %bb.0: # %entry
2411 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
2412 ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
2415 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
2416 <vscale x 32 x i8> %0,
2417 <vscale x 32 x i8> %1,
2419 <vscale x 32 x i1> %2,
2422 ret <vscale x 32 x i8> %a
2425 define <vscale x 64 x i8> @intrinsic_vsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
2426 ; CHECK-LABEL: intrinsic_vsub_vi_nxv64i8_nxv64i8_i8:
2427 ; CHECK: # %bb.0: # %entry
2428 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
2429 ; CHECK-NEXT: vadd.vi v8, v8, -9
2432 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
2433 <vscale x 64 x i8> undef,
2434 <vscale x 64 x i8> %0,
2438 ret <vscale x 64 x i8> %a
2441 define <vscale x 64 x i8> @intrinsic_vsub_vi_tu_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
2442 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv64i8_nxv64i8_i8:
2443 ; CHECK: # %bb.0: # %entry
2444 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
2445 ; CHECK-NEXT: vadd.vi v8, v16, -9
2448 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
2449 <vscale x 64 x i8> %0,
2450 <vscale x 64 x i8> %1,
2454 ret <vscale x 64 x i8> %a
2457 define <vscale x 64 x i8> @intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
2458 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8:
2459 ; CHECK: # %bb.0: # %entry
2460 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
2461 ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
2464 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
2465 <vscale x 64 x i8> %0,
2466 <vscale x 64 x i8> %1,
2468 <vscale x 64 x i1> %2,
2471 ret <vscale x 64 x i8> %a
2474 define <vscale x 1 x i16> @intrinsic_vsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
2475 ; CHECK-LABEL: intrinsic_vsub_vi_nxv1i16_nxv1i16_i16:
2476 ; CHECK: # %bb.0: # %entry
2477 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2478 ; CHECK-NEXT: vadd.vi v8, v8, -9
2481 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
2482 <vscale x 1 x i16> undef,
2483 <vscale x 1 x i16> %0,
2487 ret <vscale x 1 x i16> %a
2490 define <vscale x 1 x i16> @intrinsic_vsub_vi_tu_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
2491 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv1i16_nxv1i16_i16:
2492 ; CHECK: # %bb.0: # %entry
2493 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
2494 ; CHECK-NEXT: vadd.vi v8, v9, -9
2497 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
2498 <vscale x 1 x i16> %0,
2499 <vscale x 1 x i16> %1,
2503 ret <vscale x 1 x i16> %a
2506 define <vscale x 1 x i16> @intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2507 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16:
2508 ; CHECK: # %bb.0: # %entry
2509 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
2510 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2513 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
2514 <vscale x 1 x i16> %0,
2515 <vscale x 1 x i16> %1,
2517 <vscale x 1 x i1> %2,
2520 ret <vscale x 1 x i16> %a
2523 define <vscale x 2 x i16> @intrinsic_vsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
2524 ; CHECK-LABEL: intrinsic_vsub_vi_nxv2i16_nxv2i16_i16:
2525 ; CHECK: # %bb.0: # %entry
2526 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2527 ; CHECK-NEXT: vadd.vi v8, v8, -9
2530 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
2531 <vscale x 2 x i16> undef,
2532 <vscale x 2 x i16> %0,
2536 ret <vscale x 2 x i16> %a
2539 define <vscale x 2 x i16> @intrinsic_vsub_vi_tu_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
2540 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv2i16_nxv2i16_i16:
2541 ; CHECK: # %bb.0: # %entry
2542 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
2543 ; CHECK-NEXT: vadd.vi v8, v9, -9
2546 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
2547 <vscale x 2 x i16> %0,
2548 <vscale x 2 x i16> %1,
2552 ret <vscale x 2 x i16> %a
2555 define <vscale x 2 x i16> @intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
2556 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16:
2557 ; CHECK: # %bb.0: # %entry
2558 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
2559 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2562 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
2563 <vscale x 2 x i16> %0,
2564 <vscale x 2 x i16> %1,
2566 <vscale x 2 x i1> %2,
2569 ret <vscale x 2 x i16> %a
2572 define <vscale x 4 x i16> @intrinsic_vsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
2573 ; CHECK-LABEL: intrinsic_vsub_vi_nxv4i16_nxv4i16_i16:
2574 ; CHECK: # %bb.0: # %entry
2575 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2576 ; CHECK-NEXT: vadd.vi v8, v8, -9
2579 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
2580 <vscale x 4 x i16> undef,
2581 <vscale x 4 x i16> %0,
2585 ret <vscale x 4 x i16> %a
2588 define <vscale x 4 x i16> @intrinsic_vsub_vi_tu_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
2589 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv4i16_nxv4i16_i16:
2590 ; CHECK: # %bb.0: # %entry
2591 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
2592 ; CHECK-NEXT: vadd.vi v8, v9, -9
2595 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
2596 <vscale x 4 x i16> %0,
2597 <vscale x 4 x i16> %1,
2601 ret <vscale x 4 x i16> %a
2604 define <vscale x 4 x i16> @intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
2605 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16:
2606 ; CHECK: # %bb.0: # %entry
2607 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
2608 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2611 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
2612 <vscale x 4 x i16> %0,
2613 <vscale x 4 x i16> %1,
2615 <vscale x 4 x i1> %2,
2618 ret <vscale x 4 x i16> %a
2621 define <vscale x 8 x i16> @intrinsic_vsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
2622 ; CHECK-LABEL: intrinsic_vsub_vi_nxv8i16_nxv8i16_i16:
2623 ; CHECK: # %bb.0: # %entry
2624 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2625 ; CHECK-NEXT: vadd.vi v8, v8, -9
2628 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
2629 <vscale x 8 x i16> undef,
2630 <vscale x 8 x i16> %0,
2634 ret <vscale x 8 x i16> %a
2637 define <vscale x 8 x i16> @intrinsic_vsub_vi_tu_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
2638 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv8i16_nxv8i16_i16:
2639 ; CHECK: # %bb.0: # %entry
2640 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
2641 ; CHECK-NEXT: vadd.vi v8, v10, -9
2644 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
2645 <vscale x 8 x i16> %0,
2646 <vscale x 8 x i16> %1,
2650 ret <vscale x 8 x i16> %a
2653 define <vscale x 8 x i16> @intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2654 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16:
2655 ; CHECK: # %bb.0: # %entry
2656 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
2657 ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
2660 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
2661 <vscale x 8 x i16> %0,
2662 <vscale x 8 x i16> %1,
2664 <vscale x 8 x i1> %2,
2667 ret <vscale x 8 x i16> %a
2670 define <vscale x 16 x i16> @intrinsic_vsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
2671 ; CHECK-LABEL: intrinsic_vsub_vi_nxv16i16_nxv16i16_i16:
2672 ; CHECK: # %bb.0: # %entry
2673 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2674 ; CHECK-NEXT: vadd.vi v8, v8, -9
2677 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
2678 <vscale x 16 x i16> undef,
2679 <vscale x 16 x i16> %0,
2683 ret <vscale x 16 x i16> %a
2686 define <vscale x 16 x i16> @intrinsic_vsub_vi_tu_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
2687 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv16i16_nxv16i16_i16:
2688 ; CHECK: # %bb.0: # %entry
2689 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
2690 ; CHECK-NEXT: vadd.vi v8, v12, -9
2693 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
2694 <vscale x 16 x i16> %0,
2695 <vscale x 16 x i16> %1,
2699 ret <vscale x 16 x i16> %a
2702 define <vscale x 16 x i16> @intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
2703 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16:
2704 ; CHECK: # %bb.0: # %entry
2705 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
2706 ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
2709 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
2710 <vscale x 16 x i16> %0,
2711 <vscale x 16 x i16> %1,
2713 <vscale x 16 x i1> %2,
2716 ret <vscale x 16 x i16> %a
2719 define <vscale x 32 x i16> @intrinsic_vsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
2720 ; CHECK-LABEL: intrinsic_vsub_vi_nxv32i16_nxv32i16_i16:
2721 ; CHECK: # %bb.0: # %entry
2722 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2723 ; CHECK-NEXT: vadd.vi v8, v8, -9
2726 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
2727 <vscale x 32 x i16> undef,
2728 <vscale x 32 x i16> %0,
2732 ret <vscale x 32 x i16> %a
2735 define <vscale x 32 x i16> @intrinsic_vsub_vi_tu_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
2736 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv32i16_nxv32i16_i16:
2737 ; CHECK: # %bb.0: # %entry
2738 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma
2739 ; CHECK-NEXT: vadd.vi v8, v16, -9
2742 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
2743 <vscale x 32 x i16> %0,
2744 <vscale x 32 x i16> %1,
2748 ret <vscale x 32 x i16> %a
2751 define <vscale x 32 x i16> @intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
2752 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16:
2753 ; CHECK: # %bb.0: # %entry
2754 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
2755 ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
2758 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
2759 <vscale x 32 x i16> %0,
2760 <vscale x 32 x i16> %1,
2762 <vscale x 32 x i1> %2,
2765 ret <vscale x 32 x i16> %a
2768 define <vscale x 1 x i32> @intrinsic_vsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
2769 ; CHECK-LABEL: intrinsic_vsub_vi_nxv1i32_nxv1i32_i32:
2770 ; CHECK: # %bb.0: # %entry
2771 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2772 ; CHECK-NEXT: vadd.vi v8, v8, -9
2775 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
2776 <vscale x 1 x i32> undef,
2777 <vscale x 1 x i32> %0,
2781 ret <vscale x 1 x i32> %a
2784 define <vscale x 1 x i32> @intrinsic_vsub_vi_tu_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
2785 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv1i32_nxv1i32_i32:
2786 ; CHECK: # %bb.0: # %entry
2787 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
2788 ; CHECK-NEXT: vadd.vi v8, v9, -9
2791 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
2792 <vscale x 1 x i32> %0,
2793 <vscale x 1 x i32> %1,
2797 ret <vscale x 1 x i32> %a
2800 define <vscale x 1 x i32> @intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2801 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32:
2802 ; CHECK: # %bb.0: # %entry
2803 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
2804 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2807 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
2808 <vscale x 1 x i32> %0,
2809 <vscale x 1 x i32> %1,
2811 <vscale x 1 x i1> %2,
2814 ret <vscale x 1 x i32> %a
2817 define <vscale x 2 x i32> @intrinsic_vsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
2818 ; CHECK-LABEL: intrinsic_vsub_vi_nxv2i32_nxv2i32_i32:
2819 ; CHECK: # %bb.0: # %entry
2820 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2821 ; CHECK-NEXT: vadd.vi v8, v8, -9
2824 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
2825 <vscale x 2 x i32> undef,
2826 <vscale x 2 x i32> %0,
2830 ret <vscale x 2 x i32> %a
2833 define <vscale x 2 x i32> @intrinsic_vsub_vi_tu_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
2834 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv2i32_nxv2i32_i32:
2835 ; CHECK: # %bb.0: # %entry
2836 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
2837 ; CHECK-NEXT: vadd.vi v8, v9, -9
2840 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
2841 <vscale x 2 x i32> %0,
2842 <vscale x 2 x i32> %1,
2846 ret <vscale x 2 x i32> %a
2849 define <vscale x 2 x i32> @intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
2850 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32:
2851 ; CHECK: # %bb.0: # %entry
2852 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
2853 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2856 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
2857 <vscale x 2 x i32> %0,
2858 <vscale x 2 x i32> %1,
2860 <vscale x 2 x i1> %2,
2863 ret <vscale x 2 x i32> %a
2866 define <vscale x 4 x i32> @intrinsic_vsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
2867 ; CHECK-LABEL: intrinsic_vsub_vi_nxv4i32_nxv4i32_i32:
2868 ; CHECK: # %bb.0: # %entry
2869 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2870 ; CHECK-NEXT: vadd.vi v8, v8, -9
2873 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
2874 <vscale x 4 x i32> undef,
2875 <vscale x 4 x i32> %0,
2879 ret <vscale x 4 x i32> %a
2882 define <vscale x 4 x i32> @intrinsic_vsub_vi_tu_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
2883 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv4i32_nxv4i32_i32:
2884 ; CHECK: # %bb.0: # %entry
2885 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
2886 ; CHECK-NEXT: vadd.vi v8, v10, -9
2889 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
2890 <vscale x 4 x i32> %0,
2891 <vscale x 4 x i32> %1,
2895 ret <vscale x 4 x i32> %a
2898 define <vscale x 4 x i32> @intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
2899 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32:
2900 ; CHECK: # %bb.0: # %entry
2901 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
2902 ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
2905 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
2906 <vscale x 4 x i32> %0,
2907 <vscale x 4 x i32> %1,
2909 <vscale x 4 x i1> %2,
2912 ret <vscale x 4 x i32> %a
2915 define <vscale x 8 x i32> @intrinsic_vsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
2916 ; CHECK-LABEL: intrinsic_vsub_vi_nxv8i32_nxv8i32_i32:
2917 ; CHECK: # %bb.0: # %entry
2918 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2919 ; CHECK-NEXT: vadd.vi v8, v8, -9
2922 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
2923 <vscale x 8 x i32> undef,
2924 <vscale x 8 x i32> %0,
2928 ret <vscale x 8 x i32> %a
2931 define <vscale x 8 x i32> @intrinsic_vsub_vi_tu_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
2932 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv8i32_nxv8i32_i32:
2933 ; CHECK: # %bb.0: # %entry
2934 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
2935 ; CHECK-NEXT: vadd.vi v8, v12, -9
2938 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
2939 <vscale x 8 x i32> %0,
2940 <vscale x 8 x i32> %1,
2944 ret <vscale x 8 x i32> %a
2947 define <vscale x 8 x i32> @intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2948 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32:
2949 ; CHECK: # %bb.0: # %entry
2950 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
2951 ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
2954 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
2955 <vscale x 8 x i32> %0,
2956 <vscale x 8 x i32> %1,
2958 <vscale x 8 x i1> %2,
2961 ret <vscale x 8 x i32> %a
2964 define <vscale x 16 x i32> @intrinsic_vsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
2965 ; CHECK-LABEL: intrinsic_vsub_vi_nxv16i32_nxv16i32_i32:
2966 ; CHECK: # %bb.0: # %entry
2967 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2968 ; CHECK-NEXT: vadd.vi v8, v8, -9
2971 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
2972 <vscale x 16 x i32> undef,
2973 <vscale x 16 x i32> %0,
2977 ret <vscale x 16 x i32> %a
2980 define <vscale x 16 x i32> @intrinsic_vsub_vi_tu_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
2981 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv16i32_nxv16i32_i32:
2982 ; CHECK: # %bb.0: # %entry
2983 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
2984 ; CHECK-NEXT: vadd.vi v8, v16, -9
2987 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
2988 <vscale x 16 x i32> %0,
2989 <vscale x 16 x i32> %1,
2993 ret <vscale x 16 x i32> %a
2996 define <vscale x 16 x i32> @intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
2997 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32:
2998 ; CHECK: # %bb.0: # %entry
2999 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
3000 ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
3003 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
3004 <vscale x 16 x i32> %0,
3005 <vscale x 16 x i32> %1,
3007 <vscale x 16 x i1> %2,
3010 ret <vscale x 16 x i32> %a
3013 define <vscale x 1 x i64> @intrinsic_vsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
3014 ; CHECK-LABEL: intrinsic_vsub_vi_nxv1i64_nxv1i64_i64:
3015 ; CHECK: # %bb.0: # %entry
3016 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
3017 ; CHECK-NEXT: vadd.vi v8, v8, -9
3020 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
3021 <vscale x 1 x i64> undef,
3022 <vscale x 1 x i64> %0,
3026 ret <vscale x 1 x i64> %a
3029 define <vscale x 1 x i64> @intrinsic_vsub_vi_tu_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
3030 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv1i64_nxv1i64_i64:
3031 ; CHECK: # %bb.0: # %entry
3032 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
3033 ; CHECK-NEXT: vadd.vi v8, v9, -9
3036 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
3037 <vscale x 1 x i64> %0,
3038 <vscale x 1 x i64> %1,
3042 ret <vscale x 1 x i64> %a
3045 define <vscale x 1 x i64> @intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
3046 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64:
3047 ; CHECK: # %bb.0: # %entry
3048 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
3049 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
3052 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
3053 <vscale x 1 x i64> %0,
3054 <vscale x 1 x i64> %1,
3056 <vscale x 1 x i1> %2,
3059 ret <vscale x 1 x i64> %a
3062 define <vscale x 2 x i64> @intrinsic_vsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
3063 ; CHECK-LABEL: intrinsic_vsub_vi_nxv2i64_nxv2i64_i64:
3064 ; CHECK: # %bb.0: # %entry
3065 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
3066 ; CHECK-NEXT: vadd.vi v8, v8, -9
3069 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
3070 <vscale x 2 x i64> undef,
3071 <vscale x 2 x i64> %0,
3075 ret <vscale x 2 x i64> %a
3078 define <vscale x 2 x i64> @intrinsic_vsub_vi_tu_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
3079 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv2i64_nxv2i64_i64:
3080 ; CHECK: # %bb.0: # %entry
3081 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
3082 ; CHECK-NEXT: vadd.vi v8, v10, -9
3085 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
3086 <vscale x 2 x i64> %0,
3087 <vscale x 2 x i64> %1,
3091 ret <vscale x 2 x i64> %a
3094 define <vscale x 2 x i64> @intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
3095 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64:
3096 ; CHECK: # %bb.0: # %entry
3097 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
3098 ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
3101 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
3102 <vscale x 2 x i64> %0,
3103 <vscale x 2 x i64> %1,
3105 <vscale x 2 x i1> %2,
3108 ret <vscale x 2 x i64> %a
3111 define <vscale x 4 x i64> @intrinsic_vsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
3112 ; CHECK-LABEL: intrinsic_vsub_vi_nxv4i64_nxv4i64_i64:
3113 ; CHECK: # %bb.0: # %entry
3114 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
3115 ; CHECK-NEXT: vadd.vi v8, v8, -9
3118 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
3119 <vscale x 4 x i64> undef,
3120 <vscale x 4 x i64> %0,
3124 ret <vscale x 4 x i64> %a
3127 define <vscale x 4 x i64> @intrinsic_vsub_vi_tu_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
3128 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv4i64_nxv4i64_i64:
3129 ; CHECK: # %bb.0: # %entry
3130 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
3131 ; CHECK-NEXT: vadd.vi v8, v12, -9
3134 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
3135 <vscale x 4 x i64> %0,
3136 <vscale x 4 x i64> %1,
3140 ret <vscale x 4 x i64> %a
3143 define <vscale x 4 x i64> @intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
3144 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64:
3145 ; CHECK: # %bb.0: # %entry
3146 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
3147 ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
3150 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
3151 <vscale x 4 x i64> %0,
3152 <vscale x 4 x i64> %1,
3154 <vscale x 4 x i1> %2,
3157 ret <vscale x 4 x i64> %a
3160 define <vscale x 8 x i64> @intrinsic_vsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
3161 ; CHECK-LABEL: intrinsic_vsub_vi_nxv8i64_nxv8i64_i64:
3162 ; CHECK: # %bb.0: # %entry
3163 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
3164 ; CHECK-NEXT: vadd.vi v8, v8, -9
3167 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
3168 <vscale x 8 x i64> undef,
3169 <vscale x 8 x i64> %0,
3173 ret <vscale x 8 x i64> %a
3176 define <vscale x 8 x i64> @intrinsic_vsub_vi_tu_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
3177 ; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv8i64_nxv8i64_i64:
3178 ; CHECK: # %bb.0: # %entry
3179 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
3180 ; CHECK-NEXT: vadd.vi v8, v16, -9
3183 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
3184 <vscale x 8 x i64> %0,
3185 <vscale x 8 x i64> %1,
3189 ret <vscale x 8 x i64> %a
3192 define <vscale x 8 x i64> @intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
3193 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64:
3194 ; CHECK: # %bb.0: # %entry
3195 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
3196 ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
3199 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
3200 <vscale x 8 x i64> %0,
3201 <vscale x 8 x i64> %1,
3203 <vscale x 8 x i1> %2,
3206 ret <vscale x 8 x i64> %a