1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
3 ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
5 ; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
7 declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
13 define <vscale x 1 x i8> @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
14 ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8:
15 ; CHECK: # %bb.0: # %entry
16 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
17 ; CHECK-NEXT: vsub.vv v8, v8, v9
20 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8(
21 <vscale x 1 x i8> undef,
26 ret <vscale x 1 x i8> %a
29 declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
36 define <vscale x 1 x i8> @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
37 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8:
38 ; CHECK: # %bb.0: # %entry
39 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
40 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
43 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8(
50 ret <vscale x 1 x i8> %a
53 declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
59 define <vscale x 2 x i8> @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
60 ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8:
61 ; CHECK: # %bb.0: # %entry
62 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
63 ; CHECK-NEXT: vsub.vv v8, v8, v9
66 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8(
67 <vscale x 2 x i8> undef,
72 ret <vscale x 2 x i8> %a
75 declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
82 define <vscale x 2 x i8> @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
83 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8:
84 ; CHECK: # %bb.0: # %entry
85 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
86 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
89 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8(
96 ret <vscale x 2 x i8> %a
99 declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
105 define <vscale x 4 x i8> @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
106 ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8:
107 ; CHECK: # %bb.0: # %entry
108 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
109 ; CHECK-NEXT: vsub.vv v8, v8, v9
112 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8(
113 <vscale x 4 x i8> undef,
114 <vscale x 4 x i8> %0,
115 <vscale x 4 x i8> %1,
118 ret <vscale x 4 x i8> %a
121 declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
128 define <vscale x 4 x i8> @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
129 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8:
130 ; CHECK: # %bb.0: # %entry
131 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
132 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
135 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8(
136 <vscale x 4 x i8> %0,
137 <vscale x 4 x i8> %1,
138 <vscale x 4 x i8> %2,
139 <vscale x 4 x i1> %3,
142 ret <vscale x 4 x i8> %a
145 declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
151 define <vscale x 8 x i8> @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
152 ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8:
153 ; CHECK: # %bb.0: # %entry
154 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
155 ; CHECK-NEXT: vsub.vv v8, v8, v9
158 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8(
159 <vscale x 8 x i8> undef,
160 <vscale x 8 x i8> %0,
161 <vscale x 8 x i8> %1,
164 ret <vscale x 8 x i8> %a
167 declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
174 define <vscale x 8 x i8> @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
175 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8:
176 ; CHECK: # %bb.0: # %entry
177 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
178 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
181 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8(
182 <vscale x 8 x i8> %0,
183 <vscale x 8 x i8> %1,
184 <vscale x 8 x i8> %2,
185 <vscale x 8 x i1> %3,
188 ret <vscale x 8 x i8> %a
191 declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
197 define <vscale x 16 x i8> @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
198 ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8:
199 ; CHECK: # %bb.0: # %entry
200 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
201 ; CHECK-NEXT: vsub.vv v8, v8, v10
204 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8(
205 <vscale x 16 x i8> undef,
206 <vscale x 16 x i8> %0,
207 <vscale x 16 x i8> %1,
210 ret <vscale x 16 x i8> %a
213 declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
220 define <vscale x 16 x i8> @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
221 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8:
222 ; CHECK: # %bb.0: # %entry
223 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
224 ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t
227 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8(
228 <vscale x 16 x i8> %0,
229 <vscale x 16 x i8> %1,
230 <vscale x 16 x i8> %2,
231 <vscale x 16 x i1> %3,
234 ret <vscale x 16 x i8> %a
237 declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
243 define <vscale x 32 x i8> @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
244 ; CHECK-LABEL: intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8:
245 ; CHECK: # %bb.0: # %entry
246 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
247 ; CHECK-NEXT: vsub.vv v8, v8, v12
250 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8(
251 <vscale x 32 x i8> undef,
252 <vscale x 32 x i8> %0,
253 <vscale x 32 x i8> %1,
256 ret <vscale x 32 x i8> %a
259 declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
266 define <vscale x 32 x i8> @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
267 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
268 ; CHECK: # %bb.0: # %entry
269 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
270 ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t
273 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8(
274 <vscale x 32 x i8> %0,
275 <vscale x 32 x i8> %1,
276 <vscale x 32 x i8> %2,
277 <vscale x 32 x i1> %3,
280 ret <vscale x 32 x i8> %a
283 declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
289 define <vscale x 64 x i8> @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
290 ; CHECK-LABEL: intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8:
291 ; CHECK: # %bb.0: # %entry
292 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
293 ; CHECK-NEXT: vsub.vv v8, v8, v16
296 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8(
297 <vscale x 64 x i8> undef,
298 <vscale x 64 x i8> %0,
299 <vscale x 64 x i8> %1,
302 ret <vscale x 64 x i8> %a
305 declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
312 define <vscale x 64 x i8> @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
313 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
314 ; CHECK: # %bb.0: # %entry
315 ; CHECK-NEXT: vl8r.v v24, (a0)
316 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
317 ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
320 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8(
321 <vscale x 64 x i8> %0,
322 <vscale x 64 x i8> %1,
323 <vscale x 64 x i8> %2,
324 <vscale x 64 x i1> %3,
327 ret <vscale x 64 x i8> %a
330 declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
336 define <vscale x 1 x i16> @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
337 ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16:
338 ; CHECK: # %bb.0: # %entry
339 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
340 ; CHECK-NEXT: vsub.vv v8, v8, v9
343 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16(
344 <vscale x 1 x i16> undef,
345 <vscale x 1 x i16> %0,
346 <vscale x 1 x i16> %1,
349 ret <vscale x 1 x i16> %a
352 declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
359 define <vscale x 1 x i16> @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
360 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
361 ; CHECK: # %bb.0: # %entry
362 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
363 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
366 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16(
367 <vscale x 1 x i16> %0,
368 <vscale x 1 x i16> %1,
369 <vscale x 1 x i16> %2,
370 <vscale x 1 x i1> %3,
373 ret <vscale x 1 x i16> %a
376 declare <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
382 define <vscale x 2 x i16> @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
383 ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16:
384 ; CHECK: # %bb.0: # %entry
385 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
386 ; CHECK-NEXT: vsub.vv v8, v8, v9
389 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16(
390 <vscale x 2 x i16> undef,
391 <vscale x 2 x i16> %0,
392 <vscale x 2 x i16> %1,
395 ret <vscale x 2 x i16> %a
398 declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
405 define <vscale x 2 x i16> @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
406 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16:
407 ; CHECK: # %bb.0: # %entry
408 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
409 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
412 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16(
413 <vscale x 2 x i16> %0,
414 <vscale x 2 x i16> %1,
415 <vscale x 2 x i16> %2,
416 <vscale x 2 x i1> %3,
419 ret <vscale x 2 x i16> %a
422 declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
428 define <vscale x 4 x i16> @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
429 ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16:
430 ; CHECK: # %bb.0: # %entry
431 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
432 ; CHECK-NEXT: vsub.vv v8, v8, v9
435 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16(
436 <vscale x 4 x i16> undef,
437 <vscale x 4 x i16> %0,
438 <vscale x 4 x i16> %1,
441 ret <vscale x 4 x i16> %a
444 declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
451 define <vscale x 4 x i16> @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
452 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16:
453 ; CHECK: # %bb.0: # %entry
454 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
455 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
458 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16(
459 <vscale x 4 x i16> %0,
460 <vscale x 4 x i16> %1,
461 <vscale x 4 x i16> %2,
462 <vscale x 4 x i1> %3,
465 ret <vscale x 4 x i16> %a
468 declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
474 define <vscale x 8 x i16> @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
475 ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16:
476 ; CHECK: # %bb.0: # %entry
477 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
478 ; CHECK-NEXT: vsub.vv v8, v8, v10
481 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16(
482 <vscale x 8 x i16> undef,
483 <vscale x 8 x i16> %0,
484 <vscale x 8 x i16> %1,
487 ret <vscale x 8 x i16> %a
490 declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
497 define <vscale x 8 x i16> @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
498 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16:
499 ; CHECK: # %bb.0: # %entry
500 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
501 ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t
504 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16(
505 <vscale x 8 x i16> %0,
506 <vscale x 8 x i16> %1,
507 <vscale x 8 x i16> %2,
508 <vscale x 8 x i1> %3,
511 ret <vscale x 8 x i16> %a
514 declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
520 define <vscale x 16 x i16> @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
521 ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16:
522 ; CHECK: # %bb.0: # %entry
523 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
524 ; CHECK-NEXT: vsub.vv v8, v8, v12
527 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16(
528 <vscale x 16 x i16> undef,
529 <vscale x 16 x i16> %0,
530 <vscale x 16 x i16> %1,
533 ret <vscale x 16 x i16> %a
536 declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
543 define <vscale x 16 x i16> @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
544 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16:
545 ; CHECK: # %bb.0: # %entry
546 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
547 ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t
550 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16(
551 <vscale x 16 x i16> %0,
552 <vscale x 16 x i16> %1,
553 <vscale x 16 x i16> %2,
554 <vscale x 16 x i1> %3,
557 ret <vscale x 16 x i16> %a
560 declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
566 define <vscale x 32 x i16> @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
567 ; CHECK-LABEL: intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16:
568 ; CHECK: # %bb.0: # %entry
569 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
570 ; CHECK-NEXT: vsub.vv v8, v8, v16
573 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16(
574 <vscale x 32 x i16> undef,
575 <vscale x 32 x i16> %0,
576 <vscale x 32 x i16> %1,
579 ret <vscale x 32 x i16> %a
582 declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
589 define <vscale x 32 x i16> @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
590 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
591 ; CHECK: # %bb.0: # %entry
592 ; CHECK-NEXT: vl8re16.v v24, (a0)
593 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
594 ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
597 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16(
598 <vscale x 32 x i16> %0,
599 <vscale x 32 x i16> %1,
600 <vscale x 32 x i16> %2,
601 <vscale x 32 x i1> %3,
604 ret <vscale x 32 x i16> %a
607 declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
613 define <vscale x 1 x i32> @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
614 ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32:
615 ; CHECK: # %bb.0: # %entry
616 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
617 ; CHECK-NEXT: vsub.vv v8, v8, v9
620 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32(
621 <vscale x 1 x i32> undef,
622 <vscale x 1 x i32> %0,
623 <vscale x 1 x i32> %1,
626 ret <vscale x 1 x i32> %a
629 declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
636 define <vscale x 1 x i32> @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
637 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32:
638 ; CHECK: # %bb.0: # %entry
639 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
640 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
643 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32(
644 <vscale x 1 x i32> %0,
645 <vscale x 1 x i32> %1,
646 <vscale x 1 x i32> %2,
647 <vscale x 1 x i1> %3,
650 ret <vscale x 1 x i32> %a
653 declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
659 define <vscale x 2 x i32> @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
660 ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32:
661 ; CHECK: # %bb.0: # %entry
662 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
663 ; CHECK-NEXT: vsub.vv v8, v8, v9
666 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32(
667 <vscale x 2 x i32> undef,
668 <vscale x 2 x i32> %0,
669 <vscale x 2 x i32> %1,
672 ret <vscale x 2 x i32> %a
675 declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
682 define <vscale x 2 x i32> @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
683 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32:
684 ; CHECK: # %bb.0: # %entry
685 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
686 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
689 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32(
690 <vscale x 2 x i32> %0,
691 <vscale x 2 x i32> %1,
692 <vscale x 2 x i32> %2,
693 <vscale x 2 x i1> %3,
696 ret <vscale x 2 x i32> %a
699 declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
705 define <vscale x 4 x i32> @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
706 ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32:
707 ; CHECK: # %bb.0: # %entry
708 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
709 ; CHECK-NEXT: vsub.vv v8, v8, v10
712 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(
713 <vscale x 4 x i32> undef,
714 <vscale x 4 x i32> %0,
715 <vscale x 4 x i32> %1,
718 ret <vscale x 4 x i32> %a
721 declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
728 define <vscale x 4 x i32> @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
729 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32:
730 ; CHECK: # %bb.0: # %entry
731 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
732 ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t
735 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32(
736 <vscale x 4 x i32> %0,
737 <vscale x 4 x i32> %1,
738 <vscale x 4 x i32> %2,
739 <vscale x 4 x i1> %3,
742 ret <vscale x 4 x i32> %a
745 declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
751 define <vscale x 8 x i32> @intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
752 ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32:
753 ; CHECK: # %bb.0: # %entry
754 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
755 ; CHECK-NEXT: vsub.vv v8, v8, v12
758 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32(
759 <vscale x 8 x i32> undef,
760 <vscale x 8 x i32> %0,
761 <vscale x 8 x i32> %1,
764 ret <vscale x 8 x i32> %a
767 declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
774 define <vscale x 8 x i32> @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
775 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32:
776 ; CHECK: # %bb.0: # %entry
777 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
778 ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t
781 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32(
782 <vscale x 8 x i32> %0,
783 <vscale x 8 x i32> %1,
784 <vscale x 8 x i32> %2,
785 <vscale x 8 x i1> %3,
788 ret <vscale x 8 x i32> %a
791 declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
797 define <vscale x 16 x i32> @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
798 ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32:
799 ; CHECK: # %bb.0: # %entry
800 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
801 ; CHECK-NEXT: vsub.vv v8, v8, v16
804 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32(
805 <vscale x 16 x i32> undef,
806 <vscale x 16 x i32> %0,
807 <vscale x 16 x i32> %1,
810 ret <vscale x 16 x i32> %a
813 declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
820 define <vscale x 16 x i32> @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
821 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
822 ; CHECK: # %bb.0: # %entry
823 ; CHECK-NEXT: vl8re32.v v24, (a0)
824 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
825 ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
828 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32(
829 <vscale x 16 x i32> %0,
830 <vscale x 16 x i32> %1,
831 <vscale x 16 x i32> %2,
832 <vscale x 16 x i1> %3,
835 ret <vscale x 16 x i32> %a
838 declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
844 define <vscale x 1 x i64> @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
845 ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64:
846 ; CHECK: # %bb.0: # %entry
847 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
848 ; CHECK-NEXT: vsub.vv v8, v8, v9
851 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64(
852 <vscale x 1 x i64> undef,
853 <vscale x 1 x i64> %0,
854 <vscale x 1 x i64> %1,
857 ret <vscale x 1 x i64> %a
860 declare <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
867 define <vscale x 1 x i64> @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
868 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
869 ; CHECK: # %bb.0: # %entry
870 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
871 ; CHECK-NEXT: vsub.vv v8, v9, v10, v0.t
874 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64(
875 <vscale x 1 x i64> %0,
876 <vscale x 1 x i64> %1,
877 <vscale x 1 x i64> %2,
878 <vscale x 1 x i1> %3,
881 ret <vscale x 1 x i64> %a
884 declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
890 define <vscale x 2 x i64> @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
891 ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64:
892 ; CHECK: # %bb.0: # %entry
893 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
894 ; CHECK-NEXT: vsub.vv v8, v8, v10
897 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64(
898 <vscale x 2 x i64> undef,
899 <vscale x 2 x i64> %0,
900 <vscale x 2 x i64> %1,
903 ret <vscale x 2 x i64> %a
906 declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
913 define <vscale x 2 x i64> @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
914 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
915 ; CHECK: # %bb.0: # %entry
916 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
917 ; CHECK-NEXT: vsub.vv v8, v10, v12, v0.t
920 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64(
921 <vscale x 2 x i64> %0,
922 <vscale x 2 x i64> %1,
923 <vscale x 2 x i64> %2,
924 <vscale x 2 x i1> %3,
927 ret <vscale x 2 x i64> %a
930 declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
936 define <vscale x 4 x i64> @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
937 ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64:
938 ; CHECK: # %bb.0: # %entry
939 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
940 ; CHECK-NEXT: vsub.vv v8, v8, v12
943 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64(
944 <vscale x 4 x i64> undef,
945 <vscale x 4 x i64> %0,
946 <vscale x 4 x i64> %1,
949 ret <vscale x 4 x i64> %a
952 declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
959 define <vscale x 4 x i64> @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
960 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
961 ; CHECK: # %bb.0: # %entry
962 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
963 ; CHECK-NEXT: vsub.vv v8, v12, v16, v0.t
966 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64(
967 <vscale x 4 x i64> %0,
968 <vscale x 4 x i64> %1,
969 <vscale x 4 x i64> %2,
970 <vscale x 4 x i1> %3,
973 ret <vscale x 4 x i64> %a
976 declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
982 define <vscale x 8 x i64> @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
983 ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64:
984 ; CHECK: # %bb.0: # %entry
985 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
986 ; CHECK-NEXT: vsub.vv v8, v8, v16
989 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64(
990 <vscale x 8 x i64> undef,
991 <vscale x 8 x i64> %0,
992 <vscale x 8 x i64> %1,
995 ret <vscale x 8 x i64> %a
998 declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
1005 define <vscale x 8 x i64> @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1006 ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64:
1007 ; CHECK: # %bb.0: # %entry
1008 ; CHECK-NEXT: vl8re64.v v24, (a0)
1009 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
1010 ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t
1013 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64(
1014 <vscale x 8 x i64> %0,
1015 <vscale x 8 x i64> %1,
1016 <vscale x 8 x i64> %2,
1017 <vscale x 8 x i1> %3,
1020 ret <vscale x 8 x i64> %a
1023 declare <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
1029 define <vscale x 1 x i8> @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
1030 ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i8_nxv1i8_i8:
1031 ; CHECK: # %bb.0: # %entry
1032 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1033 ; CHECK-NEXT: vsub.vx v8, v8, a0
1036 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
1037 <vscale x 1 x i8> undef,
1038 <vscale x 1 x i8> %0,
1042 ret <vscale x 1 x i8> %a
1045 declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
1052 define <vscale x 1 x i8> @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1053 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8:
1054 ; CHECK: # %bb.0: # %entry
1055 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
1056 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1059 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
1060 <vscale x 1 x i8> %0,
1061 <vscale x 1 x i8> %1,
1063 <vscale x 1 x i1> %3,
1066 ret <vscale x 1 x i8> %a
1069 declare <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
1075 define <vscale x 2 x i8> @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
1076 ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i8_nxv2i8_i8:
1077 ; CHECK: # %bb.0: # %entry
1078 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1079 ; CHECK-NEXT: vsub.vx v8, v8, a0
1082 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
1083 <vscale x 2 x i8> undef,
1084 <vscale x 2 x i8> %0,
1088 ret <vscale x 2 x i8> %a
1091 declare <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
1098 define <vscale x 2 x i8> @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1099 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8:
1100 ; CHECK: # %bb.0: # %entry
1101 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
1102 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1105 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
1106 <vscale x 2 x i8> %0,
1107 <vscale x 2 x i8> %1,
1109 <vscale x 2 x i1> %3,
1112 ret <vscale x 2 x i8> %a
1115 declare <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
1121 define <vscale x 4 x i8> @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
1122 ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i8_nxv4i8_i8:
1123 ; CHECK: # %bb.0: # %entry
1124 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1125 ; CHECK-NEXT: vsub.vx v8, v8, a0
1128 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
1129 <vscale x 4 x i8> undef,
1130 <vscale x 4 x i8> %0,
1134 ret <vscale x 4 x i8> %a
1137 declare <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
1144 define <vscale x 4 x i8> @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1145 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8:
1146 ; CHECK: # %bb.0: # %entry
1147 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1148 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1151 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
1152 <vscale x 4 x i8> %0,
1153 <vscale x 4 x i8> %1,
1155 <vscale x 4 x i1> %3,
1158 ret <vscale x 4 x i8> %a
1161 declare <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
1167 define <vscale x 8 x i8> @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
1168 ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i8_nxv8i8_i8:
1169 ; CHECK: # %bb.0: # %entry
1170 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1171 ; CHECK-NEXT: vsub.vx v8, v8, a0
1174 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
1175 <vscale x 8 x i8> undef,
1176 <vscale x 8 x i8> %0,
1180 ret <vscale x 8 x i8> %a
1183 declare <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
1190 define <vscale x 8 x i8> @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1191 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8:
1192 ; CHECK: # %bb.0: # %entry
1193 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1194 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1197 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
1198 <vscale x 8 x i8> %0,
1199 <vscale x 8 x i8> %1,
1201 <vscale x 8 x i1> %3,
1204 ret <vscale x 8 x i8> %a
1207 declare <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
1213 define <vscale x 16 x i8> @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
1214 ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i8_nxv16i8_i8:
1215 ; CHECK: # %bb.0: # %entry
1216 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1217 ; CHECK-NEXT: vsub.vx v8, v8, a0
1220 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
1221 <vscale x 16 x i8> undef,
1222 <vscale x 16 x i8> %0,
1226 ret <vscale x 16 x i8> %a
1229 declare <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
1236 define <vscale x 16 x i8> @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1237 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8:
1238 ; CHECK: # %bb.0: # %entry
1239 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
1240 ; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t
1243 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
1244 <vscale x 16 x i8> %0,
1245 <vscale x 16 x i8> %1,
1247 <vscale x 16 x i1> %3,
1250 ret <vscale x 16 x i8> %a
1253 declare <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
1259 define <vscale x 32 x i8> @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
1260 ; CHECK-LABEL: intrinsic_vsub_vx_nxv32i8_nxv32i8_i8:
1261 ; CHECK: # %bb.0: # %entry
1262 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1263 ; CHECK-NEXT: vsub.vx v8, v8, a0
1266 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
1267 <vscale x 32 x i8> undef,
1268 <vscale x 32 x i8> %0,
1272 ret <vscale x 32 x i8> %a
1275 declare <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
1282 define <vscale x 32 x i8> @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1283 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8:
1284 ; CHECK: # %bb.0: # %entry
1285 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
1286 ; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t
1289 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
1290 <vscale x 32 x i8> %0,
1291 <vscale x 32 x i8> %1,
1293 <vscale x 32 x i1> %3,
1296 ret <vscale x 32 x i8> %a
1299 declare <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
1305 define <vscale x 64 x i8> @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
1306 ; CHECK-LABEL: intrinsic_vsub_vx_nxv64i8_nxv64i8_i8:
1307 ; CHECK: # %bb.0: # %entry
1308 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
1309 ; CHECK-NEXT: vsub.vx v8, v8, a0
1312 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
1313 <vscale x 64 x i8> undef,
1314 <vscale x 64 x i8> %0,
1318 ret <vscale x 64 x i8> %a
1321 declare <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
1328 define <vscale x 64 x i8> @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
1329 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8:
1330 ; CHECK: # %bb.0: # %entry
1331 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
1332 ; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t
1335 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
1336 <vscale x 64 x i8> %0,
1337 <vscale x 64 x i8> %1,
1339 <vscale x 64 x i1> %3,
1342 ret <vscale x 64 x i8> %a
1345 declare <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
1351 define <vscale x 1 x i16> @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
1352 ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i16_nxv1i16_i16:
1353 ; CHECK: # %bb.0: # %entry
1354 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1355 ; CHECK-NEXT: vsub.vx v8, v8, a0
1358 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
1359 <vscale x 1 x i16> undef,
1360 <vscale x 1 x i16> %0,
1364 ret <vscale x 1 x i16> %a
1367 declare <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
1374 define <vscale x 1 x i16> @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1375 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16:
1376 ; CHECK: # %bb.0: # %entry
1377 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1378 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1381 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
1382 <vscale x 1 x i16> %0,
1383 <vscale x 1 x i16> %1,
1385 <vscale x 1 x i1> %3,
1388 ret <vscale x 1 x i16> %a
1391 declare <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
1397 define <vscale x 2 x i16> @intrinsic_vsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
1398 ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i16_nxv2i16_i16:
1399 ; CHECK: # %bb.0: # %entry
1400 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1401 ; CHECK-NEXT: vsub.vx v8, v8, a0
1404 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
1405 <vscale x 2 x i16> undef,
1406 <vscale x 2 x i16> %0,
1410 ret <vscale x 2 x i16> %a
1413 declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
1420 define <vscale x 2 x i16> @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1421 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16:
1422 ; CHECK: # %bb.0: # %entry
1423 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
1424 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1427 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
1428 <vscale x 2 x i16> %0,
1429 <vscale x 2 x i16> %1,
1431 <vscale x 2 x i1> %3,
1434 ret <vscale x 2 x i16> %a
1437 declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
1443 define <vscale x 4 x i16> @intrinsic_vsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
1444 ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i16_nxv4i16_i16:
1445 ; CHECK: # %bb.0: # %entry
1446 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1447 ; CHECK-NEXT: vsub.vx v8, v8, a0
1450 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
1451 <vscale x 4 x i16> undef,
1452 <vscale x 4 x i16> %0,
1456 ret <vscale x 4 x i16> %a
1459 declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
1466 define <vscale x 4 x i16> @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1467 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16:
1468 ; CHECK: # %bb.0: # %entry
1469 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1470 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1473 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
1474 <vscale x 4 x i16> %0,
1475 <vscale x 4 x i16> %1,
1477 <vscale x 4 x i1> %3,
1480 ret <vscale x 4 x i16> %a
1483 declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
1489 define <vscale x 8 x i16> @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
1490 ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i16_nxv8i16_i16:
1491 ; CHECK: # %bb.0: # %entry
1492 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1493 ; CHECK-NEXT: vsub.vx v8, v8, a0
1496 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
1497 <vscale x 8 x i16> undef,
1498 <vscale x 8 x i16> %0,
1502 ret <vscale x 8 x i16> %a
1505 declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
1512 define <vscale x 8 x i16> @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1513 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16:
1514 ; CHECK: # %bb.0: # %entry
1515 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1516 ; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t
1519 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
1520 <vscale x 8 x i16> %0,
1521 <vscale x 8 x i16> %1,
1523 <vscale x 8 x i1> %3,
1526 ret <vscale x 8 x i16> %a
1529 declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
1530 <vscale x 16 x i16>,
1531 <vscale x 16 x i16>,
1535 define <vscale x 16 x i16> @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
1536 ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i16_nxv16i16_i16:
1537 ; CHECK: # %bb.0: # %entry
1538 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1539 ; CHECK-NEXT: vsub.vx v8, v8, a0
1542 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
1543 <vscale x 16 x i16> undef,
1544 <vscale x 16 x i16> %0,
1548 ret <vscale x 16 x i16> %a
1551 declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
1552 <vscale x 16 x i16>,
1553 <vscale x 16 x i16>,
1558 define <vscale x 16 x i16> @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1559 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16:
1560 ; CHECK: # %bb.0: # %entry
1561 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1562 ; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t
1565 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
1566 <vscale x 16 x i16> %0,
1567 <vscale x 16 x i16> %1,
1569 <vscale x 16 x i1> %3,
1572 ret <vscale x 16 x i16> %a
1575 declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
1576 <vscale x 32 x i16>,
1577 <vscale x 32 x i16>,
1581 define <vscale x 32 x i16> @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
1582 ; CHECK-LABEL: intrinsic_vsub_vx_nxv32i16_nxv32i16_i16:
1583 ; CHECK: # %bb.0: # %entry
1584 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1585 ; CHECK-NEXT: vsub.vx v8, v8, a0
1588 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
1589 <vscale x 32 x i16> undef,
1590 <vscale x 32 x i16> %0,
1594 ret <vscale x 32 x i16> %a
1597 declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
1598 <vscale x 32 x i16>,
1599 <vscale x 32 x i16>,
1604 define <vscale x 32 x i16> @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1605 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16:
1606 ; CHECK: # %bb.0: # %entry
1607 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
1608 ; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t
1611 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
1612 <vscale x 32 x i16> %0,
1613 <vscale x 32 x i16> %1,
1615 <vscale x 32 x i1> %3,
1618 ret <vscale x 32 x i16> %a
1621 declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
1627 define <vscale x 1 x i32> @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
1628 ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i32_nxv1i32_i32:
1629 ; CHECK: # %bb.0: # %entry
1630 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1631 ; CHECK-NEXT: vsub.vx v8, v8, a0
1634 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
1635 <vscale x 1 x i32> undef,
1636 <vscale x 1 x i32> %0,
1640 ret <vscale x 1 x i32> %a
1643 declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
1650 define <vscale x 1 x i32> @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1651 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32:
1652 ; CHECK: # %bb.0: # %entry
1653 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1654 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1657 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
1658 <vscale x 1 x i32> %0,
1659 <vscale x 1 x i32> %1,
1661 <vscale x 1 x i1> %3,
1664 ret <vscale x 1 x i32> %a
1667 declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
1673 define <vscale x 2 x i32> @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
1674 ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i32_nxv2i32_i32:
1675 ; CHECK: # %bb.0: # %entry
1676 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1677 ; CHECK-NEXT: vsub.vx v8, v8, a0
1680 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
1681 <vscale x 2 x i32> undef,
1682 <vscale x 2 x i32> %0,
1686 ret <vscale x 2 x i32> %a
1689 declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
1696 define <vscale x 2 x i32> @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1697 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32:
1698 ; CHECK: # %bb.0: # %entry
1699 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1700 ; CHECK-NEXT: vsub.vx v8, v9, a0, v0.t
1703 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
1704 <vscale x 2 x i32> %0,
1705 <vscale x 2 x i32> %1,
1707 <vscale x 2 x i1> %3,
1710 ret <vscale x 2 x i32> %a
1713 declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
1719 define <vscale x 4 x i32> @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
1720 ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i32_nxv4i32_i32:
1721 ; CHECK: # %bb.0: # %entry
1722 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1723 ; CHECK-NEXT: vsub.vx v8, v8, a0
1726 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
1727 <vscale x 4 x i32> undef,
1728 <vscale x 4 x i32> %0,
1732 ret <vscale x 4 x i32> %a
1735 declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
1742 define <vscale x 4 x i32> @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1743 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32:
1744 ; CHECK: # %bb.0: # %entry
1745 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1746 ; CHECK-NEXT: vsub.vx v8, v10, a0, v0.t
1749 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
1750 <vscale x 4 x i32> %0,
1751 <vscale x 4 x i32> %1,
1753 <vscale x 4 x i1> %3,
1756 ret <vscale x 4 x i32> %a
1759 declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
1765 define <vscale x 8 x i32> @intrinsic_vsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
1766 ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i32_nxv8i32_i32:
1767 ; CHECK: # %bb.0: # %entry
1768 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1769 ; CHECK-NEXT: vsub.vx v8, v8, a0
1772 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
1773 <vscale x 8 x i32> undef,
1774 <vscale x 8 x i32> %0,
1778 ret <vscale x 8 x i32> %a
1781 declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
1788 define <vscale x 8 x i32> @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1789 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32:
1790 ; CHECK: # %bb.0: # %entry
1791 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1792 ; CHECK-NEXT: vsub.vx v8, v12, a0, v0.t
1795 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
1796 <vscale x 8 x i32> %0,
1797 <vscale x 8 x i32> %1,
1799 <vscale x 8 x i1> %3,
1802 ret <vscale x 8 x i32> %a
1805 declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
1806 <vscale x 16 x i32>,
1807 <vscale x 16 x i32>,
1811 define <vscale x 16 x i32> @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
1812 ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i32_nxv16i32_i32:
1813 ; CHECK: # %bb.0: # %entry
1814 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1815 ; CHECK-NEXT: vsub.vx v8, v8, a0
1818 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
1819 <vscale x 16 x i32> undef,
1820 <vscale x 16 x i32> %0,
1824 ret <vscale x 16 x i32> %a
1827 declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
1828 <vscale x 16 x i32>,
1829 <vscale x 16 x i32>,
1834 define <vscale x 16 x i32> @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1835 ; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32:
1836 ; CHECK: # %bb.0: # %entry
1837 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
1838 ; CHECK-NEXT: vsub.vx v8, v16, a0, v0.t
1841 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
1842 <vscale x 16 x i32> %0,
1843 <vscale x 16 x i32> %1,
1845 <vscale x 16 x i1> %3,
1848 ret <vscale x 16 x i32> %a
1851 declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
1857 define <vscale x 1 x i64> @intrinsic_vsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
1858 ; RV32-LABEL: intrinsic_vsub_vx_nxv1i64_nxv1i64_i64:
1859 ; RV32: # %bb.0: # %entry
1860 ; RV32-NEXT: addi sp, sp, -16
1861 ; RV32-NEXT: sw a1, 12(sp)
1862 ; RV32-NEXT: sw a0, 8(sp)
1863 ; RV32-NEXT: addi a0, sp, 8
1864 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
1865 ; RV32-NEXT: vlse64.v v9, (a0), zero
1866 ; RV32-NEXT: vsub.vv v8, v8, v9
1867 ; RV32-NEXT: addi sp, sp, 16
1870 ; RV64-LABEL: intrinsic_vsub_vx_nxv1i64_nxv1i64_i64:
1871 ; RV64: # %bb.0: # %entry
1872 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
1873 ; RV64-NEXT: vsub.vx v8, v8, a0
1876 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
1877 <vscale x 1 x i64> undef,
1878 <vscale x 1 x i64> %0,
1882 ret <vscale x 1 x i64> %a
1885 declare <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
1892 define <vscale x 1 x i64> @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1893 ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64:
1894 ; RV32: # %bb.0: # %entry
1895 ; RV32-NEXT: addi sp, sp, -16
1896 ; RV32-NEXT: sw a1, 12(sp)
1897 ; RV32-NEXT: sw a0, 8(sp)
1898 ; RV32-NEXT: addi a0, sp, 8
1899 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
1900 ; RV32-NEXT: vlse64.v v10, (a0), zero
1901 ; RV32-NEXT: vsub.vv v8, v9, v10, v0.t
1902 ; RV32-NEXT: addi sp, sp, 16
1905 ; RV64-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64:
1906 ; RV64: # %bb.0: # %entry
1907 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
1908 ; RV64-NEXT: vsub.vx v8, v9, a0, v0.t
1911 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
1912 <vscale x 1 x i64> %0,
1913 <vscale x 1 x i64> %1,
1915 <vscale x 1 x i1> %3,
1918 ret <vscale x 1 x i64> %a
1921 declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
1927 define <vscale x 2 x i64> @intrinsic_vsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
1928 ; RV32-LABEL: intrinsic_vsub_vx_nxv2i64_nxv2i64_i64:
1929 ; RV32: # %bb.0: # %entry
1930 ; RV32-NEXT: addi sp, sp, -16
1931 ; RV32-NEXT: sw a1, 12(sp)
1932 ; RV32-NEXT: sw a0, 8(sp)
1933 ; RV32-NEXT: addi a0, sp, 8
1934 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1935 ; RV32-NEXT: vlse64.v v10, (a0), zero
1936 ; RV32-NEXT: vsub.vv v8, v8, v10
1937 ; RV32-NEXT: addi sp, sp, 16
1940 ; RV64-LABEL: intrinsic_vsub_vx_nxv2i64_nxv2i64_i64:
1941 ; RV64: # %bb.0: # %entry
1942 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1943 ; RV64-NEXT: vsub.vx v8, v8, a0
1946 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
1947 <vscale x 2 x i64> undef,
1948 <vscale x 2 x i64> %0,
1952 ret <vscale x 2 x i64> %a
1955 declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
1962 define <vscale x 2 x i64> @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1963 ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64:
1964 ; RV32: # %bb.0: # %entry
1965 ; RV32-NEXT: addi sp, sp, -16
1966 ; RV32-NEXT: sw a1, 12(sp)
1967 ; RV32-NEXT: sw a0, 8(sp)
1968 ; RV32-NEXT: addi a0, sp, 8
1969 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
1970 ; RV32-NEXT: vlse64.v v12, (a0), zero
1971 ; RV32-NEXT: vsub.vv v8, v10, v12, v0.t
1972 ; RV32-NEXT: addi sp, sp, 16
1975 ; RV64-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64:
1976 ; RV64: # %bb.0: # %entry
1977 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
1978 ; RV64-NEXT: vsub.vx v8, v10, a0, v0.t
1981 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
1982 <vscale x 2 x i64> %0,
1983 <vscale x 2 x i64> %1,
1985 <vscale x 2 x i1> %3,
1988 ret <vscale x 2 x i64> %a
1991 declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
1997 define <vscale x 4 x i64> @intrinsic_vsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
1998 ; RV32-LABEL: intrinsic_vsub_vx_nxv4i64_nxv4i64_i64:
1999 ; RV32: # %bb.0: # %entry
2000 ; RV32-NEXT: addi sp, sp, -16
2001 ; RV32-NEXT: sw a1, 12(sp)
2002 ; RV32-NEXT: sw a0, 8(sp)
2003 ; RV32-NEXT: addi a0, sp, 8
2004 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
2005 ; RV32-NEXT: vlse64.v v12, (a0), zero
2006 ; RV32-NEXT: vsub.vv v8, v8, v12
2007 ; RV32-NEXT: addi sp, sp, 16
2010 ; RV64-LABEL: intrinsic_vsub_vx_nxv4i64_nxv4i64_i64:
2011 ; RV64: # %bb.0: # %entry
2012 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
2013 ; RV64-NEXT: vsub.vx v8, v8, a0
2016 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
2017 <vscale x 4 x i64> undef,
2018 <vscale x 4 x i64> %0,
2022 ret <vscale x 4 x i64> %a
2025 declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
2032 define <vscale x 4 x i64> @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
2033 ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64:
2034 ; RV32: # %bb.0: # %entry
2035 ; RV32-NEXT: addi sp, sp, -16
2036 ; RV32-NEXT: sw a1, 12(sp)
2037 ; RV32-NEXT: sw a0, 8(sp)
2038 ; RV32-NEXT: addi a0, sp, 8
2039 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
2040 ; RV32-NEXT: vlse64.v v16, (a0), zero
2041 ; RV32-NEXT: vsub.vv v8, v12, v16, v0.t
2042 ; RV32-NEXT: addi sp, sp, 16
2045 ; RV64-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64:
2046 ; RV64: # %bb.0: # %entry
2047 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
2048 ; RV64-NEXT: vsub.vx v8, v12, a0, v0.t
2051 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
2052 <vscale x 4 x i64> %0,
2053 <vscale x 4 x i64> %1,
2055 <vscale x 4 x i1> %3,
2058 ret <vscale x 4 x i64> %a
2061 declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
2067 define <vscale x 8 x i64> @intrinsic_vsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
2068 ; RV32-LABEL: intrinsic_vsub_vx_nxv8i64_nxv8i64_i64:
2069 ; RV32: # %bb.0: # %entry
2070 ; RV32-NEXT: addi sp, sp, -16
2071 ; RV32-NEXT: sw a1, 12(sp)
2072 ; RV32-NEXT: sw a0, 8(sp)
2073 ; RV32-NEXT: addi a0, sp, 8
2074 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
2075 ; RV32-NEXT: vlse64.v v16, (a0), zero
2076 ; RV32-NEXT: vsub.vv v8, v8, v16
2077 ; RV32-NEXT: addi sp, sp, 16
2080 ; RV64-LABEL: intrinsic_vsub_vx_nxv8i64_nxv8i64_i64:
2081 ; RV64: # %bb.0: # %entry
2082 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
2083 ; RV64-NEXT: vsub.vx v8, v8, a0
2086 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
2087 <vscale x 8 x i64> undef,
2088 <vscale x 8 x i64> %0,
2092 ret <vscale x 8 x i64> %a
2095 declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
2102 define <vscale x 8 x i64> @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
2103 ; RV32-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64:
2104 ; RV32: # %bb.0: # %entry
2105 ; RV32-NEXT: addi sp, sp, -16
2106 ; RV32-NEXT: sw a1, 12(sp)
2107 ; RV32-NEXT: sw a0, 8(sp)
2108 ; RV32-NEXT: addi a0, sp, 8
2109 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
2110 ; RV32-NEXT: vlse64.v v24, (a0), zero
2111 ; RV32-NEXT: vsub.vv v8, v16, v24, v0.t
2112 ; RV32-NEXT: addi sp, sp, 16
2115 ; RV64-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64:
2116 ; RV64: # %bb.0: # %entry
2117 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
2118 ; RV64-NEXT: vsub.vx v8, v16, a0, v0.t
2121 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
2122 <vscale x 8 x i64> %0,
2123 <vscale x 8 x i64> %1,
2125 <vscale x 8 x i1> %3,
2128 ret <vscale x 8 x i64> %a
2131 define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
2132 ; CHECK-LABEL: intrinsic_vsub_vi_nxv1i8_nxv1i8_i8:
2133 ; CHECK: # %bb.0: # %entry
2134 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
2135 ; CHECK-NEXT: vadd.vi v8, v8, -9
2138 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
2139 <vscale x 1 x i8> undef,
2140 <vscale x 1 x i8> %0,
2144 ret <vscale x 1 x i8> %a
2147 define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2148 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
2149 ; CHECK: # %bb.0: # %entry
2150 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
2151 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2154 %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
2155 <vscale x 1 x i8> %0,
2156 <vscale x 1 x i8> %1,
2158 <vscale x 1 x i1> %2,
2161 ret <vscale x 1 x i8> %a
2164 define <vscale x 2 x i8> @intrinsic_vsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
2165 ; CHECK-LABEL: intrinsic_vsub_vi_nxv2i8_nxv2i8_i8:
2166 ; CHECK: # %bb.0: # %entry
2167 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
2168 ; CHECK-NEXT: vadd.vi v8, v8, -9
2171 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
2172 <vscale x 2 x i8> undef,
2173 <vscale x 2 x i8> %0,
2177 ret <vscale x 2 x i8> %a
2180 define <vscale x 2 x i8> @intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
2181 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8:
2182 ; CHECK: # %bb.0: # %entry
2183 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
2184 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2187 %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
2188 <vscale x 2 x i8> %0,
2189 <vscale x 2 x i8> %1,
2191 <vscale x 2 x i1> %2,
2194 ret <vscale x 2 x i8> %a
2197 define <vscale x 4 x i8> @intrinsic_vsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
2198 ; CHECK-LABEL: intrinsic_vsub_vi_nxv4i8_nxv4i8_i8:
2199 ; CHECK: # %bb.0: # %entry
2200 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
2201 ; CHECK-NEXT: vadd.vi v8, v8, -9
2204 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
2205 <vscale x 4 x i8> undef,
2206 <vscale x 4 x i8> %0,
2210 ret <vscale x 4 x i8> %a
2213 define <vscale x 4 x i8> @intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
2214 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8:
2215 ; CHECK: # %bb.0: # %entry
2216 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
2217 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2220 %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
2221 <vscale x 4 x i8> %0,
2222 <vscale x 4 x i8> %1,
2224 <vscale x 4 x i1> %2,
2227 ret <vscale x 4 x i8> %a
2230 define <vscale x 8 x i8> @intrinsic_vsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
2231 ; CHECK-LABEL: intrinsic_vsub_vi_nxv8i8_nxv8i8_i8:
2232 ; CHECK: # %bb.0: # %entry
2233 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
2234 ; CHECK-NEXT: vadd.vi v8, v8, -9
2237 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
2238 <vscale x 8 x i8> undef,
2239 <vscale x 8 x i8> %0,
2243 ret <vscale x 8 x i8> %a
2246 define <vscale x 8 x i8> @intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2247 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8:
2248 ; CHECK: # %bb.0: # %entry
2249 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
2250 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2253 %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
2254 <vscale x 8 x i8> %0,
2255 <vscale x 8 x i8> %1,
2257 <vscale x 8 x i1> %2,
2260 ret <vscale x 8 x i8> %a
2263 define <vscale x 16 x i8> @intrinsic_vsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
2264 ; CHECK-LABEL: intrinsic_vsub_vi_nxv16i8_nxv16i8_i8:
2265 ; CHECK: # %bb.0: # %entry
2266 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
2267 ; CHECK-NEXT: vadd.vi v8, v8, -9
2270 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
2271 <vscale x 16 x i8> undef,
2272 <vscale x 16 x i8> %0,
2276 ret <vscale x 16 x i8> %a
2279 define <vscale x 16 x i8> @intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
2280 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8:
2281 ; CHECK: # %bb.0: # %entry
2282 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
2283 ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
2286 %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
2287 <vscale x 16 x i8> %0,
2288 <vscale x 16 x i8> %1,
2290 <vscale x 16 x i1> %2,
2293 ret <vscale x 16 x i8> %a
2296 define <vscale x 32 x i8> @intrinsic_vsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
2297 ; CHECK-LABEL: intrinsic_vsub_vi_nxv32i8_nxv32i8_i8:
2298 ; CHECK: # %bb.0: # %entry
2299 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
2300 ; CHECK-NEXT: vadd.vi v8, v8, -9
2303 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
2304 <vscale x 32 x i8> undef,
2305 <vscale x 32 x i8> %0,
2309 ret <vscale x 32 x i8> %a
2312 define <vscale x 32 x i8> @intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
2313 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8:
2314 ; CHECK: # %bb.0: # %entry
2315 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
2316 ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
2319 %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
2320 <vscale x 32 x i8> %0,
2321 <vscale x 32 x i8> %1,
2323 <vscale x 32 x i1> %2,
2326 ret <vscale x 32 x i8> %a
2329 ; Subtracting immediate 9 is canonicalized to vadd.vi with -9 (matching
2329 ; every other unmasked vsub_vi test in this file); the previous +9 expectation
2329 ; corresponded to the masked (subtract -9) form and would not match llc output.
2329 define <vscale x 64 x i8> @intrinsic_vsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
2330 ; CHECK-LABEL: intrinsic_vsub_vi_nxv64i8_nxv64i8_i8:
2331 ; CHECK: # %bb.0: # %entry
2332 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
2333 ; CHECK-NEXT: vadd.vi v8, v8, -9
2336 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
2337 <vscale x 64 x i8> undef,
2338 <vscale x 64 x i8> %0,
2342 ret <vscale x 64 x i8> %a
2345 define <vscale x 64 x i8> @intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
2346 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8:
2347 ; CHECK: # %bb.0: # %entry
2348 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
2349 ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
2352 %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
2353 <vscale x 64 x i8> %0,
2354 <vscale x 64 x i8> %1,
2356 <vscale x 64 x i1> %2,
2359 ret <vscale x 64 x i8> %a
2362 define <vscale x 1 x i16> @intrinsic_vsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
2363 ; CHECK-LABEL: intrinsic_vsub_vi_nxv1i16_nxv1i16_i16:
2364 ; CHECK: # %bb.0: # %entry
2365 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2366 ; CHECK-NEXT: vadd.vi v8, v8, -9
2369 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
2370 <vscale x 1 x i16> undef,
2371 <vscale x 1 x i16> %0,
2375 ret <vscale x 1 x i16> %a
2378 define <vscale x 1 x i16> @intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2379 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16:
2380 ; CHECK: # %bb.0: # %entry
2381 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
2382 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2385 %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
2386 <vscale x 1 x i16> %0,
2387 <vscale x 1 x i16> %1,
2389 <vscale x 1 x i1> %2,
2392 ret <vscale x 1 x i16> %a
2395 define <vscale x 2 x i16> @intrinsic_vsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
2396 ; CHECK-LABEL: intrinsic_vsub_vi_nxv2i16_nxv2i16_i16:
2397 ; CHECK: # %bb.0: # %entry
2398 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2399 ; CHECK-NEXT: vadd.vi v8, v8, -9
2402 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
2403 <vscale x 2 x i16> undef,
2404 <vscale x 2 x i16> %0,
2408 ret <vscale x 2 x i16> %a
2411 define <vscale x 2 x i16> @intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
2412 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16:
2413 ; CHECK: # %bb.0: # %entry
2414 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
2415 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2418 %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
2419 <vscale x 2 x i16> %0,
2420 <vscale x 2 x i16> %1,
2422 <vscale x 2 x i1> %2,
2425 ret <vscale x 2 x i16> %a
2428 define <vscale x 4 x i16> @intrinsic_vsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
2429 ; CHECK-LABEL: intrinsic_vsub_vi_nxv4i16_nxv4i16_i16:
2430 ; CHECK: # %bb.0: # %entry
2431 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2432 ; CHECK-NEXT: vadd.vi v8, v8, -9
2435 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
2436 <vscale x 4 x i16> undef,
2437 <vscale x 4 x i16> %0,
2441 ret <vscale x 4 x i16> %a
2444 define <vscale x 4 x i16> @intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
2445 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16:
2446 ; CHECK: # %bb.0: # %entry
2447 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
2448 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2451 %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
2452 <vscale x 4 x i16> %0,
2453 <vscale x 4 x i16> %1,
2455 <vscale x 4 x i1> %2,
2458 ret <vscale x 4 x i16> %a
2461 define <vscale x 8 x i16> @intrinsic_vsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
2462 ; CHECK-LABEL: intrinsic_vsub_vi_nxv8i16_nxv8i16_i16:
2463 ; CHECK: # %bb.0: # %entry
2464 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2465 ; CHECK-NEXT: vadd.vi v8, v8, -9
2468 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
2469 <vscale x 8 x i16> undef,
2470 <vscale x 8 x i16> %0,
2474 ret <vscale x 8 x i16> %a
2477 define <vscale x 8 x i16> @intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2478 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16:
2479 ; CHECK: # %bb.0: # %entry
2480 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
2481 ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
2484 %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
2485 <vscale x 8 x i16> %0,
2486 <vscale x 8 x i16> %1,
2488 <vscale x 8 x i1> %2,
2491 ret <vscale x 8 x i16> %a
2494 define <vscale x 16 x i16> @intrinsic_vsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
2495 ; CHECK-LABEL: intrinsic_vsub_vi_nxv16i16_nxv16i16_i16:
2496 ; CHECK: # %bb.0: # %entry
2497 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2498 ; CHECK-NEXT: vadd.vi v8, v8, -9
2501 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
2502 <vscale x 16 x i16> undef,
2503 <vscale x 16 x i16> %0,
2507 ret <vscale x 16 x i16> %a
2510 define <vscale x 16 x i16> @intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
2511 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16:
2512 ; CHECK: # %bb.0: # %entry
2513 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
2514 ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
2517 %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
2518 <vscale x 16 x i16> %0,
2519 <vscale x 16 x i16> %1,
2521 <vscale x 16 x i1> %2,
2524 ret <vscale x 16 x i16> %a
2527 define <vscale x 32 x i16> @intrinsic_vsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
2528 ; CHECK-LABEL: intrinsic_vsub_vi_nxv32i16_nxv32i16_i16:
2529 ; CHECK: # %bb.0: # %entry
2530 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2531 ; CHECK-NEXT: vadd.vi v8, v8, -9
2534 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
2535 <vscale x 32 x i16> undef,
2536 <vscale x 32 x i16> %0,
2540 ret <vscale x 32 x i16> %a
2543 define <vscale x 32 x i16> @intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
2544 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16:
2545 ; CHECK: # %bb.0: # %entry
2546 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
2547 ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
2550 %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
2551 <vscale x 32 x i16> %0,
2552 <vscale x 32 x i16> %1,
2554 <vscale x 32 x i1> %2,
2557 ret <vscale x 32 x i16> %a
2560 define <vscale x 1 x i32> @intrinsic_vsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
2561 ; CHECK-LABEL: intrinsic_vsub_vi_nxv1i32_nxv1i32_i32:
2562 ; CHECK: # %bb.0: # %entry
2563 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2564 ; CHECK-NEXT: vadd.vi v8, v8, -9
2567 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
2568 <vscale x 1 x i32> undef,
2569 <vscale x 1 x i32> %0,
2573 ret <vscale x 1 x i32> %a
2576 define <vscale x 1 x i32> @intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2577 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32:
2578 ; CHECK: # %bb.0: # %entry
2579 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
2580 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2583 %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
2584 <vscale x 1 x i32> %0,
2585 <vscale x 1 x i32> %1,
2587 <vscale x 1 x i1> %2,
2590 ret <vscale x 1 x i32> %a
2593 define <vscale x 2 x i32> @intrinsic_vsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
2594 ; CHECK-LABEL: intrinsic_vsub_vi_nxv2i32_nxv2i32_i32:
2595 ; CHECK: # %bb.0: # %entry
2596 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2597 ; CHECK-NEXT: vadd.vi v8, v8, -9
2600 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
2601 <vscale x 2 x i32> undef,
2602 <vscale x 2 x i32> %0,
2606 ret <vscale x 2 x i32> %a
2609 define <vscale x 2 x i32> @intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
2610 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32:
2611 ; CHECK: # %bb.0: # %entry
2612 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
2613 ; CHECK-NEXT: vadd.vi v8, v9, 9, v0.t
2616 %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
2617 <vscale x 2 x i32> %0,
2618 <vscale x 2 x i32> %1,
2620 <vscale x 2 x i1> %2,
2623 ret <vscale x 2 x i32> %a
2626 define <vscale x 4 x i32> @intrinsic_vsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
2627 ; CHECK-LABEL: intrinsic_vsub_vi_nxv4i32_nxv4i32_i32:
2628 ; CHECK: # %bb.0: # %entry
2629 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2630 ; CHECK-NEXT: vadd.vi v8, v8, -9
2633 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
2634 <vscale x 4 x i32> undef,
2635 <vscale x 4 x i32> %0,
2639 ret <vscale x 4 x i32> %a
2642 define <vscale x 4 x i32> @intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
2643 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32:
2644 ; CHECK: # %bb.0: # %entry
2645 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
2646 ; CHECK-NEXT: vadd.vi v8, v10, 9, v0.t
2649 %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
2650 <vscale x 4 x i32> %0,
2651 <vscale x 4 x i32> %1,
2653 <vscale x 4 x i1> %2,
2656 ret <vscale x 4 x i32> %a
2659 define <vscale x 8 x i32> @intrinsic_vsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
2660 ; CHECK-LABEL: intrinsic_vsub_vi_nxv8i32_nxv8i32_i32:
2661 ; CHECK: # %bb.0: # %entry
2662 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2663 ; CHECK-NEXT: vadd.vi v8, v8, -9
2666 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
2667 <vscale x 8 x i32> undef,
2668 <vscale x 8 x i32> %0,
2672 ret <vscale x 8 x i32> %a
2675 define <vscale x 8 x i32> @intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2676 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32:
2677 ; CHECK: # %bb.0: # %entry
2678 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
2679 ; CHECK-NEXT: vadd.vi v8, v12, 9, v0.t
2682 %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
2683 <vscale x 8 x i32> %0,
2684 <vscale x 8 x i32> %1,
2686 <vscale x 8 x i1> %2,
2689 ret <vscale x 8 x i32> %a
2692 define <vscale x 16 x i32> @intrinsic_vsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
2693 ; CHECK-LABEL: intrinsic_vsub_vi_nxv16i32_nxv16i32_i32:
2694 ; CHECK: # %bb.0: # %entry
2695 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2696 ; CHECK-NEXT: vadd.vi v8, v8, -9
2699 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
2700 <vscale x 16 x i32> undef,
2701 <vscale x 16 x i32> %0,
2705 ret <vscale x 16 x i32> %a
2708 define <vscale x 16 x i32> @intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
2709 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32:
2710 ; CHECK: # %bb.0: # %entry
2711 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
2712 ; CHECK-NEXT: vadd.vi v8, v16, 9, v0.t
2715 %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
2716 <vscale x 16 x i32> %0,
2717 <vscale x 16 x i32> %1,
2719 <vscale x 16 x i1> %2,
2722 ret <vscale x 16 x i32> %a
2725 define <vscale x 1 x i64> @intrinsic_vsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
2726 ; CHECK-LABEL: intrinsic_vsub_vi_nxv1i64_nxv1i64_i64:
2727 ; CHECK: # %bb.0: # %entry
2728 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2729 ; CHECK-NEXT: vadd.vi v8, v8, -9
2732 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
2733 <vscale x 1 x i64> undef,
2734 <vscale x 1 x i64> %0,
2738 ret <vscale x 1 x i64> %a
2741 define <vscale x 1 x i64> @intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2742 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64:
2743 ; CHECK: # %bb.0: # %entry
2744 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
2745 ; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t
2748 %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
2749 <vscale x 1 x i64> %0,
2750 <vscale x 1 x i64> %1,
2752 <vscale x 1 x i1> %2,
2755 ret <vscale x 1 x i64> %a
2758 define <vscale x 2 x i64> @intrinsic_vsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
2759 ; CHECK-LABEL: intrinsic_vsub_vi_nxv2i64_nxv2i64_i64:
2760 ; CHECK: # %bb.0: # %entry
2761 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2762 ; CHECK-NEXT: vadd.vi v8, v8, -9
2765 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
2766 <vscale x 2 x i64> undef,
2767 <vscale x 2 x i64> %0,
2771 ret <vscale x 2 x i64> %a
2774 define <vscale x 2 x i64> @intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
2775 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64:
2776 ; CHECK: # %bb.0: # %entry
2777 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
2778 ; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t
2781 %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
2782 <vscale x 2 x i64> %0,
2783 <vscale x 2 x i64> %1,
2785 <vscale x 2 x i1> %2,
2788 ret <vscale x 2 x i64> %a
2791 define <vscale x 4 x i64> @intrinsic_vsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
2792 ; CHECK-LABEL: intrinsic_vsub_vi_nxv4i64_nxv4i64_i64:
2793 ; CHECK: # %bb.0: # %entry
2794 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2795 ; CHECK-NEXT: vadd.vi v8, v8, -9
2798 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
2799 <vscale x 4 x i64> undef,
2800 <vscale x 4 x i64> %0,
2804 ret <vscale x 4 x i64> %a
2807 define <vscale x 4 x i64> @intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
2808 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64:
2809 ; CHECK: # %bb.0: # %entry
2810 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
2811 ; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t
2814 %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
2815 <vscale x 4 x i64> %0,
2816 <vscale x 4 x i64> %1,
2818 <vscale x 4 x i1> %2,
2821 ret <vscale x 4 x i64> %a
2824 define <vscale x 8 x i64> @intrinsic_vsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
2825 ; CHECK-LABEL: intrinsic_vsub_vi_nxv8i64_nxv8i64_i64:
2826 ; CHECK: # %bb.0: # %entry
2827 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2828 ; CHECK-NEXT: vadd.vi v8, v8, -9
2831 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
2832 <vscale x 8 x i64> undef,
2833 <vscale x 8 x i64> %0,
2837 ret <vscale x 8 x i64> %a
2840 define <vscale x 8 x i64> @intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2841 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64:
2842 ; CHECK: # %bb.0: # %entry
2843 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
2844 ; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t
2847 %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
2848 <vscale x 8 x i64> %0,
2849 <vscale x 8 x i64> %1,
2851 <vscale x 8 x i1> %2,
2854 ret <vscale x 8 x i64> %a