; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
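; This file exercises the llvm.riscv.vrsub intrinsics with scalar (vx) and
; immediate (vi) operands, masked and unmasked, across element widths and
; LMULs. As the RV32 checks below show, an i64 scalar operand does not fit in
; a 32-bit GPR, so it is splatted from the stack with vlse64.v and the
; operation is lowered to vsub.vv instead of vrsub.vx.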
declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
define <vscale x 1 x i8> @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
    <vscale x 1 x i8> undef,
  ret <vscale x 1 x i8> %a
declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t
  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
  ret <vscale x 1 x i8> %a
declare <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
define <vscale x 2 x i8> @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
    <vscale x 2 x i8> undef,
  ret <vscale x 2 x i8> %a
declare <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
define <vscale x 2 x i8> @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t
  %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
  ret <vscale x 2 x i8> %a
declare <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
define <vscale x 4 x i8> @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
  ret <vscale x 4 x i8> %a
declare <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
define <vscale x 4 x i8> @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t
  %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i1> %3,
  ret <vscale x 4 x i8> %a
declare <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
define <vscale x 8 x i8> @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
  ret <vscale x 8 x i8> %a
declare <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
define <vscale x 8 x i8> @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t
  %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i1> %3,
  ret <vscale x 8 x i8> %a
declare <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
define <vscale x 16 x i8> @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
  ret <vscale x 16 x i8> %a
declare <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
define <vscale x 16 x i8> @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vrsub.vx v8, v10, a0, v0.t
  %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i1> %3,
  ret <vscale x 16 x i8> %a
declare <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
define <vscale x 32 x i8> @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
  ret <vscale x 32 x i8> %a
declare <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
define <vscale x 32 x i8> @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vrsub.vx v8, v12, a0, v0.t
  %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i1> %3,
  ret <vscale x 32 x i8> %a
declare <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
define <vscale x 64 x i8> @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
  ret <vscale x 64 x i8> %a
declare <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
define <vscale x 64 x i8> @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vrsub.vx v8, v16, a0, v0.t
  %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i1> %3,
  ret <vscale x 64 x i8> %a
declare <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
define <vscale x 1 x i16> @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
  ret <vscale x 1 x i16> %a
declare <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
define <vscale x 1 x i16> @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t
  %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %3,
  ret <vscale x 1 x i16> %a
declare <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
define <vscale x 2 x i16> @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
  ret <vscale x 2 x i16> %a
declare <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
define <vscale x 2 x i16> @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t
  %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i1> %3,
  ret <vscale x 2 x i16> %a
declare <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
define <vscale x 4 x i16> @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
  ret <vscale x 4 x i16> %a
declare <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
define <vscale x 4 x i16> @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t
  %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i1> %3,
  ret <vscale x 4 x i16> %a
declare <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
define <vscale x 8 x i16> @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
  ret <vscale x 8 x i16> %a
declare <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
define <vscale x 8 x i16> @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vrsub.vx v8, v10, a0, v0.t
  %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i1> %3,
  ret <vscale x 8 x i16> %a
declare <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
define <vscale x 16 x i16> @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
  ret <vscale x 16 x i16> %a
declare <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
define <vscale x 16 x i16> @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vrsub.vx v8, v12, a0, v0.t
  %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i1> %3,
  ret <vscale x 16 x i16> %a
declare <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
define <vscale x 32 x i16> @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
  ret <vscale x 32 x i16> %a
declare <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
define <vscale x 32 x i16> @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vrsub.vx v8, v16, a0, v0.t
  %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i1> %3,
  ret <vscale x 32 x i16> %a
declare <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
define <vscale x 1 x i32> @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
  ret <vscale x 1 x i32> %a
declare <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
define <vscale x 1 x i32> @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t
  %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %3,
  ret <vscale x 1 x i32> %a
declare <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
define <vscale x 2 x i32> @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
  ret <vscale x 2 x i32> %a
declare <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
define <vscale x 2 x i32> @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vrsub.vx v8, v9, a0, v0.t
  %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i1> %3,
  ret <vscale x 2 x i32> %a
declare <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
define <vscale x 4 x i32> @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
  ret <vscale x 4 x i32> %a
declare <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
define <vscale x 4 x i32> @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vrsub.vx v8, v10, a0, v0.t
  %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i1> %3,
  ret <vscale x 4 x i32> %a
declare <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
define <vscale x 8 x i32> @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
  ret <vscale x 8 x i32> %a
declare <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
define <vscale x 8 x i32> @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vrsub.vx v8, v12, a0, v0.t
  %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i1> %3,
  ret <vscale x 8 x i32> %a
declare <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
define <vscale x 16 x i32> @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
  ret <vscale x 16 x i32> %a
declare <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
define <vscale x 16 x i32> @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vrsub.vx v8, v16, a0, v0.t
  %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i1> %3,
  ret <vscale x 16 x i32> %a
declare <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
define <vscale x 1 x i64> @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsub.vv v8, v9, v8
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
  ret <vscale x 1 x i64> %a
declare <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
define <vscale x 1 x i64> @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsub.vv v8, v10, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vrsub.vx v8, v9, a0, v0.t
  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i1> %3,
  ret <vscale x 1 x i64> %a
declare <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
define <vscale x 2 x i64> @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsub.vv v8, v10, v8
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
  ret <vscale x 2 x i64> %a
declare <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
define <vscale x 2 x i64> @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsub.vv v8, v12, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vrsub.vx v8, v10, a0, v0.t
  %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i1> %3,
  ret <vscale x 2 x i64> %a
declare <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
define <vscale x 4 x i64> @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsub.vv v8, v12, v8
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
  ret <vscale x 4 x i64> %a
declare <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
define <vscale x 4 x i64> @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsub.vv v8, v16, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vrsub.vx v8, v12, a0, v0.t
  %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i1> %3,
  ret <vscale x 4 x i64> %a
declare <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
define <vscale x 8 x i64> @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsub.vv v8, v16, v8
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0
  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
  ret <vscale x 8 x i64> %a
declare <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
define <vscale x 8 x i64> @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsub.vv v8, v24, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV64-LABEL: intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT: vrsub.vx v8, v16, a0, v0.t
  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i1> %3,
  ret <vscale x 8 x i64> %a
define <vscale x 1 x i8> @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
  ret <vscale x 1 x i8> %a
define <vscale x 1 x i8> @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t
  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
  ret <vscale x 1 x i8> %a
define <vscale x 2 x i8> @intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
  ret <vscale x 2 x i8> %a
define <vscale x 2 x i8> @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t
  %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i1> %2,
  ret <vscale x 2 x i8> %a
define <vscale x 4 x i8> @intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
  ret <vscale x 4 x i8> %a
define <vscale x 4 x i8> @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t
  %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i1> %2,
  ret <vscale x 4 x i8> %a
define <vscale x 8 x i8> @intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
  ret <vscale x 8 x i8> %a
define <vscale x 8 x i8> @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t
  %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i1> %2,
  ret <vscale x 8 x i8> %a
define <vscale x 16 x i8> @intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
  ret <vscale x 16 x i8> %a
define <vscale x 16 x i8> @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v10, -9, v0.t
  %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i1> %2,
  ret <vscale x 16 x i8> %a
define <vscale x 32 x i8> @intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
  ret <vscale x 32 x i8> %a
define <vscale x 32 x i8> @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v12, -9, v0.t
  %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i1> %2,
  ret <vscale x 32 x i8> %a
define <vscale x 64 x i8> @intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
  ret <vscale x 64 x i8> %a
define <vscale x 64 x i8> @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vrsub.vi v8, v16, -9, v0.t
  %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i1> %2,
  ret <vscale x 64 x i8> %a
define <vscale x 1 x i16> @intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
  ret <vscale x 1 x i16> %a
define <vscale x 1 x i16> @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t
  %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
  ret <vscale x 1 x i16> %a
define <vscale x 2 x i16> @intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
  ret <vscale x 2 x i16> %a
define <vscale x 2 x i16> @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t
  %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i1> %2,
  ret <vscale x 2 x i16> %a
define <vscale x 4 x i16> @intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
  ret <vscale x 4 x i16> %a
define <vscale x 4 x i16> @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t
  %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i1> %2,
  ret <vscale x 4 x i16> %a
define <vscale x 8 x i16> @intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
  ret <vscale x 8 x i16> %a
define <vscale x 8 x i16> @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v10, -9, v0.t
  %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i1> %2,
  ret <vscale x 8 x i16> %a
define <vscale x 16 x i16> @intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
  ret <vscale x 16 x i16> %a
define <vscale x 16 x i16> @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v12, -9, v0.t
  %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i1> %2,
  ret <vscale x 16 x i16> %a
define <vscale x 32 x i16> @intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
  ret <vscale x 32 x i16> %a
define <vscale x 32 x i16> @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vrsub.vi v8, v16, -9, v0.t
  %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i1> %2,
  ret <vscale x 32 x i16> %a
define <vscale x 1 x i32> @intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
  ret <vscale x 1 x i32> %a
define <vscale x 1 x i32> @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t
  %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
  ret <vscale x 1 x i32> %a
define <vscale x 2 x i32> @intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
  ret <vscale x 2 x i32> %a
define <vscale x 2 x i32> @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vrsub.vi v8, v9, -9, v0.t
  %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i1> %2,
  ret <vscale x 2 x i32> %a
define <vscale x 4 x i32> @intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
  ret <vscale x 4 x i32> %a
define <vscale x 4 x i32> @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v10, -9, v0.t
  %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i1> %2,
  ret <vscale x 4 x i32> %a
define <vscale x 8 x i32> @intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
  ret <vscale x 8 x i32> %a
define <vscale x 8 x i32> @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v12, -9, v0.t
  %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i1> %2,
  ret <vscale x 8 x i32> %a
define <vscale x 16 x i32> @intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
  ret <vscale x 16 x i32> %a
define <vscale x 16 x i32> @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vrsub.vi v8, v16, -9, v0.t
  %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i1> %2,
  ret <vscale x 16 x i32> %a
define <vscale x 1 x i64> @intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
  ret <vscale x 1 x i64> %a
define <vscale x 1 x i64> @intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vrsub.vi v8, v9, 9, v0.t
  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i1> %2,
  ret <vscale x 1 x i64> %a
define <vscale x 2 x i64> @intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
  ret <vscale x 2 x i64> %a
define <vscale x 2 x i64> @intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v10, 9, v0.t
  %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i1> %2,
  ret <vscale x 2 x i64> %a
define <vscale x 4 x i64> @intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
  ret <vscale x 4 x i64> %a
define <vscale x 4 x i64> @intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v12, 9, v0.t
  %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i1> %2,
  ret <vscale x 4 x i64> %a
define <vscale x 8 x i64> @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 9
  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
  ret <vscale x 8 x i64> %a
define <vscale x 8 x i64> @intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vrsub.vi v8, v16, 9, v0.t
  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i1> %2,
  ret <vscale x 8 x i64> %a