1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
3 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
5 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
7 declare <vscale x 8 x i7> @llvm.vp.sub.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
; i7 (non-byte-sized) element type: sub wraps, so the low 7 bits stay correct without
; masking/sign-extending the operands — lowers to a plain e8 vsub.vx per the CHECKs.
9 define <vscale x 8 x i7> @vsub_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
10 ; CHECK-LABEL: vsub_vx_nxv8i7:
12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
13 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
15 %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
16 %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
17 %v = call <vscale x 8 x i7> @llvm.vp.sub.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
18 ret <vscale x 8 x i7> %v
21 declare <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
; Masked vector-vector vp.sub: one EVL-driven vsetvli (e8, mf8) and a masked vsub.vv.
23 define <vscale x 1 x i8> @vsub_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
24 ; CHECK-LABEL: vsub_vv_nxv1i8:
26 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
27 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
29 %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
30 ret <vscale x 1 x i8> %v
; All-true mask (splat (i1 true)): selection drops v0.t and emits an unmasked vsub.vv.
33 define <vscale x 1 x i8> @vsub_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
34 ; CHECK-LABEL: vsub_vv_nxv1i8_unmasked:
36 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
37 ; CHECK-NEXT: vsub.vv v8, v8, v9
39 %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
40 ret <vscale x 1 x i8> %v
; insertelement+shufflevector splat of the scalar folds into masked vsub.vx.
43 define <vscale x 1 x i8> @vsub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
44 ; CHECK-LABEL: vsub_vx_nxv1i8:
46 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
47 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
49 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
50 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
51 %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
52 ret <vscale x 1 x i8> %v
; Scalar splat + all-true mask: unmasked vsub.vx.
55 define <vscale x 1 x i8> @vsub_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
56 ; CHECK-LABEL: vsub_vx_nxv1i8_unmasked:
58 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
59 ; CHECK-NEXT: vsub.vx v8, v8, a0
61 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
62 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
63 %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
64 ret <vscale x 1 x i8> %v
67 declare <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
; Masked vv vp.sub at e8/mf4.
69 define <vscale x 2 x i8> @vsub_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
70 ; CHECK-LABEL: vsub_vv_nxv2i8:
72 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
73 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
75 %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
76 ret <vscale x 2 x i8> %v
; All-true mask: unmasked vsub.vv at e8/mf4.
79 define <vscale x 2 x i8> @vsub_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
80 ; CHECK-LABEL: vsub_vv_nxv2i8_unmasked:
82 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
83 ; CHECK-NEXT: vsub.vv v8, v8, v9
85 %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
86 ret <vscale x 2 x i8> %v
; Scalar splat folds into masked vsub.vx at e8/mf4.
89 define <vscale x 2 x i8> @vsub_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
90 ; CHECK-LABEL: vsub_vx_nxv2i8:
92 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
93 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
95 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
96 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
97 %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
98 ret <vscale x 2 x i8> %v
; Scalar splat + all-true mask: unmasked vsub.vx.
101 define <vscale x 2 x i8> @vsub_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
102 ; CHECK-LABEL: vsub_vx_nxv2i8_unmasked:
104 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
105 ; CHECK-NEXT: vsub.vx v8, v8, a0
107 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
108 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
109 %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
110 ret <vscale x 2 x i8> %v
113 declare <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
; Masked vv vp.sub at e8/mf2.
115 define <vscale x 4 x i8> @vsub_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
116 ; CHECK-LABEL: vsub_vv_nxv4i8:
118 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
119 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
121 %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
122 ret <vscale x 4 x i8> %v
; All-true mask: unmasked vsub.vv at e8/mf2.
125 define <vscale x 4 x i8> @vsub_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
126 ; CHECK-LABEL: vsub_vv_nxv4i8_unmasked:
128 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
129 ; CHECK-NEXT: vsub.vv v8, v8, v9
131 %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
132 ret <vscale x 4 x i8> %v
; Scalar splat folds into masked vsub.vx at e8/mf2.
135 define <vscale x 4 x i8> @vsub_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
136 ; CHECK-LABEL: vsub_vx_nxv4i8:
138 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
139 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
141 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
142 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
143 %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
144 ret <vscale x 4 x i8> %v
; Scalar splat + all-true mask: unmasked vsub.vx.
147 define <vscale x 4 x i8> @vsub_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
148 ; CHECK-LABEL: vsub_vx_nxv4i8_unmasked:
150 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
151 ; CHECK-NEXT: vsub.vx v8, v8, a0
153 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
154 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
155 %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
156 ret <vscale x 4 x i8> %v
159 declare <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8>, <vscale x 5 x i8>, <vscale x 5 x i1>, i32)
; Non-power-of-two element count (nxv5i8): note the CHECKs use m1, the LMUL of the
; containing power-of-two type, rather than a fractional LMUL.
161 define <vscale x 5 x i8> @vsub_vv_nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
162 ; CHECK-LABEL: vsub_vv_nxv5i8:
164 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
165 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
167 %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 %evl)
168 ret <vscale x 5 x i8> %v
; nxv5i8 with all-true mask: unmasked vsub.vv at e8/m1.
171 define <vscale x 5 x i8> @vsub_vv_nxv5i8_unmasked(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, i32 zeroext %evl) {
172 ; CHECK-LABEL: vsub_vv_nxv5i8_unmasked:
174 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
175 ; CHECK-NEXT: vsub.vv v8, v8, v9
177 %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> splat (i1 true), i32 %evl)
178 ret <vscale x 5 x i8> %v
; nxv5i8 scalar splat folds into masked vsub.vx at e8/m1.
181 define <vscale x 5 x i8> @vsub_vx_nxv5i8(<vscale x 5 x i8> %va, i8 %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
182 ; CHECK-LABEL: vsub_vx_nxv5i8:
184 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
185 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
187 %elt.head = insertelement <vscale x 5 x i8> poison, i8 %b, i32 0
188 %vb = shufflevector <vscale x 5 x i8> %elt.head, <vscale x 5 x i8> poison, <vscale x 5 x i32> zeroinitializer
189 %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %vb, <vscale x 5 x i1> %m, i32 %evl)
190 ret <vscale x 5 x i8> %v
; nxv5i8 scalar splat + all-true mask: unmasked vsub.vx.
193 define <vscale x 5 x i8> @vsub_vx_nxv5i8_unmasked(<vscale x 5 x i8> %va, i8 %b, i32 zeroext %evl) {
194 ; CHECK-LABEL: vsub_vx_nxv5i8_unmasked:
196 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
197 ; CHECK-NEXT: vsub.vx v8, v8, a0
199 %elt.head = insertelement <vscale x 5 x i8> poison, i8 %b, i32 0
200 %vb = shufflevector <vscale x 5 x i8> %elt.head, <vscale x 5 x i8> poison, <vscale x 5 x i32> zeroinitializer
201 %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %vb, <vscale x 5 x i1> splat (i1 true), i32 %evl)
202 ret <vscale x 5 x i8> %v
205 declare <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
; Masked vv vp.sub at e8/m1.
207 define <vscale x 8 x i8> @vsub_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
208 ; CHECK-LABEL: vsub_vv_nxv8i8:
210 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
211 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
213 %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
214 ret <vscale x 8 x i8> %v
; All-true mask: unmasked vsub.vv at e8/m1.
217 define <vscale x 8 x i8> @vsub_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
218 ; CHECK-LABEL: vsub_vv_nxv8i8_unmasked:
220 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
221 ; CHECK-NEXT: vsub.vv v8, v8, v9
223 %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
224 ret <vscale x 8 x i8> %v
; Scalar splat folds into masked vsub.vx at e8/m1.
227 define <vscale x 8 x i8> @vsub_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
228 ; CHECK-LABEL: vsub_vx_nxv8i8:
230 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
231 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
233 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
234 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
235 %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
236 ret <vscale x 8 x i8> %v
; Scalar splat + all-true mask: unmasked vsub.vx.
239 define <vscale x 8 x i8> @vsub_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
240 ; CHECK-LABEL: vsub_vx_nxv8i8_unmasked:
242 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
243 ; CHECK-NEXT: vsub.vx v8, v8, a0
245 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
246 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
247 %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
248 ret <vscale x 8 x i8> %v
251 declare <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
; Masked vv vp.sub at e8/m2 (second source register group is v10).
253 define <vscale x 16 x i8> @vsub_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
254 ; CHECK-LABEL: vsub_vv_nxv16i8:
256 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
257 ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
259 %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
260 ret <vscale x 16 x i8> %v
; All-true mask: unmasked vsub.vv at e8/m2.
263 define <vscale x 16 x i8> @vsub_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
264 ; CHECK-LABEL: vsub_vv_nxv16i8_unmasked:
266 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
267 ; CHECK-NEXT: vsub.vv v8, v8, v10
269 %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
270 ret <vscale x 16 x i8> %v
; Scalar splat folds into masked vsub.vx at e8/m2.
273 define <vscale x 16 x i8> @vsub_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
274 ; CHECK-LABEL: vsub_vx_nxv16i8:
276 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
277 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
279 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
280 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
281 %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
282 ret <vscale x 16 x i8> %v
; Scalar splat + all-true mask: unmasked vsub.vx.
285 define <vscale x 16 x i8> @vsub_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
286 ; CHECK-LABEL: vsub_vx_nxv16i8_unmasked:
288 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
289 ; CHECK-NEXT: vsub.vx v8, v8, a0
291 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
292 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
293 %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
294 ret <vscale x 16 x i8> %v
297 declare <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
; Masked vv vp.sub at e8/m4 (second source register group is v12).
299 define <vscale x 32 x i8> @vsub_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
300 ; CHECK-LABEL: vsub_vv_nxv32i8:
302 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
303 ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t
305 %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
306 ret <vscale x 32 x i8> %v
; All-true mask: unmasked vsub.vv at e8/m4.
309 define <vscale x 32 x i8> @vsub_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
310 ; CHECK-LABEL: vsub_vv_nxv32i8_unmasked:
312 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
313 ; CHECK-NEXT: vsub.vv v8, v8, v12
315 %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
316 ret <vscale x 32 x i8> %v
; Scalar splat folds into masked vsub.vx at e8/m4.
319 define <vscale x 32 x i8> @vsub_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
320 ; CHECK-LABEL: vsub_vx_nxv32i8:
322 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
323 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
325 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
326 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
327 %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
328 ret <vscale x 32 x i8> %v
; Scalar splat + all-true mask: unmasked vsub.vx.
331 define <vscale x 32 x i8> @vsub_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
332 ; CHECK-LABEL: vsub_vx_nxv32i8_unmasked:
334 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
335 ; CHECK-NEXT: vsub.vx v8, v8, a0
337 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
338 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
339 %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
340 ret <vscale x 32 x i8> %v
343 declare <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
; Masked vv vp.sub at e8/m8 (second source register group is v16).
345 define <vscale x 64 x i8> @vsub_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
346 ; CHECK-LABEL: vsub_vv_nxv64i8:
348 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
349 ; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t
351 %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
352 ret <vscale x 64 x i8> %v
; All-true mask: unmasked vsub.vv at e8/m8.
355 define <vscale x 64 x i8> @vsub_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
356 ; CHECK-LABEL: vsub_vv_nxv64i8_unmasked:
358 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
359 ; CHECK-NEXT: vsub.vv v8, v8, v16
361 %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
362 ret <vscale x 64 x i8> %v
; Scalar splat folds into masked vsub.vx at e8/m8.
365 define <vscale x 64 x i8> @vsub_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
366 ; CHECK-LABEL: vsub_vx_nxv64i8:
368 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
369 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
371 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
372 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
373 %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
374 ret <vscale x 64 x i8> %v
; Scalar splat + all-true mask: unmasked vsub.vx.
377 define <vscale x 64 x i8> @vsub_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
378 ; CHECK-LABEL: vsub_vx_nxv64i8_unmasked:
380 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
381 ; CHECK-NEXT: vsub.vx v8, v8, a0
383 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
384 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
385 %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
386 ret <vscale x 64 x i8> %v
389 declare <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
; Masked vv vp.sub at e16/mf4.
391 define <vscale x 1 x i16> @vsub_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
392 ; CHECK-LABEL: vsub_vv_nxv1i16:
394 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
395 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
397 %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
398 ret <vscale x 1 x i16> %v
; All-true mask: unmasked vsub.vv at e16/mf4.
401 define <vscale x 1 x i16> @vsub_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
402 ; CHECK-LABEL: vsub_vv_nxv1i16_unmasked:
404 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
405 ; CHECK-NEXT: vsub.vv v8, v8, v9
407 %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
408 ret <vscale x 1 x i16> %v
; Scalar splat folds into masked vsub.vx at e16/mf4.
411 define <vscale x 1 x i16> @vsub_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
412 ; CHECK-LABEL: vsub_vx_nxv1i16:
414 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
415 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
417 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
418 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
419 %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
420 ret <vscale x 1 x i16> %v
; Scalar splat + all-true mask: unmasked vsub.vx.
423 define <vscale x 1 x i16> @vsub_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
424 ; CHECK-LABEL: vsub_vx_nxv1i16_unmasked:
426 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
427 ; CHECK-NEXT: vsub.vx v8, v8, a0
429 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
430 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
431 %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
432 ret <vscale x 1 x i16> %v
435 declare <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
; Masked vv vp.sub at e16/mf2.
437 define <vscale x 2 x i16> @vsub_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
438 ; CHECK-LABEL: vsub_vv_nxv2i16:
440 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
441 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
443 %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
444 ret <vscale x 2 x i16> %v
; All-true mask: unmasked vsub.vv at e16/mf2.
447 define <vscale x 2 x i16> @vsub_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
448 ; CHECK-LABEL: vsub_vv_nxv2i16_unmasked:
450 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
451 ; CHECK-NEXT: vsub.vv v8, v8, v9
453 %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
454 ret <vscale x 2 x i16> %v
; Scalar splat folds into masked vsub.vx at e16/mf2.
457 define <vscale x 2 x i16> @vsub_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
458 ; CHECK-LABEL: vsub_vx_nxv2i16:
460 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
461 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
463 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
464 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
465 %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
466 ret <vscale x 2 x i16> %v
; Scalar splat + all-true mask: unmasked vsub.vx.
469 define <vscale x 2 x i16> @vsub_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
470 ; CHECK-LABEL: vsub_vx_nxv2i16_unmasked:
472 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
473 ; CHECK-NEXT: vsub.vx v8, v8, a0
475 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
476 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
477 %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
478 ret <vscale x 2 x i16> %v
481 declare <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
; Masked vv vp.sub at e16/m1.
483 define <vscale x 4 x i16> @vsub_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
484 ; CHECK-LABEL: vsub_vv_nxv4i16:
486 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
487 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
489 %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
490 ret <vscale x 4 x i16> %v
; All-true mask: unmasked vsub.vv at e16/m1.
493 define <vscale x 4 x i16> @vsub_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
494 ; CHECK-LABEL: vsub_vv_nxv4i16_unmasked:
496 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
497 ; CHECK-NEXT: vsub.vv v8, v8, v9
499 %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
500 ret <vscale x 4 x i16> %v
; Scalar splat folds into masked vsub.vx at e16/m1.
503 define <vscale x 4 x i16> @vsub_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
504 ; CHECK-LABEL: vsub_vx_nxv4i16:
506 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
507 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
509 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
510 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
511 %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
512 ret <vscale x 4 x i16> %v
; Scalar splat + all-true mask: unmasked vsub.vx.
515 define <vscale x 4 x i16> @vsub_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
516 ; CHECK-LABEL: vsub_vx_nxv4i16_unmasked:
518 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
519 ; CHECK-NEXT: vsub.vx v8, v8, a0
521 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
522 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
523 %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
524 ret <vscale x 4 x i16> %v
527 declare <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
; Masked vv vp.sub at e16/m2.
529 define <vscale x 8 x i16> @vsub_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
530 ; CHECK-LABEL: vsub_vv_nxv8i16:
532 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
533 ; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
535 %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
536 ret <vscale x 8 x i16> %v
; All-true mask: unmasked vsub.vv at e16/m2.
539 define <vscale x 8 x i16> @vsub_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
540 ; CHECK-LABEL: vsub_vv_nxv8i16_unmasked:
542 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
543 ; CHECK-NEXT: vsub.vv v8, v8, v10
545 %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
546 ret <vscale x 8 x i16> %v
; Scalar splat folds into masked vsub.vx at e16/m2.
549 define <vscale x 8 x i16> @vsub_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
550 ; CHECK-LABEL: vsub_vx_nxv8i16:
552 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
553 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
555 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
556 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
557 %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
558 ret <vscale x 8 x i16> %v
; Scalar splat + all-true mask: unmasked vsub.vx.
561 define <vscale x 8 x i16> @vsub_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
562 ; CHECK-LABEL: vsub_vx_nxv8i16_unmasked:
564 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
565 ; CHECK-NEXT: vsub.vx v8, v8, a0
567 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
568 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
569 %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
570 ret <vscale x 8 x i16> %v
573 declare <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
; Masked vv vp.sub at e16/m4.
575 define <vscale x 16 x i16> @vsub_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
576 ; CHECK-LABEL: vsub_vv_nxv16i16:
578 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
579 ; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t
581 %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
582 ret <vscale x 16 x i16> %v
; All-true mask: unmasked vsub.vv at e16/m4.
585 define <vscale x 16 x i16> @vsub_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
586 ; CHECK-LABEL: vsub_vv_nxv16i16_unmasked:
588 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
589 ; CHECK-NEXT: vsub.vv v8, v8, v12
591 %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
592 ret <vscale x 16 x i16> %v
; Scalar splat folds into masked vsub.vx at e16/m4.
595 define <vscale x 16 x i16> @vsub_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
596 ; CHECK-LABEL: vsub_vx_nxv16i16:
598 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
599 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
601 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
602 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
603 %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
604 ret <vscale x 16 x i16> %v
; Scalar splat + all-true mask: unmasked vsub.vx.
607 define <vscale x 16 x i16> @vsub_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
608 ; CHECK-LABEL: vsub_vx_nxv16i16_unmasked:
610 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
611 ; CHECK-NEXT: vsub.vx v8, v8, a0
613 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
614 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
615 %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
616 ret <vscale x 16 x i16> %v
619 declare <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
; Masked vv vp.sub at e16/m8.
621 define <vscale x 32 x i16> @vsub_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
622 ; CHECK-LABEL: vsub_vv_nxv32i16:
624 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
625 ; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t
627 %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
628 ret <vscale x 32 x i16> %v
; All-true mask: unmasked vsub.vv at e16/m8.
631 define <vscale x 32 x i16> @vsub_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
632 ; CHECK-LABEL: vsub_vv_nxv32i16_unmasked:
634 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
635 ; CHECK-NEXT: vsub.vv v8, v8, v16
637 %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
638 ret <vscale x 32 x i16> %v
; Scalar splat folds into masked vsub.vx at e16/m8.
641 define <vscale x 32 x i16> @vsub_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
642 ; CHECK-LABEL: vsub_vx_nxv32i16:
644 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
645 ; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
647 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
648 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
649 %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
650 ret <vscale x 32 x i16> %v
; Scalar splat + all-true mask: unmasked vsub.vx.
653 define <vscale x 32 x i16> @vsub_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
654 ; CHECK-LABEL: vsub_vx_nxv32i16_unmasked:
656 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
657 ; CHECK-NEXT: vsub.vx v8, v8, a0
659 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
660 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
661 %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
662 ret <vscale x 32 x i16> %v
665 declare <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
; Masked vv vp.sub at e32/mf2.
667 define <vscale x 1 x i32> @vsub_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
668 ; CHECK-LABEL: vsub_vv_nxv1i32:
670 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
671 ; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
673 %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
674 ret <vscale x 1 x i32> %v
; All-ones mask variant; expects an unmasked vsub.vv.
define <vscale x 1 x i32> @vsub_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}
; Masked vp.sub of a splatted i32 scalar; expects a masked vsub.vx.
define <vscale x 1 x i32> @vsub_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}
; All-ones mask variant; expects an unmasked vsub.vx.
define <vscale x 1 x i32> @vsub_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}
declare <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)

; Masked vector-vector vp.sub at nxv2i32 (e32/m1).
define <vscale x 2 x i32> @vsub_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}
; All-ones mask variant; expects an unmasked vsub.vv.
define <vscale x 2 x i32> @vsub_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
; Masked vp.sub of a splatted i32 scalar; expects a masked vsub.vx.
define <vscale x 2 x i32> @vsub_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}
; All-ones mask variant; expects an unmasked vsub.vx.
define <vscale x 2 x i32> @vsub_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
declare <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)

; Masked vector-vector vp.sub at nxv4i32 (e32/m2).
define <vscale x 4 x i32> @vsub_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}
; All-ones mask variant; expects an unmasked vsub.vv.
define <vscale x 4 x i32> @vsub_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}
; Masked vp.sub of a splatted i32 scalar; expects a masked vsub.vx.
define <vscale x 4 x i32> @vsub_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}
; All-ones mask variant; expects an unmasked vsub.vx.
define <vscale x 4 x i32> @vsub_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}
declare <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)

; Masked vector-vector vp.sub at nxv8i32 (e32/m4).
define <vscale x 8 x i32> @vsub_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}
; All-ones mask variant; expects an unmasked vsub.vv.
define <vscale x 8 x i32> @vsub_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}
; Masked vp.sub of a splatted i32 scalar; expects a masked vsub.vx.
define <vscale x 8 x i32> @vsub_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}
; All-ones mask variant; expects an unmasked vsub.vx.
define <vscale x 8 x i32> @vsub_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}
declare <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)

; Masked vector-vector vp.sub at nxv16i32 (e32/m8).
define <vscale x 16 x i32> @vsub_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}
; All-ones mask variant; expects an unmasked vsub.vv.
define <vscale x 16 x i32> @vsub_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}
; Masked vp.sub of a splatted i32 scalar; expects a masked vsub.vx.
define <vscale x 16 x i32> @vsub_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}
; All-ones mask variant; expects an unmasked vsub.vx.
define <vscale x 16 x i32> @vsub_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}
declare <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)

; Masked vector-vector vp.sub at nxv1i64 (e64/m1).
define <vscale x 1 x i64> @vsub_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}
; All-ones mask variant; expects an unmasked vsub.vv.
define <vscale x 1 x i64> @vsub_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}
; i64 splat: on RV32 the scalar pair is spilled to the stack and splatted
; with a zero-stride vlse64; RV64 folds it into vsub.vx directly.
define <vscale x 1 x i64> @vsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vsub.vv v8, v8, v9, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}
; Unmasked i64-splat variant of vsub_vx_nxv1i64.
define <vscale x 1 x i64> @vsub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv1i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vsub.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv1i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}
declare <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)

; Masked vector-vector vp.sub at nxv2i64 (e64/m2).
define <vscale x 2 x i64> @vsub_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}
; All-ones mask variant; expects an unmasked vsub.vv.
define <vscale x 2 x i64> @vsub_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
; i64 splat under mask: RV32 builds the splat via stack + vlse64, RV64 uses vsub.vx.
define <vscale x 2 x i64> @vsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vsub.vv v8, v8, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}
; Unmasked i64-splat variant of vsub_vx_nxv2i64.
define <vscale x 2 x i64> @vsub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vsub.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
declare <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)

; Masked vector-vector vp.sub at nxv4i64 (e64/m4).
define <vscale x 4 x i64> @vsub_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}
; All-ones mask variant; expects an unmasked vsub.vv.
define <vscale x 4 x i64> @vsub_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}
; i64 splat under mask: RV32 builds the splat via stack + vlse64, RV64 uses vsub.vx.
define <vscale x 4 x i64> @vsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vsub.vv v8, v8, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}
; Unmasked i64-splat variant of vsub_vx_nxv4i64.
define <vscale x 4 x i64> @vsub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vsub.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}
declare <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)

; Masked vector-vector vp.sub at nxv8i64 (e64/m8).
define <vscale x 8 x i64> @vsub_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
; All-ones mask variant; expects an unmasked vsub.vv.
define <vscale x 8 x i64> @vsub_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}
; i64 splat under mask: RV32 builds the splat via stack + vlse64, RV64 uses vsub.vx.
define <vscale x 8 x i64> @vsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vsub.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}
; Unmasked i64-splat variant of vsub_vx_nxv8i64.
define <vscale x 8 x i64> @vsub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vsub.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}