; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
; vp.sub with the splatted operand as the minuend should lower to
; vrsub.vx / vrsub.vi. <vscale x 1 x i8> selects e8/mf8.
declare <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vrsub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vrsub_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vrsub_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> splat (i8 2), <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vrsub_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> splat (i8 2), <vscale x 1 x i8> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}
; vrsub lowering tests for <vscale x 2 x i8> (e8/mf4).
declare <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vrsub_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %vb, <vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vrsub_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %vb, <vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vrsub_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> splat (i8 2), <vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vrsub_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> splat (i8 2), <vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
; vrsub lowering tests for <vscale x 4 x i8> (e8/mf2).
declare <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vrsub_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %vb, <vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vrsub_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %vb, <vscale x 4 x i8> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vrsub_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> splat (i8 2), <vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vrsub_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> splat (i8 2), <vscale x 4 x i8> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}
; vrsub lowering tests for <vscale x 8 x i8> (e8/m1).
declare <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vrsub_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vrsub_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vrsub_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> splat (i8 2), <vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vrsub_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> splat (i8 2), <vscale x 8 x i8> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}
; vrsub lowering tests for <vscale x 16 x i8> (e8/m2).
declare <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i8> @vrsub_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %vb, <vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vrsub_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %vb, <vscale x 16 x i8> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vrsub_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vrsub_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x i8> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}
; vrsub lowering tests for <vscale x 32 x i8> (e8/m4).
declare <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vrsub_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %vb, <vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vrsub_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %vb, <vscale x 32 x i8> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vrsub_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> splat (i8 2), <vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vrsub_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> splat (i8 2), <vscale x 32 x i8> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}
; vrsub lowering tests for <vscale x 64 x i8> (e8/m8).
declare <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)

define <vscale x 64 x i8> @vrsub_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %vb, <vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vrsub_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %vb, <vscale x 64 x i8> %va, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vrsub_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> splat (i8 2), <vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vrsub_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> splat (i8 2), <vscale x 64 x i8> %va, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}
; vrsub lowering tests for <vscale x 1 x i16> (e16/mf4).
declare <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vrsub_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %vb, <vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vrsub_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %vb, <vscale x 1 x i16> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vrsub_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> splat (i16 2), <vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vrsub_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> splat (i16 2), <vscale x 1 x i16> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}
; vrsub lowering tests for <vscale x 2 x i16> (e16/mf2).
declare <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vrsub_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %vb, <vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vrsub_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %vb, <vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vrsub_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> splat (i16 2), <vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vrsub_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> splat (i16 2), <vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}
; vrsub lowering tests for <vscale x 4 x i16> (e16/m1).
declare <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vrsub_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %vb, <vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vrsub_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %vb, <vscale x 4 x i16> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vrsub_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> splat (i16 2), <vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vrsub_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> splat (i16 2), <vscale x 4 x i16> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}
; vrsub lowering tests for <vscale x 8 x i16> (e16/m2).
declare <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vrsub_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %vb, <vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vrsub_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %vb, <vscale x 8 x i16> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vrsub_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> splat (i16 2), <vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vrsub_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> splat (i16 2), <vscale x 8 x i16> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}
; vrsub lowering tests for <vscale x 16 x i16> (e16/m4).
declare <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i16> @vrsub_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %vb, <vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vrsub_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %vb, <vscale x 16 x i16> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vrsub_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> splat (i16 2), <vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vrsub_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> splat (i16 2), <vscale x 16 x i16> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}
; vrsub lowering tests for <vscale x 32 x i16> (e16/m8).
declare <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i16> @vrsub_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %vb, <vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vrsub_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %vb, <vscale x 32 x i16> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vrsub_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> splat (i16 2), <vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vrsub_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> splat (i16 2), <vscale x 32 x i16> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}
; vrsub lowering tests for <vscale x 1 x i32> (e32/mf2).
declare <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i32> @vrsub_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vrsub_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vrsub_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> splat (i32 2), <vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vrsub_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> splat (i32 2), <vscale x 1 x i32> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}
; vrsub lowering tests for <vscale x 2 x i32> (e32/m1).
declare <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vrsub_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %vb, <vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vrsub_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %vb, <vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vrsub_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> splat (i32 2), <vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vrsub_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> splat (i32 2), <vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
declare <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)

; Masked vp.sub with scalar-splat LHS selects vrsub.vx (e32, LMUL=2).
define <vscale x 4 x i32> @vrsub_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %vb, <vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

; Unmasked (all-ones mask) variant of the above.
define <vscale x 4 x i32> @vrsub_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %vb, <vscale x 4 x i32> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

; Masked vp.sub with immediate-splat LHS selects vrsub.vi.
define <vscale x 4 x i32> @vrsub_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> splat (i32 2), <vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

; Unmasked variant of the immediate form.
define <vscale x 4 x i32> @vrsub_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> splat (i32 2), <vscale x 4 x i32> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}
declare <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)

; Masked vp.sub with scalar-splat LHS selects vrsub.vx (e32, LMUL=4).
define <vscale x 8 x i32> @vrsub_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

; Unmasked (all-ones mask) variant of the above.
define <vscale x 8 x i32> @vrsub_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

; Masked vp.sub with immediate-splat LHS selects vrsub.vi.
define <vscale x 8 x i32> @vrsub_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> splat (i32 2), <vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

; Unmasked variant of the immediate form.
define <vscale x 8 x i32> @vrsub_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> splat (i32 2), <vscale x 8 x i32> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}
declare <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)

; Masked vp.sub with scalar-splat LHS selects vrsub.vx (e32, LMUL=8).
define <vscale x 16 x i32> @vrsub_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %vb, <vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

; Unmasked (all-ones mask) variant of the above.
define <vscale x 16 x i32> @vrsub_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %vb, <vscale x 16 x i32> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

; Masked vp.sub with immediate-splat LHS selects vrsub.vi.
define <vscale x 16 x i32> @vrsub_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> splat (i32 2), <vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

; Unmasked variant of the immediate form.
define <vscale x 16 x i32> @vrsub_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> splat (i32 2), <vscale x 16 x i32> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}
declare <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)

; i64 scalar splat: RV32 has no 64-bit GPR, so the scalar is spilled to the
; stack and broadcast with a zero-stride vlse64, then vsub.vv is used; RV64
; can select vrsub.vx directly.
define <vscale x 1 x i64> @vrsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vrsub_vx_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vsub.vv v8, v9, v8, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vrsub_vx_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

; Unmasked (all-ones mask) variant of the above.
define <vscale x 1 x i64> @vrsub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vrsub_vx_nxv1i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vsub.vv v8, v9, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vrsub_vx_nxv1i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vrsub.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

; Immediate splat fits vrsub.vi on both RV32 and RV64 (common CHECK prefix).
define <vscale x 1 x i64> @vrsub_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> splat (i64 2), <vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

; Unmasked variant of the immediate form.
define <vscale x 1 x i64> @vrsub_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> splat (i64 2), <vscale x 1 x i64> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}
declare <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)

; i64 scalar splat at LMUL=2: RV32 broadcasts via stack + vlse64 and uses
; vsub.vv; RV64 selects vrsub.vx directly.
define <vscale x 2 x i64> @vrsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vrsub_vx_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vsub.vv v8, v10, v8, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vrsub_vx_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %vb, <vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

; Unmasked (all-ones mask) variant of the above.
define <vscale x 2 x i64> @vrsub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vrsub_vx_nxv2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vsub.vv v8, v10, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vrsub_vx_nxv2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vrsub.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %vb, <vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

; Immediate splat fits vrsub.vi on both RV32 and RV64 (common CHECK prefix).
define <vscale x 2 x i64> @vrsub_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> splat (i64 2), <vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

; Unmasked variant of the immediate form.
define <vscale x 2 x i64> @vrsub_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> splat (i64 2), <vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
declare <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)

; i64 scalar splat at LMUL=4: RV32 broadcasts via stack + vlse64 and uses
; vsub.vv; RV64 selects vrsub.vx directly.
define <vscale x 4 x i64> @vrsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vrsub_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vsub.vv v8, v12, v8, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vrsub_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %vb, <vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

; Unmasked (all-ones mask) variant of the above.
define <vscale x 4 x i64> @vrsub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vrsub_vx_nxv4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vsub.vv v8, v12, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vrsub_vx_nxv4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vrsub.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %vb, <vscale x 4 x i64> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

; Immediate splat fits vrsub.vi on both RV32 and RV64 (common CHECK prefix).
define <vscale x 4 x i64> @vrsub_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> splat (i64 2), <vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

; Unmasked variant of the immediate form.
define <vscale x 4 x i64> @vrsub_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> splat (i64 2), <vscale x 4 x i64> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}
declare <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)

; i64 scalar splat at LMUL=8: RV32 broadcasts via stack + vlse64 and uses
; vsub.vv; RV64 selects vrsub.vx directly.
define <vscale x 8 x i64> @vrsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vrsub_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vsub.vv v8, v16, v8, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vrsub_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

; Unmasked (all-ones mask) variant of the above.
define <vscale x 8 x i64> @vrsub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vrsub_vx_nxv8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vsub.vv v8, v16, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vrsub_vx_nxv8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vrsub.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}

; Immediate splat fits vrsub.vi on both RV32 and RV64 (common CHECK prefix).
define <vscale x 8 x i64> @vrsub_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> splat (i64 2), <vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

; Unmasked variant of the immediate form.
define <vscale x 8 x i64> @vrsub_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> splat (i64 2), <vscale x 8 x i64> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}