1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
3 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
5 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
7 declare <vscale x 8 x i7> @llvm.vp.add.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
9 define <vscale x 8 x i7> @vadd_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
10 ; CHECK-LABEL: vadd_vx_nxv8i7:
12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
13 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
15 %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
16 %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
17 %v = call <vscale x 8 x i7> @llvm.vp.add.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
18 ret <vscale x 8 x i7> %v
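; The i7 element type above is not a legal RVV element type; the checks imply it
; is promoted to i8 and the operation runs at e8/m1 (the same configuration as
; nxv8i8 below). That is sound for vadd because the low 7 bits of the sum depend
; only on the low 7 bits of the operands, so no extra truncation is checked for.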
21 declare <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
23 define <vscale x 1 x i8> @vadd_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
24 ; CHECK-LABEL: vadd_vv_nxv1i8:
26 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
27 ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
29 %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
30 ret <vscale x 1 x i8> %v
33 define <vscale x 1 x i8> @vadd_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
34 ; CHECK-LABEL: vadd_vv_nxv1i8_unmasked:
36 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
37 ; CHECK-NEXT: vadd.vv v8, v8, v9
39 %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
40 ret <vscale x 1 x i8> %v
43 define <vscale x 1 x i8> @vadd_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
44 ; CHECK-LABEL: vadd_vx_nxv1i8:
46 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
47 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
49 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
50 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
51 %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
52 ret <vscale x 1 x i8> %v
55 define <vscale x 1 x i8> @vadd_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
56 ; CHECK-LABEL: vadd_vx_nxv1i8_commute:
58 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
59 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
61 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
62 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
63 %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
64 ret <vscale x 1 x i8> %v
67 define <vscale x 1 x i8> @vadd_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
68 ; CHECK-LABEL: vadd_vx_nxv1i8_unmasked:
70 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
71 ; CHECK-NEXT: vadd.vx v8, v8, a0
73 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
74 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
75 %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
76 ret <vscale x 1 x i8> %v
79 define <vscale x 1 x i8> @vadd_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
80 ; CHECK-LABEL: vadd_vi_nxv1i8:
82 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
83 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
85 %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> %m, i32 %evl)
86 ret <vscale x 1 x i8> %v
89 define <vscale x 1 x i8> @vadd_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
90 ; CHECK-LABEL: vadd_vi_nxv1i8_unmasked:
92 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
93 ; CHECK-NEXT: vadd.vi v8, v8, -1
95 %v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
96 ret <vscale x 1 x i8> %v
99 declare <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
101 define <vscale x 2 x i8> @vadd_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
102 ; CHECK-LABEL: vadd_vv_nxv2i8:
104 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
105 ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
107 %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
108 ret <vscale x 2 x i8> %v
111 define <vscale x 2 x i8> @vadd_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
112 ; CHECK-LABEL: vadd_vv_nxv2i8_unmasked:
114 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
115 ; CHECK-NEXT: vadd.vv v8, v8, v9
117 %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
118 ret <vscale x 2 x i8> %v
121 define <vscale x 2 x i8> @vadd_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
122 ; CHECK-LABEL: vadd_vx_nxv2i8:
124 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
125 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
127 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
128 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
129 %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
130 ret <vscale x 2 x i8> %v
133 define <vscale x 2 x i8> @vadd_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
134 ; CHECK-LABEL: vadd_vx_nxv2i8_unmasked:
136 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
137 ; CHECK-NEXT: vadd.vx v8, v8, a0
139 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
140 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
141 %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
142 ret <vscale x 2 x i8> %v
145 define <vscale x 2 x i8> @vadd_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
146 ; CHECK-LABEL: vadd_vi_nxv2i8:
148 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
149 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
151 %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> %m, i32 %evl)
152 ret <vscale x 2 x i8> %v
155 define <vscale x 2 x i8> @vadd_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
156 ; CHECK-LABEL: vadd_vi_nxv2i8_unmasked:
158 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
159 ; CHECK-NEXT: vadd.vi v8, v8, -1
161 %v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
162 ret <vscale x 2 x i8> %v
165 declare <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
167 define <vscale x 3 x i8> @vadd_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
168 ; CHECK-LABEL: vadd_vv_nxv3i8:
170 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
171 ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
173 %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
174 ret <vscale x 3 x i8> %v
177 define <vscale x 3 x i8> @vadd_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, i32 zeroext %evl) {
178 ; CHECK-LABEL: vadd_vv_nxv3i8_unmasked:
180 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
181 ; CHECK-NEXT: vadd.vv v8, v8, v9
183 %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> splat (i1 true), i32 %evl)
184 ret <vscale x 3 x i8> %v
187 define <vscale x 3 x i8> @vadd_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
188 ; CHECK-LABEL: vadd_vx_nxv3i8:
190 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
191 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
193 %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
194 %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
195 %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
196 ret <vscale x 3 x i8> %v
199 define <vscale x 3 x i8> @vadd_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b, i32 zeroext %evl) {
200 ; CHECK-LABEL: vadd_vx_nxv3i8_unmasked:
202 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
203 ; CHECK-NEXT: vadd.vx v8, v8, a0
205 %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
206 %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
207 %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> splat (i1 true), i32 %evl)
208 ret <vscale x 3 x i8> %v
211 define <vscale x 3 x i8> @vadd_vi_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i1> %m, i32 zeroext %evl) {
212 ; CHECK-LABEL: vadd_vi_nxv3i8:
214 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
215 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
217 %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> splat (i8 -1), <vscale x 3 x i1> %m, i32 %evl)
218 ret <vscale x 3 x i8> %v
221 define <vscale x 3 x i8> @vadd_vi_nxv3i8_unmasked(<vscale x 3 x i8> %va, i32 zeroext %evl) {
222 ; CHECK-LABEL: vadd_vi_nxv3i8_unmasked:
224 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
225 ; CHECK-NEXT: vadd.vi v8, v8, -1
227 %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> splat (i8 -1), <vscale x 3 x i1> splat (i1 true), i32 %evl)
228 ret <vscale x 3 x i8> %v
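; nxv3i8 is a non-power-of-two element count; the checks above use e8/mf2, the
; same configuration as the nxv4i8 tests below, which suggests the type is
; widened to the next power-of-two container for codegen.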
231 declare <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
233 define <vscale x 4 x i8> @vadd_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
234 ; CHECK-LABEL: vadd_vv_nxv4i8:
236 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
237 ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
239 %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
240 ret <vscale x 4 x i8> %v
243 define <vscale x 4 x i8> @vadd_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
244 ; CHECK-LABEL: vadd_vv_nxv4i8_unmasked:
246 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
247 ; CHECK-NEXT: vadd.vv v8, v8, v9
249 %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
250 ret <vscale x 4 x i8> %v
253 define <vscale x 4 x i8> @vadd_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
254 ; CHECK-LABEL: vadd_vx_nxv4i8:
256 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
257 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
259 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
260 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
261 %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
262 ret <vscale x 4 x i8> %v
265 define <vscale x 4 x i8> @vadd_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
266 ; CHECK-LABEL: vadd_vx_nxv4i8_unmasked:
268 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
269 ; CHECK-NEXT: vadd.vx v8, v8, a0
271 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
272 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
273 %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
274 ret <vscale x 4 x i8> %v
277 define <vscale x 4 x i8> @vadd_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
278 ; CHECK-LABEL: vadd_vi_nxv4i8:
280 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
281 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
283 %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> %m, i32 %evl)
284 ret <vscale x 4 x i8> %v
287 define <vscale x 4 x i8> @vadd_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
288 ; CHECK-LABEL: vadd_vi_nxv4i8_unmasked:
290 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
291 ; CHECK-NEXT: vadd.vi v8, v8, -1
293 %v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
294 ret <vscale x 4 x i8> %v
297 declare <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
299 define <vscale x 8 x i8> @vadd_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
300 ; CHECK-LABEL: vadd_vv_nxv8i8:
302 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
303 ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
305 %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
306 ret <vscale x 8 x i8> %v
309 define <vscale x 8 x i8> @vadd_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
310 ; CHECK-LABEL: vadd_vv_nxv8i8_unmasked:
312 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
313 ; CHECK-NEXT: vadd.vv v8, v8, v9
315 %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
316 ret <vscale x 8 x i8> %v
319 define <vscale x 8 x i8> @vadd_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
320 ; CHECK-LABEL: vadd_vx_nxv8i8:
322 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
323 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
325 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
326 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
327 %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
328 ret <vscale x 8 x i8> %v
331 define <vscale x 8 x i8> @vadd_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
332 ; CHECK-LABEL: vadd_vx_nxv8i8_unmasked:
334 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
335 ; CHECK-NEXT: vadd.vx v8, v8, a0
337 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
338 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
339 %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
340 ret <vscale x 8 x i8> %v
343 define <vscale x 8 x i8> @vadd_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
344 ; CHECK-LABEL: vadd_vi_nxv8i8:
346 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
347 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
349 %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> %m, i32 %evl)
350 ret <vscale x 8 x i8> %v
353 define <vscale x 8 x i8> @vadd_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
354 ; CHECK-LABEL: vadd_vi_nxv8i8_unmasked:
356 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
357 ; CHECK-NEXT: vadd.vi v8, v8, -1
359 %v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
360 ret <vscale x 8 x i8> %v
363 declare <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
365 define <vscale x 16 x i8> @vadd_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
366 ; CHECK-LABEL: vadd_vv_nxv16i8:
368 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
369 ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
371 %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
372 ret <vscale x 16 x i8> %v
375 define <vscale x 16 x i8> @vadd_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
376 ; CHECK-LABEL: vadd_vv_nxv16i8_unmasked:
378 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
379 ; CHECK-NEXT: vadd.vv v8, v8, v10
381 %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
382 ret <vscale x 16 x i8> %v
385 define <vscale x 16 x i8> @vadd_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
386 ; CHECK-LABEL: vadd_vx_nxv16i8:
388 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
389 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
391 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
392 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
393 %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
394 ret <vscale x 16 x i8> %v
397 define <vscale x 16 x i8> @vadd_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
398 ; CHECK-LABEL: vadd_vx_nxv16i8_unmasked:
400 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
401 ; CHECK-NEXT: vadd.vx v8, v8, a0
403 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
404 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
405 %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
406 ret <vscale x 16 x i8> %v
409 define <vscale x 16 x i8> @vadd_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
410 ; CHECK-LABEL: vadd_vi_nxv16i8:
412 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
413 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
415 %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> %m, i32 %evl)
416 ret <vscale x 16 x i8> %v
419 define <vscale x 16 x i8> @vadd_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
420 ; CHECK-LABEL: vadd_vi_nxv16i8_unmasked:
422 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
423 ; CHECK-NEXT: vadd.vi v8, v8, -1
425 %v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
426 ret <vscale x 16 x i8> %v
429 declare <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
431 define <vscale x 32 x i8> @vadd_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
432 ; CHECK-LABEL: vadd_vv_nxv32i8:
434 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
435 ; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
437 %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
438 ret <vscale x 32 x i8> %v
441 define <vscale x 32 x i8> @vadd_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
442 ; CHECK-LABEL: vadd_vv_nxv32i8_unmasked:
444 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
445 ; CHECK-NEXT: vadd.vv v8, v8, v12
447 %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
448 ret <vscale x 32 x i8> %v
451 define <vscale x 32 x i8> @vadd_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
452 ; CHECK-LABEL: vadd_vx_nxv32i8:
454 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
455 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
457 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
458 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
459 %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
460 ret <vscale x 32 x i8> %v
463 define <vscale x 32 x i8> @vadd_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
464 ; CHECK-LABEL: vadd_vx_nxv32i8_unmasked:
466 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
467 ; CHECK-NEXT: vadd.vx v8, v8, a0
469 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
470 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
471 %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
472 ret <vscale x 32 x i8> %v
475 define <vscale x 32 x i8> @vadd_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
476 ; CHECK-LABEL: vadd_vi_nxv32i8:
478 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
479 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
481 %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> %m, i32 %evl)
482 ret <vscale x 32 x i8> %v
485 define <vscale x 32 x i8> @vadd_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
486 ; CHECK-LABEL: vadd_vi_nxv32i8_unmasked:
488 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
489 ; CHECK-NEXT: vadd.vi v8, v8, -1
491 %v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
492 ret <vscale x 32 x i8> %v
495 declare <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
497 define <vscale x 64 x i8> @vadd_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
498 ; CHECK-LABEL: vadd_vv_nxv64i8:
500 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
501 ; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
503 %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
504 ret <vscale x 64 x i8> %v
507 define <vscale x 64 x i8> @vadd_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
508 ; CHECK-LABEL: vadd_vv_nxv64i8_unmasked:
510 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
511 ; CHECK-NEXT: vadd.vv v8, v8, v16
513 %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
514 ret <vscale x 64 x i8> %v
517 define <vscale x 64 x i8> @vadd_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
518 ; CHECK-LABEL: vadd_vx_nxv64i8:
520 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
521 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
523 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
524 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
525 %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
526 ret <vscale x 64 x i8> %v
529 define <vscale x 64 x i8> @vadd_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
530 ; CHECK-LABEL: vadd_vx_nxv64i8_unmasked:
532 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
533 ; CHECK-NEXT: vadd.vx v8, v8, a0
535 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
536 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
537 %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
538 ret <vscale x 64 x i8> %v
541 define <vscale x 64 x i8> @vadd_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
542 ; CHECK-LABEL: vadd_vi_nxv64i8:
544 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
545 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
547 %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> %m, i32 %evl)
548 ret <vscale x 64 x i8> %v
551 define <vscale x 64 x i8> @vadd_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
552 ; CHECK-LABEL: vadd_vi_nxv64i8_unmasked:
554 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
555 ; CHECK-NEXT: vadd.vi v8, v8, -1
557 %v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> splat (i1 true), i32 %evl)
558 ret <vscale x 64 x i8> %v
561 ; Test that split-legalization works when the mask itself needs splitting.
563 declare <vscale x 128 x i8> @llvm.vp.add.nxv128i8(<vscale x 128 x i8>, <vscale x 128 x i8>, <vscale x 128 x i1>, i32)
565 define <vscale x 128 x i8> @vadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
566 ; CHECK-LABEL: vadd_vi_nxv128i8:
568 ; CHECK-NEXT: vmv1r.v v24, v0
569 ; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
570 ; CHECK-NEXT: vlm.v v0, (a0)
571 ; CHECK-NEXT: csrr a0, vlenb
572 ; CHECK-NEXT: slli a0, a0, 3
573 ; CHECK-NEXT: sub a2, a1, a0
574 ; CHECK-NEXT: sltu a3, a1, a2
575 ; CHECK-NEXT: addi a3, a3, -1
576 ; CHECK-NEXT: and a2, a3, a2
577 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
578 ; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
579 ; CHECK-NEXT: bltu a1, a0, .LBB50_2
580 ; CHECK-NEXT: # %bb.1:
581 ; CHECK-NEXT: mv a1, a0
582 ; CHECK-NEXT: .LBB50_2:
583 ; CHECK-NEXT: vmv1r.v v0, v24
584 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
585 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
587 %v = call <vscale x 128 x i8> @llvm.vp.add.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
588 ret <vscale x 128 x i8> %v
591 define <vscale x 128 x i8> @vadd_vi_nxv128i8_unmasked(<vscale x 128 x i8> %va, i32 zeroext %evl) {
592 ; CHECK-LABEL: vadd_vi_nxv128i8_unmasked:
594 ; CHECK-NEXT: csrr a1, vlenb
595 ; CHECK-NEXT: slli a1, a1, 3
596 ; CHECK-NEXT: sub a2, a0, a1
597 ; CHECK-NEXT: sltu a3, a0, a2
598 ; CHECK-NEXT: addi a3, a3, -1
599 ; CHECK-NEXT: and a2, a3, a2
600 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
601 ; CHECK-NEXT: vadd.vi v16, v16, -1
602 ; CHECK-NEXT: bltu a0, a1, .LBB51_2
603 ; CHECK-NEXT: # %bb.1:
604 ; CHECK-NEXT: mv a0, a1
605 ; CHECK-NEXT: .LBB51_2:
606 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
607 ; CHECK-NEXT: vadd.vi v8, v8, -1
609 %v = call <vscale x 128 x i8> @llvm.vp.add.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> splat (i1 true), i32 %evl)
610 ret <vscale x 128 x i8> %v
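; A sketch of the EVL-splitting arithmetic exercised by the two nxv128i8 tests
; above (register assignments as in the unmasked variant; VLMax here is the
; element count of one e8/m8 register group, i.e. vlenb * 8):
;
;   csrr a1, vlenb      ; a1 = VLEN/8, bytes per vector register
;   slli a1, a1, 3      ; a1 = VLMax = vlenb * 8
;   sub  a2, a0, a1     ; a2 = evl - VLMax (wraps when evl < VLMax)
;   sltu a3, a0, a2     ; a3 = 1 iff the subtraction wrapped
;   addi a3, a3, -1     ; a3 = 0 if it wrapped, all-ones otherwise
;   and  a2, a3, a2     ; a2 = max(evl - VLMax, 0), the EVL for the high half
;
; The low half then runs with min(evl, VLMax), selected by the bltu/mv sequence,
; so each half of the split operand gets a correctly clamped EVL.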
613 declare <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
615 define <vscale x 1 x i16> @vadd_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
616 ; CHECK-LABEL: vadd_vv_nxv1i16:
618 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
619 ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
621 %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
622 ret <vscale x 1 x i16> %v
625 define <vscale x 1 x i16> @vadd_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
626 ; CHECK-LABEL: vadd_vv_nxv1i16_unmasked:
628 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
629 ; CHECK-NEXT: vadd.vv v8, v8, v9
631 %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
632 ret <vscale x 1 x i16> %v
635 define <vscale x 1 x i16> @vadd_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
636 ; CHECK-LABEL: vadd_vx_nxv1i16:
638 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
639 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
641 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
642 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
643 %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
644 ret <vscale x 1 x i16> %v
647 define <vscale x 1 x i16> @vadd_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
648 ; CHECK-LABEL: vadd_vx_nxv1i16_unmasked:
650 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
651 ; CHECK-NEXT: vadd.vx v8, v8, a0
653 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
654 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
655 %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
656 ret <vscale x 1 x i16> %v
659 define <vscale x 1 x i16> @vadd_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
660 ; CHECK-LABEL: vadd_vi_nxv1i16:
662 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
663 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
665 %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> %m, i32 %evl)
666 ret <vscale x 1 x i16> %v
669 define <vscale x 1 x i16> @vadd_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
670 ; CHECK-LABEL: vadd_vi_nxv1i16_unmasked:
672 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
673 ; CHECK-NEXT: vadd.vi v8, v8, -1
675 %v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
676 ret <vscale x 1 x i16> %v
679 declare <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
681 define <vscale x 2 x i16> @vadd_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
682 ; CHECK-LABEL: vadd_vv_nxv2i16:
684 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
685 ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
687 %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
688 ret <vscale x 2 x i16> %v
691 define <vscale x 2 x i16> @vadd_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
692 ; CHECK-LABEL: vadd_vv_nxv2i16_unmasked:
694 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
695 ; CHECK-NEXT: vadd.vv v8, v8, v9
697 %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
698 ret <vscale x 2 x i16> %v
701 define <vscale x 2 x i16> @vadd_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
702 ; CHECK-LABEL: vadd_vx_nxv2i16:
704 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
705 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
707 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
708 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
709 %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
710 ret <vscale x 2 x i16> %v
713 define <vscale x 2 x i16> @vadd_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
714 ; CHECK-LABEL: vadd_vx_nxv2i16_unmasked:
716 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
717 ; CHECK-NEXT: vadd.vx v8, v8, a0
719 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
720 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
721 %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
722 ret <vscale x 2 x i16> %v
725 define <vscale x 2 x i16> @vadd_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
726 ; CHECK-LABEL: vadd_vi_nxv2i16:
728 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
729 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
731 %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> %m, i32 %evl)
732 ret <vscale x 2 x i16> %v
735 define <vscale x 2 x i16> @vadd_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
736 ; CHECK-LABEL: vadd_vi_nxv2i16_unmasked:
738 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
739 ; CHECK-NEXT: vadd.vi v8, v8, -1
741 %v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
742 ret <vscale x 2 x i16> %v
745 declare <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
747 define <vscale x 4 x i16> @vadd_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
748 ; CHECK-LABEL: vadd_vv_nxv4i16:
750 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
751 ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
753 %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
754 ret <vscale x 4 x i16> %v
757 define <vscale x 4 x i16> @vadd_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
758 ; CHECK-LABEL: vadd_vv_nxv4i16_unmasked:
760 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
761 ; CHECK-NEXT: vadd.vv v8, v8, v9
763 %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
764 ret <vscale x 4 x i16> %v
767 define <vscale x 4 x i16> @vadd_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
768 ; CHECK-LABEL: vadd_vx_nxv4i16:
770 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
771 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
773 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
774 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
775 %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
776 ret <vscale x 4 x i16> %v
779 define <vscale x 4 x i16> @vadd_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
780 ; CHECK-LABEL: vadd_vx_nxv4i16_unmasked:
782 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
783 ; CHECK-NEXT: vadd.vx v8, v8, a0
785 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
786 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
787 %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
788 ret <vscale x 4 x i16> %v
791 define <vscale x 4 x i16> @vadd_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
792 ; CHECK-LABEL: vadd_vi_nxv4i16:
794 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
795 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
797 %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> %m, i32 %evl)
798 ret <vscale x 4 x i16> %v
801 define <vscale x 4 x i16> @vadd_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
802 ; CHECK-LABEL: vadd_vi_nxv4i16_unmasked:
804 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
805 ; CHECK-NEXT: vadd.vi v8, v8, -1
807 %v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
808 ret <vscale x 4 x i16> %v
811 declare <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
813 define <vscale x 8 x i16> @vadd_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
814 ; CHECK-LABEL: vadd_vv_nxv8i16:
816 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
817 ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
819 %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
820 ret <vscale x 8 x i16> %v
823 define <vscale x 8 x i16> @vadd_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
824 ; CHECK-LABEL: vadd_vv_nxv8i16_unmasked:
826 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
827 ; CHECK-NEXT: vadd.vv v8, v8, v10
829 %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
830 ret <vscale x 8 x i16> %v
833 define <vscale x 8 x i16> @vadd_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
834 ; CHECK-LABEL: vadd_vx_nxv8i16:
836 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
837 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
839 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
840 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
841 %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
842 ret <vscale x 8 x i16> %v
845 define <vscale x 8 x i16> @vadd_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
846 ; CHECK-LABEL: vadd_vx_nxv8i16_unmasked:
848 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
849 ; CHECK-NEXT: vadd.vx v8, v8, a0
851 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
852 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
853 %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
854 ret <vscale x 8 x i16> %v
857 define <vscale x 8 x i16> @vadd_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
858 ; CHECK-LABEL: vadd_vi_nxv8i16:
860 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
861 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
863 %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> %m, i32 %evl)
864 ret <vscale x 8 x i16> %v
867 define <vscale x 8 x i16> @vadd_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
868 ; CHECK-LABEL: vadd_vi_nxv8i16_unmasked:
870 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
871 ; CHECK-NEXT: vadd.vi v8, v8, -1
873 %v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
874 ret <vscale x 8 x i16> %v
877 declare <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
879 define <vscale x 16 x i16> @vadd_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
880 ; CHECK-LABEL: vadd_vv_nxv16i16:
882 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
883 ; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
885 %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
886 ret <vscale x 16 x i16> %v
889 define <vscale x 16 x i16> @vadd_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
890 ; CHECK-LABEL: vadd_vv_nxv16i16_unmasked:
892 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
893 ; CHECK-NEXT: vadd.vv v8, v8, v12
895 %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
896 ret <vscale x 16 x i16> %v
899 define <vscale x 16 x i16> @vadd_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
900 ; CHECK-LABEL: vadd_vx_nxv16i16:
902 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
903 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
905 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
906 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
907 %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
908 ret <vscale x 16 x i16> %v
911 define <vscale x 16 x i16> @vadd_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
912 ; CHECK-LABEL: vadd_vx_nxv16i16_unmasked:
914 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
915 ; CHECK-NEXT: vadd.vx v8, v8, a0
917 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
918 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
919 %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
920 ret <vscale x 16 x i16> %v
923 define <vscale x 16 x i16> @vadd_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
924 ; CHECK-LABEL: vadd_vi_nxv16i16:
926 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
927 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
929 %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> %m, i32 %evl)
930 ret <vscale x 16 x i16> %v
933 define <vscale x 16 x i16> @vadd_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
934 ; CHECK-LABEL: vadd_vi_nxv16i16_unmasked:
936 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
937 ; CHECK-NEXT: vadd.vi v8, v8, -1
939 %v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
940 ret <vscale x 16 x i16> %v
943 declare <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
945 define <vscale x 32 x i16> @vadd_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
946 ; CHECK-LABEL: vadd_vv_nxv32i16:
948 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
949 ; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
951 %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
952 ret <vscale x 32 x i16> %v
955 define <vscale x 32 x i16> @vadd_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
956 ; CHECK-LABEL: vadd_vv_nxv32i16_unmasked:
958 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
959 ; CHECK-NEXT: vadd.vv v8, v8, v16
961 %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
962 ret <vscale x 32 x i16> %v
965 define <vscale x 32 x i16> @vadd_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
966 ; CHECK-LABEL: vadd_vx_nxv32i16:
968 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
969 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
971 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
972 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
973 %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
974 ret <vscale x 32 x i16> %v
977 define <vscale x 32 x i16> @vadd_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
978 ; CHECK-LABEL: vadd_vx_nxv32i16_unmasked:
980 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
981 ; CHECK-NEXT: vadd.vx v8, v8, a0
983 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
984 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
985 %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
986 ret <vscale x 32 x i16> %v
989 define <vscale x 32 x i16> @vadd_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
990 ; CHECK-LABEL: vadd_vi_nxv32i16:
992 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
993 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
995 %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> %m, i32 %evl)
996 ret <vscale x 32 x i16> %v
999 define <vscale x 32 x i16> @vadd_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
1000 ; CHECK-LABEL: vadd_vi_nxv32i16_unmasked:
1002 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
1003 ; CHECK-NEXT: vadd.vi v8, v8, -1
1005 %v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
1006 ret <vscale x 32 x i16> %v
1009 declare <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
1011 define <vscale x 1 x i32> @vadd_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1012 ; CHECK-LABEL: vadd_vv_nxv1i32:
1014 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1015 ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
1017 %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
1018 ret <vscale x 1 x i32> %v
1021 define <vscale x 1 x i32> @vadd_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
1022 ; CHECK-LABEL: vadd_vv_nxv1i32_unmasked:
1024 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1025 ; CHECK-NEXT: vadd.vv v8, v8, v9
1027 %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
1028 ret <vscale x 1 x i32> %v
1031 define <vscale x 1 x i32> @vadd_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1032 ; CHECK-LABEL: vadd_vx_nxv1i32:
1034 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1035 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
1037 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
1038 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
1039 %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
1040 ret <vscale x 1 x i32> %v
1043 define <vscale x 1 x i32> @vadd_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
1044 ; CHECK-LABEL: vadd_vx_nxv1i32_unmasked:
1046 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1047 ; CHECK-NEXT: vadd.vx v8, v8, a0
1049 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
1050 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
1051 %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
1052 ret <vscale x 1 x i32> %v
1055 define <vscale x 1 x i32> @vadd_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1056 ; CHECK-LABEL: vadd_vi_nxv1i32:
1058 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1059 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
1061 %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> %m, i32 %evl)
1062 ret <vscale x 1 x i32> %v
1065 define <vscale x 1 x i32> @vadd_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
1066 ; CHECK-LABEL: vadd_vi_nxv1i32_unmasked:
1068 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1069 ; CHECK-NEXT: vadd.vi v8, v8, -1
1071 %v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
1072 ret <vscale x 1 x i32> %v
1075 declare <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
1077 define <vscale x 2 x i32> @vadd_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1078 ; CHECK-LABEL: vadd_vv_nxv2i32:
1080 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1081 ; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
1083 %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
1084 ret <vscale x 2 x i32> %v
1087 define <vscale x 2 x i32> @vadd_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
1088 ; CHECK-LABEL: vadd_vv_nxv2i32_unmasked:
1090 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1091 ; CHECK-NEXT: vadd.vv v8, v8, v9
1093 %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1094 ret <vscale x 2 x i32> %v
1097 define <vscale x 2 x i32> @vadd_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1098 ; CHECK-LABEL: vadd_vx_nxv2i32:
1100 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1101 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
1103 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
1104 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
1105 %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
1106 ret <vscale x 2 x i32> %v
1109 define <vscale x 2 x i32> @vadd_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
1110 ; CHECK-LABEL: vadd_vx_nxv2i32_unmasked:
1112 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1113 ; CHECK-NEXT: vadd.vx v8, v8, a0
1115 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
1116 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
1117 %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1118 ret <vscale x 2 x i32> %v
1121 define <vscale x 2 x i32> @vadd_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1122 ; CHECK-LABEL: vadd_vi_nxv2i32:
1124 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1125 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
1127 %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> %m, i32 %evl)
1128 ret <vscale x 2 x i32> %v
1131 define <vscale x 2 x i32> @vadd_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
1132 ; CHECK-LABEL: vadd_vi_nxv2i32_unmasked:
1134 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1135 ; CHECK-NEXT: vadd.vi v8, v8, -1
1137 %v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
1138 ret <vscale x 2 x i32> %v
1141 declare <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
1143 define <vscale x 4 x i32> @vadd_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1144 ; CHECK-LABEL: vadd_vv_nxv4i32:
1146 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1147 ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
1149 %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
1150 ret <vscale x 4 x i32> %v
1153 define <vscale x 4 x i32> @vadd_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
1154 ; CHECK-LABEL: vadd_vv_nxv4i32_unmasked:
1156 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1157 ; CHECK-NEXT: vadd.vv v8, v8, v10
1159 %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1160 ret <vscale x 4 x i32> %v
1163 define <vscale x 4 x i32> @vadd_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1164 ; CHECK-LABEL: vadd_vx_nxv4i32:
1166 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1167 ; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
1169 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
1170 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
1171 %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
1172 ret <vscale x 4 x i32> %v
1175 define <vscale x 4 x i32> @vadd_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
1176 ; CHECK-LABEL: vadd_vx_nxv4i32_unmasked:
1178 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1179 ; CHECK-NEXT: vadd.vx v8, v8, a0
1181 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
1182 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
1183 %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1184 ret <vscale x 4 x i32> %v
1187 define <vscale x 4 x i32> @vadd_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1188 ; CHECK-LABEL: vadd_vi_nxv4i32:
1190 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1191 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
1193 %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> %m, i32 %evl)
1194 ret <vscale x 4 x i32> %v
1197 define <vscale x 4 x i32> @vadd_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
1198 ; CHECK-LABEL: vadd_vi_nxv4i32_unmasked:
1200 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1201 ; CHECK-NEXT: vadd.vi v8, v8, -1
1203 %v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
1204 ret <vscale x 4 x i32> %v
declare <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i32> @vadd_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vadd_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vadd_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vadd_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv8i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vadd_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vadd_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv8i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x i32> %v
}

declare <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i32> @vadd_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vadd_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv16i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vadd_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vadd_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv16i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vadd_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vadd_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv16i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x i32> %v
}

; Test that split-legalization works when the mask needs manual splitting.
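; nxv32i32 is twice the widest legal vector type (nxv16i32 at LMUL=8), so the
; operation is split into two m8 halves: the mask in v0 is split with
; vslidedown.vx and the EVL is clamped separately for each half.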
declare <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i32> @vadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv32i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a2
; CHECK-NEXT: slli a1, a1, 1
; CHECK-NEXT: sub a2, a0, a1
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB118_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x i32> %v
}

define <vscale x 32 x i32> @vadd_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv32i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
; CHECK-NEXT: sub a2, a0, a1
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v16, v16, -1
; CHECK-NEXT: bltu a0, a1, .LBB119_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB119_2:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> splat (i1 true), i32 %evl)
ret <vscale x 32 x i32> %v
}

; Test splitting when the %evl is a constant (albeit an unknown one).

declare i32 @llvm.vscale.i32()

; FIXME: The upper half of the operation is doing nothing.
; FIXME: The branches comparing vscale vs. vscale should be constant-foldable.
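; (With %evl = vscale * 8 the request never exceeds the low nxv16i32 half,
; which holds vscale * 16 elements, so the high-half vadd.vi is dead.)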
define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m) {
; CHECK-LABEL: vadd_vi_nxv32i32_evl_nx8:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a1, a0, 2
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a1
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: sub a2, a0, a1
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB120_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB120_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%evl = call i32 @llvm.vscale.i32()
%evl0 = mul i32 %evl, 8
%v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl0)
ret <vscale x 32 x i32> %v
}

; FIXME: The first vadd.vi should be able to infer that its AVL is equivalent to VLMAX.
; FIXME: The upper half of the operation is doing nothing but we don't catch
; that on RV64; we issue a usubsat(and (vscale x 16), 0xffffffff, vscale x 16)
; (the "original" %evl is the "and", due to known-bits issues with legalizing
; the i32 %evl to i64) and this isn't detected as 0.
; This could be resolved in the future with more detailed KnownBits analysis
; for ISD::VSCALE.
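; As a result, on RV64 the dead upper-half add below is still emitted, just
; with an AVL of 0 (vsetivli zero, 0), rather than being removed.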
define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m) {
; RV32-LABEL: vadd_vi_nxv32i32_evl_nx16:
; RV32: # %bb.0:
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vadd.vi v8, v8, -1, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vi_nxv32i32_evl_nx16:
; RV64: # %bb.0:
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: srli a1, a0, 2
; RV64-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
; RV64-NEXT: vslidedown.vx v24, v0, a1
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV64-NEXT: vadd.vi v8, v8, -1, v0.t
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%evl = call i32 @llvm.vscale.i32()
%evl0 = mul i32 %evl, 16
%v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl0)
ret <vscale x 32 x i32> %v
}

declare <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i64> @vadd_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vadd_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv1i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x i64> %v
}

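; On RV32 an i64 scalar operand does not fit in a single GPR, so the .vx tests
; below splat it through a stack slot with a zero-strided vlse64.v and fall
; back to vadd.vv; RV64 uses vadd.vx directly.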
define <vscale x 1 x i64> @vadd_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vadd_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vadd_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vadd_vx_nxv1i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv1i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vadd_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vadd_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv1i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x i64> %v
}

declare <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vadd_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vadd_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vadd_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vadd_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vadd_vx_nxv2i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv2i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vadd_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vadd_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i64> %v
}

declare <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i64> @vadd_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vadd_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv4i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vadd_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vadd_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vadd_vx_nxv4i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv4i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vadd_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vadd_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv4i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x i64> %v
}

declare <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i64> @vadd_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vadd_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vadd_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vadd_vx_nxv8i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv8i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vadd_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vadd_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv8i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x i64> %v
}