; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

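; This file checks that the @llvm.vp.and.* vector-predicated intrinsics lower
; to the RVV vand.vv/vand.vx/vand.vi forms, both masked and unmasked (the
; all-true mask), across scalable integer vector types.

; i7 is a non-byte-sized element type and is promoted to i8 for codegen. Each
; bit of an AND depends only on the corresponding input bits, so the promoted
; high bits need no extra masking and a plain vand.vx is expected.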
declare <vscale x 8 x i7> @llvm.vp.and.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i7> @vand_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i7> @llvm.vp.and.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i7> %v
}

declare <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vand_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vand_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vand_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vand_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vand_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vand_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> splat (i8 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

declare <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vand_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vand_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vand_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vand_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vand_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 4), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vand_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> splat (i8 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vand_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vand_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vand_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vand_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vand_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 4), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vand_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> splat (i8 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

declare <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vand_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vand_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vand_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vand_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vand_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vand_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> splat (i8 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

declare <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i8> @vand_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vand_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vand_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vand_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vand_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 4), <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vand_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> splat (i8 4), <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

declare <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vand_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vand_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vand_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vand_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vand_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 4), <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vand_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> splat (i8 4), <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

declare <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)

define <vscale x 64 x i8> @vand_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vand_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vand_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vand_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vand_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 4), <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vand_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> splat (i8 4), <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

declare <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vand_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vand_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vand_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vand_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vand_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 4), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vand_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> splat (i16 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

declare <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vand_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vand_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vand_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vand_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vand_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 4), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vand_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> splat (i16 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vand_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vand_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vand_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vand_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vand_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 4), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vand_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> splat (i16 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

declare <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vand_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vand_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vand_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vand_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vand_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 4), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vand_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> splat (i16 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

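; nxv14i16 is a non-power-of-two element count; it is expected to be widened
; to the nearest container type and share the e16/m4 vtype used by nxv16i16.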
declare <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16>, <vscale x 14 x i16>, <vscale x 14 x i1>, i32)

define <vscale x 14 x i16> @vand_vv_nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv14i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> %m, i32 %evl)
  ret <vscale x 14 x i16> %v
}

define <vscale x 14 x i16> @vand_vv_nxv14i16_unmasked(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv14i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 14 x i16> %v
}

define <vscale x 14 x i16> @vand_vx_nxv14i16(<vscale x 14 x i16> %va, i16 %b, <vscale x 14 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv14i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 14 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> poison, <vscale x 14 x i32> zeroinitializer
  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> %m, i32 %evl)
  ret <vscale x 14 x i16> %v
}

define <vscale x 14 x i16> @vand_vx_nxv14i16_unmasked(<vscale x 14 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv14i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 14 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> poison, <vscale x 14 x i32> zeroinitializer
  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 14 x i16> %v
}

define <vscale x 14 x i16> @vand_vi_nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv14i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> splat (i16 4), <vscale x 14 x i1> %m, i32 %evl)
  ret <vscale x 14 x i16> %v
}

define <vscale x 14 x i16> @vand_vi_nxv14i16_unmasked(<vscale x 14 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv14i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> splat (i16 4), <vscale x 14 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 14 x i16> %v
}

declare <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i16> @vand_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vand_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vand_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vand_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vand_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 4), <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vand_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> splat (i16 4), <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

declare <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i16> @vand_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vand_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vand_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

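; The commuted form puts the splat in the first operand of the VP intrinsic;
; since AND is commutative, the same vand.vx selection is expected.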
define <vscale x 32 x i16> @vand_vx_nxv32i16_commute(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i16_commute:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %vb, <vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vand_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vand_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 4), <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vand_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> splat (i16 4), <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

declare <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i32> @vand_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vand_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vand_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vand_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vand_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vand_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> splat (i32 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

declare <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vand_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vand_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vand_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vand_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vand_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 4), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vand_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> splat (i32 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

declare <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i32> @vand_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vand_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vand_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vand_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vand_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 4), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vand_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> splat (i32 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

declare <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i32> @vand_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vand_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vand_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vand_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vand_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vand_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vi v8, v8, 4
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> splat (i32 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

declare <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i32> @vand_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vand_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v16
; CHECK-NEXT: ret
  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vand_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vand_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0
; CHECK-NEXT: ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vand_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 4), <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vand_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4
; CHECK-NEXT: ret
  %v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> splat (i32 4), <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

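; NOTE: Editorial note, not autogenerated: in the i64 tests below the RV32 and
; RV64 assertions diverge. RV32 has no 64-bit GPR, so the scalar %b arrives in
; the a0/a1 register pair, is stored to a stack slot, and is splatted with a
; zero-strided vlse64.v (first vsetvli at VLMAX) before a vector-vector
; vand.vv at the requested EVL; RV64 folds the scalar directly into vand.vx.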
declare <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i64> @vand_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vand_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9
; CHECK-NEXT: ret
  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vand_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vand.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vand_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv1i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vand.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv1i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0
; RV64-NEXT: ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

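; NOTE: Editorial note, not autogenerated: vand.vi carries a 5-bit signed
; immediate, so the splat of 4 below folds into the instruction; with no
; 64-bit scalar operand involved, RV32 and RV64 emit identical code and the
; immediate tests share a single CHECK block.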
define <vscale x 1 x i64> @vand_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vand_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4
; CHECK-NEXT: ret
  %v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> splat (i64 4), <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

declare <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vand_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vand_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v10
; CHECK-NEXT: ret
  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vand_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vand_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv2i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vand.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv2i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0
; RV64-NEXT: ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vand_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 4), <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vand_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4
; CHECK-NEXT: ret
  %v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> splat (i64 4), <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

declare <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i64> @vand_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vand_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: ret
  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vand_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vand.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vand_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv4i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vand.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv4i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0
; RV64-NEXT: ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vand_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 4), <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vand_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4
; CHECK-NEXT: ret
  %v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> splat (i64 4), <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

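; NOTE: Editorial note, not autogenerated: at e64/m8 below each vector operand
; occupies a full eight-register group (v8-v15 for %va, v16-v23 for the second
; operand or the splatted scalar), with the mask in v0.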
declare <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i64> @vand_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vand_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v16
; CHECK-NEXT: ret
  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vand_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vand_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vand_vx_nxv8i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv8i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0
; RV64-NEXT: ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vand_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vand_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4
; CHECK-NEXT: ret
  %v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> splat (i64 4), <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}