; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB32
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvkb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVKB,CHECK-ZVKB64
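
; Test that VP and/xor-with-allones patterns select vandn.vv and vandn.vx when
; the Zvkb extension is enabled; without Zvkb the same IR lowers to a masked
; vnot.v plus vand.vv (or a scalar not plus vand.vx).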

declare <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vandn_vv_vp_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv1i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %not.a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i8> %x
}

define <vscale x 1 x i8> @vandn_vv_vp_swapped_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv1i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> splat (i8 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %b, <vscale x 1 x i8> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i8> %x
}
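
; The vandn_vx tests splat an inverted scalar: without Zvkb the scalar is
; flipped with a scalar `not` and then combined with vand.vx; with Zvkb the
; inversion folds into a single vandn.vx.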

define <vscale x 1 x i8> @vandn_vx_vp_nxv1i8(i8 %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv1i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 1 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 1 x i8> %head.not.a, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %b, <vscale x 1 x i8> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i8> %x
}

declare <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vandn_vv_vp_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv2i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %not.a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i8> %x
}

define <vscale x 2 x i8> @vandn_vv_vp_swapped_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv2i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> splat (i8 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %b, <vscale x 2 x i8> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i8> %x
}

define <vscale x 2 x i8> @vandn_vx_vp_nxv2i8(i8 %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv2i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 2 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 2 x i8> %head.not.a, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %b, <vscale x 2 x i8> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i8> %x
}

declare <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vandn_vv_vp_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv4i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %not.a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i8> %x
}

define <vscale x 4 x i8> @vandn_vv_vp_swapped_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv4i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> splat (i8 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %b, <vscale x 4 x i8> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i8> %x
}

define <vscale x 4 x i8> @vandn_vx_vp_nxv4i8(i8 %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv4i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 4 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 4 x i8> %head.not.a, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %b, <vscale x 4 x i8> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i8> %x
}

declare <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vandn_vv_vp_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv8i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %not.a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i8> %x
}

define <vscale x 8 x i8> @vandn_vv_vp_swapped_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv8i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> splat (i8 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %b, <vscale x 8 x i8> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i8> %x
}

define <vscale x 8 x i8> @vandn_vx_vp_nxv8i8(i8 %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv8i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 8 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 8 x i8> %head.not.a, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %x = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %b, <vscale x 8 x i8> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i8> %x
}

declare <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i8> @vandn_vv_vp_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv16i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> %mask, i32 %evl)
  %x = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %not.a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i8> %x
}

define <vscale x 16 x i8> @vandn_vv_vp_swapped_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv16i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> splat (i8 -1), <vscale x 16 x i1> %mask, i32 %evl)
  %x = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %b, <vscale x 16 x i8> %not.a, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i8> %x
}

define <vscale x 16 x i8> @vandn_vx_vp_nxv16i8(i8 %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv16i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 16 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 16 x i8> %head.not.a, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %x = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %b, <vscale x 16 x i8> %splat.not.a, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i8> %x
}

declare <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
declare <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vandn_vv_vp_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv32i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> %mask, i32 %evl)
  %x = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %not.a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 %evl)
  ret <vscale x 32 x i8> %x
}

define <vscale x 32 x i8> @vandn_vv_vp_swapped_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv32i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> splat (i8 -1), <vscale x 32 x i1> %mask, i32 %evl)
  %x = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %b, <vscale x 32 x i8> %not.a, <vscale x 32 x i1> %mask, i32 %evl)
  ret <vscale x 32 x i8> %x
}

define <vscale x 32 x i8> @vandn_vx_vp_nxv32i8(i8 %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv32i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 32 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 32 x i8> %head.not.a, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %x = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %b, <vscale x 32 x i8> %splat.not.a, <vscale x 32 x i1> %mask, i32 %evl)
  ret <vscale x 32 x i8> %x
}

declare <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
declare <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)

define <vscale x 64 x i8> @vandn_vv_vp_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv64i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> %mask, i32 %evl)
  %x = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %not.a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 %evl)
  ret <vscale x 64 x i8> %x
}

define <vscale x 64 x i8> @vandn_vv_vp_swapped_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv64i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> splat (i8 -1), <vscale x 64 x i1> %mask, i32 %evl)
  %x = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %b, <vscale x 64 x i8> %not.a, <vscale x 64 x i1> %mask, i32 %evl)
  ret <vscale x 64 x i8> %x
}

define <vscale x 64 x i8> @vandn_vx_vp_nxv64i8(i8 %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv64i8:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i8 %a, -1
  %head.not.a = insertelement <vscale x 64 x i8> poison, i8 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 64 x i8> %head.not.a, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %x = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %b, <vscale x 64 x i8> %splat.not.a, <vscale x 64 x i1> %mask, i32 %evl)
  ret <vscale x 64 x i8> %x
}

declare <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vandn_vv_vp_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv1i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %not.a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i16> %x
}

define <vscale x 1 x i16> @vandn_vv_vp_swapped_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv1i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> splat (i16 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i16> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i16> %x
}

define <vscale x 1 x i16> @vandn_vx_vp_nxv1i16(i16 %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv1i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i16 %a, -1
  %head.not.a = insertelement <vscale x 1 x i16> poison, i16 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 1 x i16> %head.not.a, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i16> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i16> %x
}

declare <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vandn_vv_vp_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv2i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %not.a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i16> %x
}

define <vscale x 2 x i16> @vandn_vv_vp_swapped_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv2i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> splat (i16 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %b, <vscale x 2 x i16> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i16> %x
}

define <vscale x 2 x i16> @vandn_vx_vp_nxv2i16(i16 %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv2i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i16 %a, -1
  %head.not.a = insertelement <vscale x 2 x i16> poison, i16 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 2 x i16> %head.not.a, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %b, <vscale x 2 x i16> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i16> %x
}

declare <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vandn_vv_vp_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv4i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %not.a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i16> %x
}

define <vscale x 4 x i16> @vandn_vv_vp_swapped_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv4i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> splat (i16 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %b, <vscale x 4 x i16> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i16> %x
}

define <vscale x 4 x i16> @vandn_vx_vp_nxv4i16(i16 %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv4i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i16 %a, -1
  %head.not.a = insertelement <vscale x 4 x i16> poison, i16 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 4 x i16> %head.not.a, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %b, <vscale x 4 x i16> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i16> %x
}

declare <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vandn_vv_vp_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv8i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %not.a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i16> %x
}

define <vscale x 8 x i16> @vandn_vv_vp_swapped_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv8i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> splat (i16 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %b, <vscale x 8 x i16> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i16> %x
}

define <vscale x 8 x i16> @vandn_vx_vp_nxv8i16(i16 %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv8i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i16 %a, -1
  %head.not.a = insertelement <vscale x 8 x i16> poison, i16 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 8 x i16> %head.not.a, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %x = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %b, <vscale x 8 x i16> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i16> %x
}

declare <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i16> @vandn_vv_vp_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv16i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> %mask, i32 %evl)
  %x = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %not.a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i16> %x
}

define <vscale x 16 x i16> @vandn_vv_vp_swapped_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv16i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> splat (i16 -1), <vscale x 16 x i1> %mask, i32 %evl)
  %x = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %b, <vscale x 16 x i16> %not.a, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i16> %x
}

define <vscale x 16 x i16> @vandn_vx_vp_nxv16i16(i16 %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv16i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i16 %a, -1
  %head.not.a = insertelement <vscale x 16 x i16> poison, i16 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 16 x i16> %head.not.a, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %x = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %b, <vscale x 16 x i16> %splat.not.a, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i16> %x
}

declare <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
declare <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i16> @vandn_vv_vp_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv32i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> %mask, i32 %evl)
  %x = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %not.a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 %evl)
  ret <vscale x 32 x i16> %x
}

define <vscale x 32 x i16> @vandn_vv_vp_swapped_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv32i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> splat (i16 -1), <vscale x 32 x i1> %mask, i32 %evl)
  %x = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %b, <vscale x 32 x i16> %not.a, <vscale x 32 x i1> %mask, i32 %evl)
  ret <vscale x 32 x i16> %x
}

define <vscale x 32 x i16> @vandn_vx_vp_nxv32i16(i16 %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv32i16:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i16 %a, -1
  %head.not.a = insertelement <vscale x 32 x i16> poison, i16 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 32 x i16> %head.not.a, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %x = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %b, <vscale x 32 x i16> %splat.not.a, <vscale x 32 x i1> %mask, i32 %evl)
  ret <vscale x 32 x i16> %x
}

declare <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i32> @vandn_vv_vp_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv1i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %not.a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i32> %x
}

define <vscale x 1 x i32> @vandn_vv_vp_swapped_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv1i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> splat (i32 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i32> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i32> %x
}

define <vscale x 1 x i32> @vandn_vx_vp_nxv1i32(i32 %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv1i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i32 %a, -1
  %head.not.a = insertelement <vscale x 1 x i32> poison, i32 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 1 x i32> %head.not.a, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i32> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i32> %x
}

declare <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vandn_vv_vp_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv2i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %not.a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i32> %x
}

define <vscale x 2 x i32> @vandn_vv_vp_swapped_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv2i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> splat (i32 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %b, <vscale x 2 x i32> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i32> %x
}

define <vscale x 2 x i32> @vandn_vx_vp_nxv2i32(i32 %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv2i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i32 %a, -1
  %head.not.a = insertelement <vscale x 2 x i32> poison, i32 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 2 x i32> %head.not.a, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %b, <vscale x 2 x i32> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i32> %x
}

declare <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i32> @vandn_vv_vp_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv4i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %not.a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i32> %x
}

define <vscale x 4 x i32> @vandn_vv_vp_swapped_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v10, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv4i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> splat (i32 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %b, <vscale x 4 x i32> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i32> %x
}

define <vscale x 4 x i32> @vandn_vx_vp_nxv4i32(i32 %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv4i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i32 %a, -1
  %head.not.a = insertelement <vscale x 4 x i32> poison, i32 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 4 x i32> %head.not.a, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %b, <vscale x 4 x i32> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i32> %x
}

declare <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i32> @vandn_vv_vp_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv8i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %not.a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i32> %x
}

define <vscale x 8 x i32> @vandn_vv_vp_swapped_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v12, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv8i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> splat (i32 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %b, <vscale x 8 x i32> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i32> %x
}

define <vscale x 8 x i32> @vandn_vx_vp_nxv8i32(i32 %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv8i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i32 %a, -1
  %head.not.a = insertelement <vscale x 8 x i32> poison, i32 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 8 x i32> %head.not.a, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %x = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %b, <vscale x 8 x i32> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i32> %x
}

declare <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i32> @vandn_vv_vp_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv16i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> %mask, i32 %evl)
  %x = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %not.a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i32> %x
}

define <vscale x 16 x i32> @vandn_vv_vp_swapped_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v16, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv16i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> splat (i32 -1), <vscale x 16 x i1> %mask, i32 %evl)
  %x = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %b, <vscale x 16 x i32> %not.a, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i32> %x
}

define <vscale x 16 x i32> @vandn_vx_vp_nxv16i32(i32 %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vx_vp_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vx_vp_nxv16i32:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-ZVKB-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = xor i32 %a, -1
  %head.not.a = insertelement <vscale x 16 x i32> poison, i32 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 16 x i32> %head.not.a, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %x = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %b, <vscale x 16 x i32> %splat.not.a, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x i32> %x
}

declare <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i64> @vandn_vv_vp_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv1i64:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %not.a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i64> %x
}

define <vscale x 1 x i64> @vandn_vv_vp_swapped_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vv v8, v9, v8, v0.t
; CHECK-NEXT: ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv1i64:
; CHECK-ZVKB: # %bb.0:
; CHECK-ZVKB-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-ZVKB-NEXT: vandn.vv v8, v9, v8, v0.t
; CHECK-ZVKB-NEXT: ret
  %not.a = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> splat (i64 -1), <vscale x 1 x i1> %mask, i32 %evl)
  %x = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %b, <vscale x 1 x i64> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i64> %x
}
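
; For i64 elements on RV32 the inverted scalar does not fit in one GPR: both
; halves are inverted, stored to a stack slot, and splatted with a zero-strided
; vlse64.v before a masked vand.vv, with or without Zvkb. Only RV64 with Zvkb
; selects vandn.vx here.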

define <vscale x 1 x i64> @vandn_vx_vp_nxv1i64(i64 %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: vandn_vx_vp_nxv1i64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: addi sp, sp, -16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT: not a0, a0
; CHECK-RV32-NEXT: not a1, a1
; CHECK-RV32-NEXT: sw a1, 12(sp)
; CHECK-RV32-NEXT: sw a0, 8(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV32-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: vandn_vx_vp_nxv1i64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: not a0, a0
; CHECK-RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-RV64-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-RV64-NEXT: ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_vp_nxv1i64:
; CHECK-ZVKB32: # %bb.0:
; CHECK-ZVKB32-NEXT: addi sp, sp, -16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT: not a0, a0
; CHECK-ZVKB32-NEXT: not a1, a1
; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
; CHECK-ZVKB32-NEXT: sw a0, 8(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-ZVKB32-NEXT: vlse64.v v9, (a0), zero
; CHECK-ZVKB32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-ZVKB32-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_vp_nxv1i64:
; CHECK-ZVKB64: # %bb.0:
; CHECK-ZVKB64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-ZVKB64-NEXT: vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB64-NEXT: ret
  %not.a = xor i64 %a, -1
  %head.not.a = insertelement <vscale x 1 x i64> poison, i64 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 1 x i64> %head.not.a, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %b, <vscale x 1 x i64> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x i64> %x
}
1160 declare <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
1161 declare <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i64> @vandn_vv_vp_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv2i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %not.a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i64> %x
}

define <vscale x 2 x i64> @vandn_vv_vp_swapped_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv2i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v10, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> splat (i64 -1), <vscale x 2 x i1> %mask, i32 %evl)
  %x = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %b, <vscale x 2 x i64> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i64> %x
}

define <vscale x 2 x i64> @vandn_vx_vp_nxv2i64(i64 %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: vandn_vx_vp_nxv2i64:
; CHECK-RV32:       # %bb.0:
; CHECK-RV32-NEXT:    addi sp, sp, -16
; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT:    not a0, a0
; CHECK-RV32-NEXT:    not a1, a1
; CHECK-RV32-NEXT:    sw a1, 12(sp)
; CHECK-RV32-NEXT:    sw a0, 8(sp)
; CHECK-RV32-NEXT:    addi a0, sp, 8
; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-RV32-NEXT:    vlse64.v v10, (a0), zero
; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV32-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-RV32-NEXT:    addi sp, sp, 16
; CHECK-RV32-NEXT:    ret
;
; CHECK-RV64-LABEL: vandn_vx_vp_nxv2i64:
; CHECK-RV64:       # %bb.0:
; CHECK-RV64-NEXT:    not a0, a0
; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-RV64-NEXT:    ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_vp_nxv2i64:
; CHECK-ZVKB32:       # %bb.0:
; CHECK-ZVKB32-NEXT:    addi sp, sp, -16
; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT:    not a0, a0
; CHECK-ZVKB32-NEXT:    not a1, a1
; CHECK-ZVKB32-NEXT:    sw a1, 12(sp)
; CHECK-ZVKB32-NEXT:    sw a0, 8(sp)
; CHECK-ZVKB32-NEXT:    addi a0, sp, 8
; CHECK-ZVKB32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; CHECK-ZVKB32-NEXT:    vlse64.v v10, (a0), zero
; CHECK-ZVKB32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-ZVKB32-NEXT:    vand.vv v8, v8, v10, v0.t
; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
; CHECK-ZVKB32-NEXT:    ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_vp_nxv2i64:
; CHECK-ZVKB64:       # %bb.0:
; CHECK-ZVKB64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-ZVKB64-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB64-NEXT:    ret
  %not.a = xor i64 %a, -1
  %head.not.a = insertelement <vscale x 2 x i64> poison, i64 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 2 x i64> %head.not.a, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %b, <vscale x 2 x i64> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x i64> %x
}

declare <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i64> @vandn_vv_vp_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv4i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %not.a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i64> %x
}

define <vscale x 4 x i64> @vandn_vv_vp_swapped_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv4i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v12, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> splat (i64 -1), <vscale x 4 x i1> %mask, i32 %evl)
  %x = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %b, <vscale x 4 x i64> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i64> %x
}

define <vscale x 4 x i64> @vandn_vx_vp_nxv4i64(i64 %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: vandn_vx_vp_nxv4i64:
; CHECK-RV32:       # %bb.0:
; CHECK-RV32-NEXT:    addi sp, sp, -16
; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT:    not a0, a0
; CHECK-RV32-NEXT:    not a1, a1
; CHECK-RV32-NEXT:    sw a1, 12(sp)
; CHECK-RV32-NEXT:    sw a0, 8(sp)
; CHECK-RV32-NEXT:    addi a0, sp, 8
; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-RV32-NEXT:    vlse64.v v12, (a0), zero
; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV32-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-RV32-NEXT:    addi sp, sp, 16
; CHECK-RV32-NEXT:    ret
;
; CHECK-RV64-LABEL: vandn_vx_vp_nxv4i64:
; CHECK-RV64:       # %bb.0:
; CHECK-RV64-NEXT:    not a0, a0
; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-RV64-NEXT:    ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_vp_nxv4i64:
; CHECK-ZVKB32:       # %bb.0:
; CHECK-ZVKB32-NEXT:    addi sp, sp, -16
; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT:    not a0, a0
; CHECK-ZVKB32-NEXT:    not a1, a1
; CHECK-ZVKB32-NEXT:    sw a1, 12(sp)
; CHECK-ZVKB32-NEXT:    sw a0, 8(sp)
; CHECK-ZVKB32-NEXT:    addi a0, sp, 8
; CHECK-ZVKB32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; CHECK-ZVKB32-NEXT:    vlse64.v v12, (a0), zero
; CHECK-ZVKB32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-ZVKB32-NEXT:    vand.vv v8, v8, v12, v0.t
; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
; CHECK-ZVKB32-NEXT:    ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_vp_nxv4i64:
; CHECK-ZVKB64:       # %bb.0:
; CHECK-ZVKB64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-ZVKB64-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB64-NEXT:    ret
  %not.a = xor i64 %a, -1
  %head.not.a = insertelement <vscale x 4 x i64> poison, i64 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 4 x i64> %head.not.a, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %b, <vscale x 4 x i64> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x i64> %x
}

declare <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
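
; With LMUL=8 each vector operand occupies eight registers, so %a is passed in
; v8 and %b in v16 in the vv tests below.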
define <vscale x 8 x i64> @vandn_vv_vp_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_nxv8i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %not.a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i64> %x
}

define <vscale x 8 x i64> @vandn_vv_vp_swapped_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVKB-LABEL: vandn_vv_vp_swapped_nxv8i64:
; CHECK-ZVKB:       # %bb.0:
; CHECK-ZVKB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVKB-NEXT:    vandn.vv v8, v16, v8, v0.t
; CHECK-ZVKB-NEXT:    ret
  %not.a = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> splat (i64 -1), <vscale x 8 x i1> %mask, i32 %evl)
  %x = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %b, <vscale x 8 x i64> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i64> %x
}

define <vscale x 8 x i64> @vandn_vx_vp_nxv8i64(i64 %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: vandn_vx_vp_nxv8i64:
; CHECK-RV32:       # %bb.0:
; CHECK-RV32-NEXT:    addi sp, sp, -16
; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-RV32-NEXT:    not a0, a0
; CHECK-RV32-NEXT:    not a1, a1
; CHECK-RV32-NEXT:    sw a1, 12(sp)
; CHECK-RV32-NEXT:    sw a0, 8(sp)
; CHECK-RV32-NEXT:    addi a0, sp, 8
; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-RV32-NEXT:    vlse64.v v16, (a0), zero
; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-RV32-NEXT:    addi sp, sp, 16
; CHECK-RV32-NEXT:    ret
;
; CHECK-RV64-LABEL: vandn_vx_vp_nxv8i64:
; CHECK-RV64:       # %bb.0:
; CHECK-RV64-NEXT:    not a0, a0
; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-RV64-NEXT:    ret
;
; CHECK-ZVKB32-LABEL: vandn_vx_vp_nxv8i64:
; CHECK-ZVKB32:       # %bb.0:
; CHECK-ZVKB32-NEXT:    addi sp, sp, -16
; CHECK-ZVKB32-NEXT:    .cfi_def_cfa_offset 16
; CHECK-ZVKB32-NEXT:    not a0, a0
; CHECK-ZVKB32-NEXT:    not a1, a1
; CHECK-ZVKB32-NEXT:    sw a1, 12(sp)
; CHECK-ZVKB32-NEXT:    sw a0, 8(sp)
; CHECK-ZVKB32-NEXT:    addi a0, sp, 8
; CHECK-ZVKB32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-ZVKB32-NEXT:    vlse64.v v16, (a0), zero
; CHECK-ZVKB32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-ZVKB32-NEXT:    vand.vv v8, v8, v16, v0.t
; CHECK-ZVKB32-NEXT:    addi sp, sp, 16
; CHECK-ZVKB32-NEXT:    ret
;
; CHECK-ZVKB64-LABEL: vandn_vx_vp_nxv8i64:
; CHECK-ZVKB64:       # %bb.0:
; CHECK-ZVKB64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-ZVKB64-NEXT:    vandn.vx v8, v8, a0, v0.t
; CHECK-ZVKB64-NEXT:    ret
  %not.a = xor i64 %a, -1
  %head.not.a = insertelement <vscale x 8 x i64> poison, i64 %not.a, i32 0
  %splat.not.a = shufflevector <vscale x 8 x i64> %head.not.a, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %x = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %b, <vscale x 8 x i64> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i64> %x
}