; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
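
; This file tests the fixed-length vector-predicated xor intrinsics
; (@llvm.vp.xor.*) in vector-vector (vxor.vv), vector-scalar (vxor.vx),
; and vector-immediate (vxor.vi) form, both masked and unmasked (via an
; all-true mask splat). The <8 x i7> case below also checks that the
; illegal i7 element type is legalized to e8.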
declare <8 x i7> @llvm.vp.xor.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)

define <8 x i7> @vxor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i7> @llvm.vp.xor.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i7> %v
}

declare <2 x i8> @llvm.vp.xor.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)

define <2 x i8> @vxor_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vxor_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vxor_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

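; The commuted form below checks that a splat in the first operand
; position is still matched to the scalar form vxor.vx.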
define <2 x i8> @vxor_vx_v2i8_commute(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v2i8_commute:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %vb, <2 x i8> %va, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vxor_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vxor_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> splat (i8 7), <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vxor_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> splat (i8 7), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

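; xor with an all-ones splat is a bitwise NOT, so the _1 variants below
; should lower to vnot.v rather than vxor.vi.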
define <2 x i8> @vxor_vi_v2i8_1(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vxor_vi_v2i8_unmasked_1(<2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i8_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> splat (i8 -1), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}

declare <4 x i8> @llvm.vp.xor.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)

define <4 x i8> @vxor_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vxor_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vxor_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vxor_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vxor_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> splat (i8 7), <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vxor_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> splat (i8 7), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vxor_vi_v4i8_1(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vxor_vi_v4i8_unmasked_1(<4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i8_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> splat (i8 -1), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}

declare <8 x i8> @llvm.vp.xor.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)

define <8 x i8> @vxor_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vxor_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vxor_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vxor_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vxor_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> splat (i8 7), <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vxor_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> splat (i8 7), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vxor_vi_v8i8_1(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vxor_vi_v8i8_unmasked_1(<8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i8_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> splat (i8 -1), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}

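; <9 x i8> is a non-power-of-two vector length; it should be widened to
; the next container (note the e8, m1 vtype below, matching the
; <16 x i8> tests).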
declare <9 x i8> @llvm.vp.xor.v9i8(<9 x i8>, <9 x i8>, <9 x i1>, i32)

define <9 x i8> @vxor_vv_v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v9i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 %evl)
  ret <9 x i8> %v
}

define <9 x i8> @vxor_vv_v9i8_unmasked(<9 x i8> %va, <9 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v9i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> splat (i1 true), i32 %evl)
  ret <9 x i8> %v
}

define <9 x i8> @vxor_vx_v9i8(<9 x i8> %va, i8 %b, <9 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v9i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <9 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <9 x i8> %elt.head, <9 x i8> poison, <9 x i32> zeroinitializer
  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
  ret <9 x i8> %v
}

define <9 x i8> @vxor_vx_v9i8_unmasked(<9 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v9i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <9 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <9 x i8> %elt.head, <9 x i8> poison, <9 x i32> zeroinitializer
  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> splat (i1 true), i32 %evl)
  ret <9 x i8> %v
}

define <9 x i8> @vxor_vi_v9i8(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v9i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> splat (i8 7), <9 x i1> %m, i32 %evl)
  ret <9 x i8> %v
}

define <9 x i8> @vxor_vi_v9i8_unmasked(<9 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v9i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> splat (i8 7), <9 x i1> splat (i1 true), i32 %evl)
  ret <9 x i8> %v
}

define <9 x i8> @vxor_vi_v9i8_1(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v9i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> splat (i8 -1), <9 x i1> %m, i32 %evl)
  ret <9 x i8> %v
}

define <9 x i8> @vxor_vi_v9i8_unmasked_1(<9 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v9i8_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> splat (i8 -1), <9 x i1> splat (i1 true), i32 %evl)
  ret <9 x i8> %v
}

declare <16 x i8> @llvm.vp.xor.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)

define <16 x i8> @vxor_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vxor_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vxor_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vxor_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vxor_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> splat (i8 7), <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vxor_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> splat (i8 7), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vxor_vi_v16i8_1(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vxor_vi_v16i8_unmasked_1(<16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i8_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> splat (i8 -1), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}

declare <2 x i16> @llvm.vp.xor.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32)

define <2 x i16> @vxor_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vxor_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vxor_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vxor_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vxor_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> splat (i16 7), <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vxor_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> splat (i16 7), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vxor_vi_v2i16_1(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vxor_vi_v2i16_unmasked_1(<2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i16_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> splat (i16 -1), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}

declare <4 x i16> @llvm.vp.xor.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32)

define <4 x i16> @vxor_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vxor_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vxor_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vxor_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vxor_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> splat (i16 7), <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vxor_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> splat (i16 7), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vxor_vi_v4i16_1(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vxor_vi_v4i16_unmasked_1(<4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i16_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> splat (i16 -1), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}

declare <8 x i16> @llvm.vp.xor.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32)

define <8 x i16> @vxor_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vxor_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vxor_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vxor_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vxor_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> splat (i16 7), <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vxor_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> splat (i16 7), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vxor_vi_v8i16_1(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vxor_vi_v8i16_unmasked_1(<8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i16_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> splat (i16 -1), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}

declare <16 x i16> @llvm.vp.xor.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32)

define <16 x i16> @vxor_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vxor_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vxor_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vxor_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vxor_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> splat (i16 7), <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vxor_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> splat (i16 7), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vxor_vi_v16i16_1(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vxor_vi_v16i16_unmasked_1(<16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i16_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> splat (i16 -1), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}

declare <2 x i32> @llvm.vp.xor.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)

define <2 x i32> @vxor_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vxor_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vxor_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vxor_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vxor_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> splat (i32 7), <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vxor_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> splat (i32 7), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vxor_vi_v2i32_1(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vxor_vi_v2i32_unmasked_1(<2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i32_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> splat (i32 -1), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}

declare <4 x i32> @llvm.vp.xor.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define <4 x i32> @vxor_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vxor_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vxor_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vxor_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vxor_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> splat (i32 7), <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vxor_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> splat (i32 7), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vxor_vi_v4i32_1(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vxor_vi_v4i32_unmasked_1(<4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i32_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> splat (i32 -1), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}

declare <8 x i32> @llvm.vp.xor.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)

define <8 x i32> @vxor_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vxor_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vxor_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vxor_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vxor_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> splat (i32 7), <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vxor_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> splat (i32 7), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vxor_vi_v8i32_1(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vxor_vi_v8i32_unmasked_1(<8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i32_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> splat (i32 -1), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i32> %v
}

declare <16 x i32> @llvm.vp.xor.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32)

define <16 x i32> @vxor_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vxor_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vxor_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vxor_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vxor.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vxor_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> splat (i32 7), <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vxor_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> splat (i32 7), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vxor_vi_v16i32_1(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vxor_vi_v16i32_unmasked_1(<16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i32_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> splat (i32 -1), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}

declare <2 x i64> @llvm.vp.xor.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32)

define <2 x i64> @vxor_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vxor_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}

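; On RV32 an i64 scalar does not fit in a single GPR, so the vx tests
; below materialize the i64 splat by storing the two 32-bit halves to
; the stack and reloading them with a zero-stride vlse64.v, then use
; vxor.vv; RV64 can use vxor.vx directly.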
define <2 x i64> @vxor_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vxor.vv v8, v8, v9, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vxor_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_v2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vxor.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_v2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vxor_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> splat (i64 7), <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vxor_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> splat (i64 7), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vxor_vi_v2i64_1(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vxor_vi_v2i64_unmasked_1(<2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i64_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1), <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}

declare <4 x i64> @llvm.vp.xor.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32)

define <4 x i64> @vxor_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vxor_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vxor_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vxor.vv v8, v8, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vxor_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_v4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vxor.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_v4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vxor_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> splat (i64 7), <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vxor_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> splat (i64 7), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vxor_vi_v4i64_1(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vxor_vi_v4i64_unmasked_1(<4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i64_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> splat (i64 -1), <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}

declare <8 x i64> @llvm.vp.xor.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32)

define <8 x i64> @vxor_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vxor_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vxor_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_v8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vxor.vv v8, v8, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_v8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vxor_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_v8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vxor.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_v8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vxor_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> splat (i64 7), <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vxor_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> splat (i64 7), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vxor_vi_v8i64_1(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vxor_vi_v8i64_unmasked_1(<8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i64_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> splat (i64 -1), <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}

declare <16 x i64> @llvm.vp.xor.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)

define <16 x i64> @vxor_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vxor_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v16i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vxor.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vxor_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_v16i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vxor.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_v16i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vxor_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vxor_vx_v16i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vxor.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vxor_vx_v16i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vxor.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vxor_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> splat (i64 7), <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vxor_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vxor.vi v8, v8, 7
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> splat (i64 7), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vxor_vi_v16i64_1(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vxor_vi_v16i64_unmasked_1(<16 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i64_unmasked_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vnot.v v8, v8
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> splat (i64 -1), <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}