; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

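; Test lowering of select with a scalar i1 condition on scalable vector
; types. Mask (i1) result vectors are selected via mask-register logic
; (vmandn.mm/vmand.mm/vmor.mm); wider element types splat the condition
; and select with vmerge.vvm.
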
define <vscale x 1 x i1> @select_nxv1i1(i1 zeroext %c, <vscale x 1 x i1> %a, <vscale x 1 x i1> %b) {
; CHECK-LABEL: select_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 1 x i1> %a, <vscale x 1 x i1> %b
  ret <vscale x 1 x i1> %v
}

define <vscale x 1 x i1> @selectcc_nxv1i1(i1 signext %a, i1 signext %b, <vscale x 1 x i1> %c, <vscale x 1 x i1> %d) {
; CHECK-LABEL: selectcc_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 1 x i1> %c, <vscale x 1 x i1> %d
  ret <vscale x 1 x i1> %v
}

define <vscale x 2 x i1> @select_nxv2i1(i1 zeroext %c, <vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
; CHECK-LABEL: select_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 2 x i1> %a, <vscale x 2 x i1> %b
  ret <vscale x 2 x i1> %v
}

define <vscale x 2 x i1> @selectcc_nxv2i1(i1 signext %a, i1 signext %b, <vscale x 2 x i1> %c, <vscale x 2 x i1> %d) {
; CHECK-LABEL: selectcc_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 2 x i1> %c, <vscale x 2 x i1> %d
  ret <vscale x 2 x i1> %v
}

define <vscale x 4 x i1> @select_nxv4i1(i1 zeroext %c, <vscale x 4 x i1> %a, <vscale x 4 x i1> %b) {
; CHECK-LABEL: select_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 4 x i1> %a, <vscale x 4 x i1> %b
  ret <vscale x 4 x i1> %v
}

define <vscale x 4 x i1> @selectcc_nxv4i1(i1 signext %a, i1 signext %b, <vscale x 4 x i1> %c, <vscale x 4 x i1> %d) {
; CHECK-LABEL: selectcc_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 4 x i1> %c, <vscale x 4 x i1> %d
  ret <vscale x 4 x i1> %v
}

define <vscale x 8 x i1> @select_nxv8i1(i1 zeroext %c, <vscale x 8 x i1> %a, <vscale x 8 x i1> %b) {
; CHECK-LABEL: select_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 8 x i1> %a, <vscale x 8 x i1> %b
  ret <vscale x 8 x i1> %v
}

define <vscale x 8 x i1> @selectcc_nxv8i1(i1 signext %a, i1 signext %b, <vscale x 8 x i1> %c, <vscale x 8 x i1> %d) {
; CHECK-LABEL: selectcc_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsne.vi v9, v9, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 8 x i1> %c, <vscale x 8 x i1> %d
  ret <vscale x 8 x i1> %v
}

define <vscale x 16 x i1> @select_nxv16i1(i1 zeroext %c, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: select_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v9, v10, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b
  ret <vscale x 16 x i1> %v
}

define <vscale x 16 x i1> @selectcc_nxv16i1(i1 signext %a, i1 signext %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d) {
; CHECK-LABEL: selectcc_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v9, v10, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 16 x i1> %c, <vscale x 16 x i1> %d
  ret <vscale x 16 x i1> %v
}

define <vscale x 32 x i1> @select_nxv32i1(i1 zeroext %c, <vscale x 32 x i1> %a, <vscale x 32 x i1> %b) {
; CHECK-LABEL: select_nxv32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v9, v12, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 32 x i1> %a, <vscale x 32 x i1> %b
  ret <vscale x 32 x i1> %v
}

define <vscale x 32 x i1> @selectcc_nxv32i1(i1 signext %a, i1 signext %b, <vscale x 32 x i1> %c, <vscale x 32 x i1> %d) {
; CHECK-LABEL: selectcc_nxv32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v9, v12, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 32 x i1> %c, <vscale x 32 x i1> %d
  ret <vscale x 32 x i1> %v
}

define <vscale x 64 x i1> @select_nxv64i1(i1 zeroext %c, <vscale x 64 x i1> %a, <vscale x 64 x i1> %b) {
; CHECK-LABEL: select_nxv64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v9, v16, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 64 x i1> %a, <vscale x 64 x i1> %b
  ret <vscale x 64 x i1> %v
}

define <vscale x 64 x i1> @selectcc_nxv64i1(i1 signext %a, i1 signext %b, <vscale x 64 x i1> %c, <vscale x 64 x i1> %d) {
; CHECK-LABEL: selectcc_nxv64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    andi a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v9, v16, 0
; CHECK-NEXT:    vmandn.mm v8, v8, v9
; CHECK-NEXT:    vmand.mm v9, v0, v9
; CHECK-NEXT:    vmor.mm v0, v9, v8
; CHECK-NEXT:    ret
  %cmp = icmp ne i1 %a, %b
  %v = select i1 %cmp, <vscale x 64 x i1> %c, <vscale x 64 x i1> %d
  ret <vscale x 64 x i1> %v
}

define <vscale x 1 x i8> @select_nxv1i8(i1 zeroext %c, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
; CHECK-LABEL: select_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @selectcc_nxv1i8(i8 signext %a, i8 signext %b, <vscale x 1 x i8> %c, <vscale x 1 x i8> %d) {
; CHECK-LABEL: selectcc_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 1 x i8> %c, <vscale x 1 x i8> %d
  ret <vscale x 1 x i8> %v
}

define <vscale x 2 x i8> @select_nxv2i8(i1 zeroext %c, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
; CHECK-LABEL: select_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @selectcc_nxv2i8(i8 signext %a, i8 signext %b, <vscale x 2 x i8> %c, <vscale x 2 x i8> %d) {
; CHECK-LABEL: selectcc_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 2 x i8> %c, <vscale x 2 x i8> %d
  ret <vscale x 2 x i8> %v
}

define <vscale x 4 x i8> @select_nxv4i8(i1 zeroext %c, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
; CHECK-LABEL: select_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @selectcc_nxv4i8(i8 signext %a, i8 signext %b, <vscale x 4 x i8> %c, <vscale x 4 x i8> %d) {
; CHECK-LABEL: selectcc_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 4 x i8> %c, <vscale x 4 x i8> %d
  ret <vscale x 4 x i8> %v
}

define <vscale x 8 x i8> @select_nxv8i8(i1 zeroext %c, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
; CHECK-LABEL: select_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @selectcc_nxv8i8(i8 signext %a, i8 signext %b, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d) {
; CHECK-LABEL: selectcc_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 8 x i8> %c, <vscale x 8 x i8> %d
  ret <vscale x 8 x i8> %v
}

define <vscale x 16 x i8> @select_nxv16i8(i1 zeroext %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: select_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @selectcc_nxv16i8(i8 signext %a, i8 signext %b, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d) {
; CHECK-LABEL: selectcc_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 16 x i8> %c, <vscale x 16 x i8> %d
  ret <vscale x 16 x i8> %v
}

define <vscale x 32 x i8> @select_nxv32i8(i1 zeroext %c, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
; CHECK-LABEL: select_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @selectcc_nxv32i8(i8 signext %a, i8 signext %b, <vscale x 32 x i8> %c, <vscale x 32 x i8> %d) {
; CHECK-LABEL: selectcc_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 32 x i8> %c, <vscale x 32 x i8> %d
  ret <vscale x 32 x i8> %v
}

define <vscale x 64 x i8> @select_nxv64i8(i1 zeroext %c, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
; CHECK-LABEL: select_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @selectcc_nxv64i8(i8 signext %a, i8 signext %b, <vscale x 64 x i8> %c, <vscale x 64 x i8> %d) {
; CHECK-LABEL: selectcc_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i8 %a, %b
  %v = select i1 %cmp, <vscale x 64 x i8> %c, <vscale x 64 x i8> %d
  ret <vscale x 64 x i8> %v
}

define <vscale x 1 x i16> @select_nxv1i16(i1 zeroext %c, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
; CHECK-LABEL: select_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @selectcc_nxv1i16(i16 signext %a, i16 signext %b, <vscale x 1 x i16> %c, <vscale x 1 x i16> %d) {
; CHECK-LABEL: selectcc_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i16 %a, %b
  %v = select i1 %cmp, <vscale x 1 x i16> %c, <vscale x 1 x i16> %d
  ret <vscale x 1 x i16> %v
}

define <vscale x 2 x i16> @select_nxv2i16(i1 zeroext %c, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
; CHECK-LABEL: select_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @selectcc_nxv2i16(i16 signext %a, i16 signext %b, <vscale x 2 x i16> %c, <vscale x 2 x i16> %d) {
; CHECK-LABEL: selectcc_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i16 %a, %b
  %v = select i1 %cmp, <vscale x 2 x i16> %c, <vscale x 2 x i16> %d
  ret <vscale x 2 x i16> %v
}

define <vscale x 4 x i16> @select_nxv4i16(i1 zeroext %c, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
; CHECK-LABEL: select_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @selectcc_nxv4i16(i16 signext %a, i16 signext %b, <vscale x 4 x i16> %c, <vscale x 4 x i16> %d) {
; CHECK-LABEL: selectcc_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i16 %a, %b
  %v = select i1 %cmp, <vscale x 4 x i16> %c, <vscale x 4 x i16> %d
  ret <vscale x 4 x i16> %v
}

define <vscale x 8 x i16> @select_nxv8i16(i1 zeroext %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: select_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @selectcc_nxv8i16(i16 signext %a, i16 signext %b, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d) {
; CHECK-LABEL: selectcc_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i16 %a, %b
  %v = select i1 %cmp, <vscale x 8 x i16> %c, <vscale x 8 x i16> %d
  ret <vscale x 8 x i16> %v
}

define <vscale x 16 x i16> @select_nxv16i16(i1 zeroext %c, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
; CHECK-LABEL: select_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @selectcc_nxv16i16(i16 signext %a, i16 signext %b, <vscale x 16 x i16> %c, <vscale x 16 x i16> %d) {
; CHECK-LABEL: selectcc_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i16 %a, %b
  %v = select i1 %cmp, <vscale x 16 x i16> %c, <vscale x 16 x i16> %d
  ret <vscale x 16 x i16> %v
}

define <vscale x 32 x i16> @select_nxv32i16(i1 zeroext %c, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
; CHECK-LABEL: select_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @selectcc_nxv32i16(i16 signext %a, i16 signext %b, <vscale x 32 x i16> %c, <vscale x 32 x i16> %d) {
; CHECK-LABEL: selectcc_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i16 %a, %b
  %v = select i1 %cmp, <vscale x 32 x i16> %c, <vscale x 32 x i16> %d
  ret <vscale x 32 x i16> %v
}

define <vscale x 1 x i32> @select_nxv1i32(i1 zeroext %c, <vscale x 1 x i32> %a, <vscale x 1 x i32> %b) {
; CHECK-LABEL: select_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 1 x i32> %a, <vscale x 1 x i32> %b
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @selectcc_nxv1i32(i32 signext %a, i32 signext %b, <vscale x 1 x i32> %c, <vscale x 1 x i32> %d) {
; CHECK-LABEL: selectcc_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i32 %a, %b
  %v = select i1 %cmp, <vscale x 1 x i32> %c, <vscale x 1 x i32> %d
  ret <vscale x 1 x i32> %v
}

define <vscale x 2 x i32> @select_nxv2i32(i1 zeroext %c, <vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: select_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 2 x i32> %a, <vscale x 2 x i32> %b
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @selectcc_nxv2i32(i32 signext %a, i32 signext %b, <vscale x 2 x i32> %c, <vscale x 2 x i32> %d) {
; CHECK-LABEL: selectcc_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i32 %a, %b
  %v = select i1 %cmp, <vscale x 2 x i32> %c, <vscale x 2 x i32> %d
  ret <vscale x 2 x i32> %v
}

define <vscale x 4 x i32> @select_nxv4i32(i1 zeroext %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: select_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @selectcc_nxv4i32(i32 signext %a, i32 signext %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d) {
; CHECK-LABEL: selectcc_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i32 %a, %b
  %v = select i1 %cmp, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d
  ret <vscale x 4 x i32> %v
}

define <vscale x 8 x i32> @select_nxv8i32(i1 zeroext %c, <vscale x 8 x i32> %a, <vscale x 8 x i32> %b) {
; CHECK-LABEL: select_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 8 x i32> %a, <vscale x 8 x i32> %b
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @selectcc_nxv8i32(i32 signext %a, i32 signext %b, <vscale x 8 x i32> %c, <vscale x 8 x i32> %d) {
; CHECK-LABEL: selectcc_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i32 %a, %b
  %v = select i1 %cmp, <vscale x 8 x i32> %c, <vscale x 8 x i32> %d
  ret <vscale x 8 x i32> %v
}

define <vscale x 16 x i32> @select_nxv16i32(i1 zeroext %c, <vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
; CHECK-LABEL: select_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 16 x i32> %a, <vscale x 16 x i32> %b
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @selectcc_nxv16i32(i32 signext %a, i32 signext %b, <vscale x 16 x i32> %c, <vscale x 16 x i32> %d) {
; CHECK-LABEL: selectcc_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xor a0, a0, a1
; CHECK-NEXT:    snez a0, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %cmp = icmp ne i32 %a, %b
  %v = select i1 %cmp, <vscale x 16 x i32> %c, <vscale x 16 x i32> %d
  ret <vscale x 16 x i32> %v
}

define <vscale x 1 x i64> @select_nxv1i64(i1 zeroext %c, <vscale x 1 x i64> %a, <vscale x 1 x i64> %b) {
; CHECK-LABEL: select_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsne.vi v0, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 1 x i64> %a, <vscale x 1 x i64> %b
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @selectcc_nxv1i64(i64 signext %a, i64 signext %b, <vscale x 1 x i64> %c, <vscale x 1 x i64> %d) {
; RV32-LABEL: selectcc_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    xor a1, a1, a3
; RV32-NEXT:    xor a0, a0, a2
; RV32-NEXT:    or a0, a0, a1
; RV32-NEXT:    snez a0, a0
; RV32-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; RV32-NEXT:    vmv.v.x v10, a0
; RV32-NEXT:    vmsne.vi v0, v10, 0
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v9, v8, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: selectcc_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    xor a0, a0, a1
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; RV64-NEXT:    vmv.v.x v10, a0
; RV64-NEXT:    vmsne.vi v0, v10, 0
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; RV64-NEXT:    vmerge.vvm v8, v9, v8, v0
; RV64-NEXT:    ret
  %cmp = icmp ne i64 %a, %b
  %v = select i1 %cmp, <vscale x 1 x i64> %c, <vscale x 1 x i64> %d
  ret <vscale x 1 x i64> %v
}

define <vscale x 2 x i64> @select_nxv2i64(i1 zeroext %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: select_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @selectcc_nxv2i64(i64 signext %a, i64 signext %b, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d) {
; RV32-LABEL: selectcc_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    xor a1, a1, a3
; RV32-NEXT:    xor a0, a0, a2
; RV32-NEXT:    or a0, a0, a1
; RV32-NEXT:    snez a0, a0
; RV32-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; RV32-NEXT:    vmv.v.x v12, a0
; RV32-NEXT:    vmsne.vi v0, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v10, v8, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: selectcc_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    xor a0, a0, a1
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; RV64-NEXT:    vmv.v.x v12, a0
; RV64-NEXT:    vmsne.vi v0, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT:    vmerge.vvm v8, v10, v8, v0
; RV64-NEXT:    ret
  %cmp = icmp ne i64 %a, %b
  %v = select i1 %cmp, <vscale x 2 x i64> %c, <vscale x 2 x i64> %d
  ret <vscale x 2 x i64> %v
}

define <vscale x 4 x i64> @select_nxv4i64(i1 zeroext %c, <vscale x 4 x i64> %a, <vscale x 4 x i64> %b) {
; CHECK-LABEL: select_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v16, a0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 4 x i64> %a, <vscale x 4 x i64> %b
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @selectcc_nxv4i64(i64 signext %a, i64 signext %b, <vscale x 4 x i64> %c, <vscale x 4 x i64> %d) {
; RV32-LABEL: selectcc_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    xor a1, a1, a3
; RV32-NEXT:    xor a0, a0, a2
; RV32-NEXT:    or a0, a0, a1
; RV32-NEXT:    snez a0, a0
; RV32-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; RV32-NEXT:    vmv.v.x v16, a0
; RV32-NEXT:    vmsne.vi v0, v16, 0
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v12, v8, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: selectcc_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    xor a0, a0, a1
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; RV64-NEXT:    vmv.v.x v16, a0
; RV64-NEXT:    vmsne.vi v0, v16, 0
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; RV64-NEXT:    vmerge.vvm v8, v12, v8, v0
; RV64-NEXT:    ret
  %cmp = icmp ne i64 %a, %b
  %v = select i1 %cmp, <vscale x 4 x i64> %c, <vscale x 4 x i64> %d
  ret <vscale x 4 x i64> %v
}

define <vscale x 8 x i64> @select_nxv8i64(i1 zeroext %c, <vscale x 8 x i64> %a, <vscale x 8 x i64> %b) {
; CHECK-LABEL: select_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v24, a0
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT:    ret
  %v = select i1 %c, <vscale x 8 x i64> %a, <vscale x 8 x i64> %b
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @selectcc_nxv8i64(i64 signext %a, i64 signext %b, <vscale x 8 x i64> %c, <vscale x 8 x i64> %d) {
; RV32-LABEL: selectcc_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    xor a1, a1, a3
; RV32-NEXT:    xor a0, a0, a2
; RV32-NEXT:    or a0, a0, a1
; RV32-NEXT:    snez a0, a0
; RV32-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; RV32-NEXT:    vmv.v.x v24, a0
; RV32-NEXT:    vmsne.vi v0, v24, 0
; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT:    vmerge.vvm v8, v16, v8, v0
; RV32-NEXT:    ret
;
; RV64-LABEL: selectcc_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    xor a0, a0, a1
; RV64-NEXT:    snez a0, a0
; RV64-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; RV64-NEXT:    vmv.v.x v24, a0
; RV64-NEXT:    vmsne.vi v0, v24, 0
; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT:    vmerge.vvm v8, v16, v8, v0
; RV64-NEXT:    ret
  %cmp = icmp ne i64 %a, %b
  %v = select i1 %cmp, <vscale x 8 x i64> %c, <vscale x 8 x i64> %d
  ret <vscale x 8 x i64> %v
}