; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+m,+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+m,+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
define <vscale x 3 x i1> @icmp_eq_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb) {
; CHECK-LABEL: icmp_eq_vv_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmseq.vv v0, v8, v9
; CHECK-NEXT:    ret
  %vc = icmp eq <vscale x 3 x i8> %va, %vb
  ret <vscale x 3 x i1> %vc
}

define <vscale x 3 x i1> @icmp_eq_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_eq_vx_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmseq.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 3 x i8> %head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
  %vc = icmp eq <vscale x 3 x i8> %va, %splat
  ret <vscale x 3 x i1> %vc
}

define <vscale x 3 x i1> @icmp_eq_xv_nxv3i8(<vscale x 3 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_eq_xv_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmseq.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 3 x i8> %head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
  %vc = icmp eq <vscale x 3 x i8> %splat, %va
  ret <vscale x 3 x i1> %vc
}
define <vscale x 8 x i1> @icmp_eq_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_eq_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmseq.vv v0, v8, v9
; CHECK-NEXT:    ret
  %vc = icmp eq <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_eq_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_eq_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmseq.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp eq <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_eq_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_eq_xv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmseq.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp eq <vscale x 8 x i8> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_eq_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    ret
  %vc = icmp eq <vscale x 8 x i8> %va, splat (i8 0)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_eq_vi_nxv8i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmseq.vi v0, v8, 5
; CHECK-NEXT:    ret
  %vc = icmp eq <vscale x 8 x i8> %va, splat (i8 5)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_eq_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_eq_iv_nxv8i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmseq.vi v0, v8, 5
; CHECK-NEXT:    ret
  %vc = icmp eq <vscale x 8 x i8> splat (i8 5), %va
  ret <vscale x 8 x i1> %vc
}
define <vscale x 8 x i1> @icmp_ne_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_ne_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    ret
  %vc = icmp ne <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ne_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_ne_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp ne <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ne_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_ne_xv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp ne <vscale x 8 x i8> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_ne_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 5
; CHECK-NEXT:    ret
  %vc = icmp ne <vscale x 8 x i8> %va, splat (i8 5)
  ret <vscale x 8 x i1> %vc
}
define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_ugt_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v9, v8
; CHECK-NEXT:    ret
  %vc = icmp ugt <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_ugt_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp ugt <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_ugt_xv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsltu.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp ugt <vscale x 8 x i8> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_ugt_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
; CHECK-NEXT:    ret
  %vc = icmp ugt <vscale x 8 x i8> %va, splat (i8 5)
  ret <vscale x 8 x i1> %vc
}
define <vscale x 8 x i1> @icmp_uge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_uge_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_uge_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp uge <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_uge_xv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsleu.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp uge <vscale x 8 x i8> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v9, -16
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 -16)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgtu.vi v0, v8, 14
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 15)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_uge_iv_nxv8i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsleu.vi v0, v8, 15
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i8> splat (i8 15), %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmset.m v0
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 0)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i8_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgtu.vi v0, v8, 0
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 1)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i8_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 -15)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_5(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i8_5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 16)
  ret <vscale x 8 x i1> %vc
}
; Test that we don't optimize uge x, 0 -> ugt x, -1
define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_6(<vscale x 8 x i8> %va, iXLen %vl) {
; CHECK-LABEL: icmp_uge_vi_nxv8i8_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
  %splat = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.iXLen(<vscale x 8 x i8> undef, i8 0, iXLen %vl)
  %vc = icmp uge <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i1> %vc
}
define <vscale x 8 x i1> @icmp_ult_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_ult_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v8, v9
; CHECK-NEXT:    ret
  %vc = icmp ult <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ult_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_ult_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsltu.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp ult <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ult_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_ult_xv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp ult <vscale x 8 x i8> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -16
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsltu.vx v0, v8, a0
; CHECK-NEXT:    ret
  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 -16)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsleu.vi v0, v8, -16
; CHECK-NEXT:    ret
  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 -15)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ult_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_ult_iv_nxv8i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
; CHECK-NEXT:    ret
  %vc = icmp ult <vscale x 8 x i8> splat (i8 -15), %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmclr.m v0
; CHECK-NEXT:    ret
  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 0)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i8_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    ret
  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 1)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i8_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsleu.vi v0, v8, 15
; CHECK-NEXT:    ret
  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 16)
  ret <vscale x 8 x i1> %vc
}
declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.iXLen(<vscale x 8 x i8>, i8, iXLen);

; Test that we don't optimize ult x, 0 -> ule x, -1
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_5(<vscale x 8 x i8> %va, iXLen %vl) {
; CHECK-LABEL: icmp_ult_vi_nxv8i8_5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsltu.vx v0, v8, zero
; CHECK-NEXT:    ret
  %splat = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.iXLen(<vscale x 8 x i8> undef, i8 0, iXLen %vl)
  %vc = icmp ult <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i1> %vc
}
define <vscale x 8 x i1> @icmp_ule_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_ule_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v8, v9
; CHECK-NEXT:    ret
  %vc = icmp ule <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ule_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_ule_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsleu.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp ule <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ule_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_ule_xv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp ule <vscale x 8 x i8> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ule_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_ule_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsleu.vi v0, v8, 5
; CHECK-NEXT:    ret
  %vc = icmp ule <vscale x 8 x i8> %va, splat (i8 5)
  ret <vscale x 8 x i1> %vc
}
define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_sgt_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmslt.vv v0, v9, v8
; CHECK-NEXT:    ret
  %vc = icmp sgt <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_sgt_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgt.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp sgt <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_sgt_xv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp sgt <vscale x 8 x i8> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_sgt_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, 5
; CHECK-NEXT:    ret
  %vc = icmp sgt <vscale x 8 x i8> %va, splat (i8 5)
  ret <vscale x 8 x i1> %vc
}
define <vscale x 8 x i1> @icmp_sge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_sge_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
  %vc = icmp sge <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_sge_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp sge <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_sge_xv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsle.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp sge <vscale x 8 x i8> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_sge_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v9, -16
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
  %vc = icmp sge <vscale x 8 x i8> %va, splat (i8 -16)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_sge_vi_nxv8i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, -16
; CHECK-NEXT:    ret
  %vc = icmp sge <vscale x 8 x i8> %va, splat (i8 -15)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_sge_iv_nxv8i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsle.vi v0, v8, -15
; CHECK-NEXT:    ret
  %vc = icmp sge <vscale x 8 x i8> splat (i8 -15), %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_sge_vi_nxv8i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, -1
; CHECK-NEXT:    ret
  %vc = icmp sge <vscale x 8 x i8> %va, splat (i8 0)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_sge_vi_nxv8i8_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, 15
; CHECK-NEXT:    ret
  %vc = icmp sge <vscale x 8 x i8> %va, splat (i8 16)
  ret <vscale x 8 x i1> %vc
}
define <vscale x 8 x i1> @icmp_slt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_slt_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmslt.vv v0, v8, v9
; CHECK-NEXT:    ret
  %vc = icmp slt <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_slt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_slt_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp slt <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_slt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_slt_xv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgt.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp slt <vscale x 8 x i8> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -16
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
  %vc = icmp slt <vscale x 8 x i8> %va, splat (i8 -16)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsle.vi v0, v8, -16
; CHECK-NEXT:    ret
  %vc = icmp slt <vscale x 8 x i8> %va, splat (i8 -15)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_slt_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_slt_iv_nxv8i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, -15
; CHECK-NEXT:    ret
  %vc = icmp slt <vscale x 8 x i8> splat (i8 -15), %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsle.vi v0, v8, -1
; CHECK-NEXT:    ret
  %vc = icmp slt <vscale x 8 x i8> %va, splat (i8 0)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i8_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsle.vi v0, v8, 15
; CHECK-NEXT:    ret
  %vc = icmp slt <vscale x 8 x i8> %va, splat (i8 16)
  ret <vscale x 8 x i1> %vc
}
define <vscale x 8 x i1> @icmp_sle_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_sle_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v8, v9
; CHECK-NEXT:    ret
  %vc = icmp sle <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sle_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_sle_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsle.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp sle <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sle_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
; CHECK-LABEL: icmp_sle_xv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp sle <vscale x 8 x i8> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sle_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_sle_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsle.vi v0, v8, 5
; CHECK-NEXT:    ret
  %vc = icmp sle <vscale x 8 x i8> %va, splat (i8 5)
  ret <vscale x 8 x i1> %vc
}
define <vscale x 8 x i1> @icmp_eq_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: icmp_eq_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmseq.vv v0, v8, v10
; CHECK-NEXT:    ret
  %vc = icmp eq <vscale x 8 x i16> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_eq_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
; CHECK-LABEL: icmp_eq_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmseq.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp eq <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_eq_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
; CHECK-LABEL: icmp_eq_xv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmseq.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp eq <vscale x 8 x i16> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_eq_vi_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    ret
  %vc = icmp eq <vscale x 8 x i16> %va, splat (i16 0)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_eq_vi_nxv8i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmseq.vi v0, v8, 5
; CHECK-NEXT:    ret
  %vc = icmp eq <vscale x 8 x i16> %va, splat (i16 5)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_eq_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_eq_iv_nxv8i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmseq.vi v0, v8, 5
; CHECK-NEXT:    ret
  %vc = icmp eq <vscale x 8 x i16> splat (i16 5), %va
  ret <vscale x 8 x i1> %vc
}
define <vscale x 8 x i1> @icmp_ne_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: icmp_ne_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsne.vv v0, v8, v10
; CHECK-NEXT:    ret
  %vc = icmp ne <vscale x 8 x i16> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ne_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
; CHECK-LABEL: icmp_ne_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp ne <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ne_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
; CHECK-LABEL: icmp_ne_xv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsne.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp ne <vscale x 8 x i16> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ne_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_ne_vi_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 5
; CHECK-NEXT:    ret
  %vc = icmp ne <vscale x 8 x i16> %va, splat (i16 5)
  ret <vscale x 8 x i1> %vc
}
define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: icmp_ugt_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsltu.vv v0, v10, v8
; CHECK-NEXT:    ret
  %vc = icmp ugt <vscale x 8 x i16> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
; CHECK-LABEL: icmp_ugt_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp ugt <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
; CHECK-LABEL: icmp_ugt_xv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsltu.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp ugt <vscale x 8 x i16> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_ugt_vi_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
; CHECK-NEXT:    ret
  %vc = icmp ugt <vscale x 8 x i16> %va, splat (i16 5)
  ret <vscale x 8 x i1> %vc
}
define <vscale x 8 x i1> @icmp_uge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: icmp_uge_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v10, v8
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i16> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
; CHECK-LABEL: icmp_uge_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vmsleu.vv v0, v10, v8
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp uge <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
; CHECK-LABEL: icmp_uge_xv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsleu.vx v0, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp uge <vscale x 8 x i16> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v10, -16
; CHECK-NEXT:    vmsleu.vv v0, v10, v8
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 -16)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsgtu.vi v0, v8, 14
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 15)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_uge_iv_nxv8i16_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsleu.vi v0, v8, 15
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i16> splat (i16 15), %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i16_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmset.m v0
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 0)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i16_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsgtu.vi v0, v8, 0
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 1)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i16_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 -15)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_5(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i16_5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
; CHECK-NEXT:    ret
  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 16)
  ret <vscale x 8 x i1> %vc
}
; Section: icmp ult on <vscale x 8 x i16>. There is no vmsltu.vi form, so
; vector-ult-imm is folded to vmsleu.vi with imm-1 when representable.
985 define <vscale x 8 x i1> @icmp_ult_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
986 ; CHECK-LABEL: icmp_ult_vv_nxv8i16:
988 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
989 ; CHECK-NEXT: vmsltu.vv v0, v8, v10
991 %vc = icmp ult <vscale x 8 x i16> %va, %vb
992 ret <vscale x 8 x i1> %vc
995 define <vscale x 8 x i1> @icmp_ult_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
996 ; CHECK-LABEL: icmp_ult_vx_nxv8i16:
998 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
999 ; CHECK-NEXT: vmsltu.vx v0, v8, a0
1001 %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1002 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1003 %vc = icmp ult <vscale x 8 x i16> %va, %splat
1004 ret <vscale x 8 x i1> %vc
; splat ult va is flipped to va ugt splat (vmsgtu.vx).
1007 define <vscale x 8 x i1> @icmp_ult_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1008 ; CHECK-LABEL: icmp_ult_xv_nxv8i16:
1010 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1011 ; CHECK-NEXT: vmsgtu.vx v0, v8, a0
1013 %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1014 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1015 %vc = icmp ult <vscale x 8 x i16> %splat, %va
1016 ret <vscale x 8 x i1> %vc
; va ult -16: the ule fold would need -17 (outside simm5), so the constant is
; loaded into a scalar with li and vmsltu.vx is used.
1019 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
1020 ; CHECK-LABEL: icmp_ult_vi_nxv8i16_0:
1022 ; CHECK-NEXT: li a0, -16
1023 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1024 ; CHECK-NEXT: vmsltu.vx v0, v8, a0
1026 %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 -16)
1027 ret <vscale x 8 x i1> %vc
; va ult -15 becomes va ule -16.
1030 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
1031 ; CHECK-LABEL: icmp_ult_vi_nxv8i16_1:
1033 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1034 ; CHECK-NEXT: vmsleu.vi v0, v8, -16
1036 %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 -15)
1037 ret <vscale x 8 x i1> %vc
; -15 ult va is flipped to va ugt -15.
1040 define <vscale x 8 x i1> @icmp_ult_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
1041 ; CHECK-LABEL: icmp_ult_iv_nxv8i16_1:
1043 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1044 ; CHECK-NEXT: vmsgtu.vi v0, v8, -15
1046 %vc = icmp ult <vscale x 8 x i16> splat (i16 -15), %va
1047 ret <vscale x 8 x i1> %vc
; Unsigned x ult 0 is always false: folded to vmclr.m (mask-typed vsetvli).
1050 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
1051 ; CHECK-LABEL: icmp_ult_vi_nxv8i16_2:
1053 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
1054 ; CHECK-NEXT: vmclr.m v0
1056 %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 0)
1057 ret <vscale x 8 x i1> %vc
; va ult 1 is equivalent to va == 0.
1060 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
1061 ; CHECK-LABEL: icmp_ult_vi_nxv8i16_3:
1063 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1064 ; CHECK-NEXT: vmseq.vi v0, v8, 0
1066 %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 1)
1067 ret <vscale x 8 x i1> %vc
; va ult 16 becomes va ule 15.
1070 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
1071 ; CHECK-LABEL: icmp_ult_vi_nxv8i16_4:
1073 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1074 ; CHECK-NEXT: vmsleu.vi v0, v8, 15
1076 %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 16)
1077 ret <vscale x 8 x i1> %vc
; Section: icmp ule on <vscale x 8 x i16>, mapping directly onto vmsleu.
1080 define <vscale x 8 x i1> @icmp_ule_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
1081 ; CHECK-LABEL: icmp_ule_vv_nxv8i16:
1083 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1084 ; CHECK-NEXT: vmsleu.vv v0, v8, v10
1086 %vc = icmp ule <vscale x 8 x i16> %va, %vb
1087 ret <vscale x 8 x i1> %vc
1090 define <vscale x 8 x i1> @icmp_ule_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1091 ; CHECK-LABEL: icmp_ule_vx_nxv8i16:
1093 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1094 ; CHECK-NEXT: vmsleu.vx v0, v8, a0
1096 %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1097 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1098 %vc = icmp ule <vscale x 8 x i16> %va, %splat
1099 ret <vscale x 8 x i1> %vc
; splat ule va would need a vmsgeu.vx form, which does not exist, so the splat
; is materialized with vmv.v.x and vmsleu.vv is used with swapped operands.
1102 define <vscale x 8 x i1> @icmp_ule_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1103 ; CHECK-LABEL: icmp_ule_xv_nxv8i16:
1105 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1106 ; CHECK-NEXT: vmv.v.x v10, a0
1107 ; CHECK-NEXT: vmsleu.vv v0, v10, v8
1109 %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1110 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1111 %vc = icmp ule <vscale x 8 x i16> %splat, %va
1112 ret <vscale x 8 x i1> %vc
1115 define <vscale x 8 x i1> @icmp_ule_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
1116 ; CHECK-LABEL: icmp_ule_vi_nxv8i16_0:
1118 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1119 ; CHECK-NEXT: vmsleu.vi v0, v8, 5
1121 %vc = icmp ule <vscale x 8 x i16> %va, splat (i16 5)
1122 ret <vscale x 8 x i1> %vc
; Section: icmp sgt on <vscale x 8 x i16>. vv form uses vmslt with swapped
; operands (a > b <=> b < a); scalar/immediate forms use vmsgt directly.
1125 define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
1126 ; CHECK-LABEL: icmp_sgt_vv_nxv8i16:
1128 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1129 ; CHECK-NEXT: vmslt.vv v0, v10, v8
1131 %vc = icmp sgt <vscale x 8 x i16> %va, %vb
1132 ret <vscale x 8 x i1> %vc
1135 define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1136 ; CHECK-LABEL: icmp_sgt_vx_nxv8i16:
1138 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1139 ; CHECK-NEXT: vmsgt.vx v0, v8, a0
1141 %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1142 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1143 %vc = icmp sgt <vscale x 8 x i16> %va, %splat
1144 ret <vscale x 8 x i1> %vc
; splat sgt va is flipped to va slt splat (vmslt.vx).
1147 define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1148 ; CHECK-LABEL: icmp_sgt_xv_nxv8i16:
1150 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1151 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1153 %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1154 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1155 %vc = icmp sgt <vscale x 8 x i16> %splat, %va
1156 ret <vscale x 8 x i1> %vc
1159 define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
1160 ; CHECK-LABEL: icmp_sgt_vi_nxv8i16_0:
1162 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1163 ; CHECK-NEXT: vmsgt.vi v0, v8, 5
1165 %vc = icmp sgt <vscale x 8 x i16> %va, splat (i16 5)
1166 ret <vscale x 8 x i1> %vc
; Section: icmp sge on <vscale x 8 x i16>. RVV has no vmsge.vx/.vi encoding,
; so sge is lowered via vmsle with swapped operands, via vmsgt with imm-1, or
; by materializing the splat when neither rewrite fits.
1169 define <vscale x 8 x i1> @icmp_sge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
1170 ; CHECK-LABEL: icmp_sge_vv_nxv8i16:
1172 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1173 ; CHECK-NEXT: vmsle.vv v0, v10, v8
1175 %vc = icmp sge <vscale x 8 x i16> %va, %vb
1176 ret <vscale x 8 x i1> %vc
; va sge scalar-splat: no vmsge.vx, so the splat is built with vmv.v.x and
; compared with vmsle.vv (splat <= va).
1179 define <vscale x 8 x i1> @icmp_sge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1180 ; CHECK-LABEL: icmp_sge_vx_nxv8i16:
1182 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1183 ; CHECK-NEXT: vmv.v.x v10, a0
1184 ; CHECK-NEXT: vmsle.vv v0, v10, v8
1186 %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1187 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1188 %vc = icmp sge <vscale x 8 x i16> %va, %splat
1189 ret <vscale x 8 x i1> %vc
; splat sge va flips to va sle splat, which has a vmsle.vx form.
1192 define <vscale x 8 x i1> @icmp_sge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1193 ; CHECK-LABEL: icmp_sge_xv_nxv8i16:
1195 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1196 ; CHECK-NEXT: vmsle.vx v0, v8, a0
1198 %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1199 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1200 %vc = icmp sge <vscale x 8 x i16> %splat, %va
1201 ret <vscale x 8 x i1> %vc
; va sge -16: imm-1 would be -17 (outside simm5), so the splat is materialized.
1204 define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
1205 ; CHECK-LABEL: icmp_sge_vi_nxv8i16_0:
1207 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1208 ; CHECK-NEXT: vmv.v.i v10, -16
1209 ; CHECK-NEXT: vmsle.vv v0, v10, v8
1211 %vc = icmp sge <vscale x 8 x i16> %va, splat (i16 -16)
1212 ret <vscale x 8 x i1> %vc
; va sge -15 becomes va sgt -16.
1215 define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
1216 ; CHECK-LABEL: icmp_sge_vi_nxv8i16_1:
1218 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1219 ; CHECK-NEXT: vmsgt.vi v0, v8, -16
1221 %vc = icmp sge <vscale x 8 x i16> %va, splat (i16 -15)
1222 ret <vscale x 8 x i1> %vc
; -15 sge va is the same as va sle -15.
1225 define <vscale x 8 x i1> @icmp_sge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
1226 ; CHECK-LABEL: icmp_sge_iv_nxv8i16_1:
1228 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1229 ; CHECK-NEXT: vmsle.vi v0, v8, -15
1231 %vc = icmp sge <vscale x 8 x i16> splat (i16 -15), %va
1232 ret <vscale x 8 x i1> %vc
; va sge 0 becomes va sgt -1 (non-negative test).
1235 define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
1236 ; CHECK-LABEL: icmp_sge_vi_nxv8i16_2:
1238 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1239 ; CHECK-NEXT: vmsgt.vi v0, v8, -1
1241 %vc = icmp sge <vscale x 8 x i16> %va, splat (i16 0)
1242 ret <vscale x 8 x i1> %vc
; va sge 16 becomes va sgt 15.
1245 define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
1246 ; CHECK-LABEL: icmp_sge_vi_nxv8i16_3:
1248 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1249 ; CHECK-NEXT: vmsgt.vi v0, v8, 15
1251 %vc = icmp sge <vscale x 8 x i16> %va, splat (i16 16)
1252 ret <vscale x 8 x i1> %vc
; Section: icmp slt on <vscale x 8 x i16>. No vmslt.vi form exists, so
; vector-slt-imm folds to vmsle.vi with imm-1 when representable.
1255 define <vscale x 8 x i1> @icmp_slt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
1256 ; CHECK-LABEL: icmp_slt_vv_nxv8i16:
1258 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1259 ; CHECK-NEXT: vmslt.vv v0, v8, v10
1261 %vc = icmp slt <vscale x 8 x i16> %va, %vb
1262 ret <vscale x 8 x i1> %vc
1265 define <vscale x 8 x i1> @icmp_slt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1266 ; CHECK-LABEL: icmp_slt_vx_nxv8i16:
1268 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1269 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1271 %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1272 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1273 %vc = icmp slt <vscale x 8 x i16> %va, %splat
1274 ret <vscale x 8 x i1> %vc
; splat slt va flips to va sgt splat (vmsgt.vx).
1277 define <vscale x 8 x i1> @icmp_slt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1278 ; CHECK-LABEL: icmp_slt_xv_nxv8i16:
1280 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1281 ; CHECK-NEXT: vmsgt.vx v0, v8, a0
1283 %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1284 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1285 %vc = icmp slt <vscale x 8 x i16> %splat, %va
1286 ret <vscale x 8 x i1> %vc
; va slt -16: the sle fold would need -17 (outside simm5), so -16 is loaded
; into a scalar with li and vmslt.vx is used.
1289 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
1290 ; CHECK-LABEL: icmp_slt_vi_nxv8i16_0:
1292 ; CHECK-NEXT: li a0, -16
1293 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1294 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1296 %vc = icmp slt <vscale x 8 x i16> %va, splat (i16 -16)
1297 ret <vscale x 8 x i1> %vc
; va slt -15 becomes va sle -16.
1300 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
1301 ; CHECK-LABEL: icmp_slt_vi_nxv8i16_1:
1303 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1304 ; CHECK-NEXT: vmsle.vi v0, v8, -16
1306 %vc = icmp slt <vscale x 8 x i16> %va, splat (i16 -15)
1307 ret <vscale x 8 x i1> %vc
; -15 slt va flips to va sgt -15.
1310 define <vscale x 8 x i1> @icmp_slt_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
1311 ; CHECK-LABEL: icmp_slt_iv_nxv8i16_1:
1313 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1314 ; CHECK-NEXT: vmsgt.vi v0, v8, -15
1316 %vc = icmp slt <vscale x 8 x i16> splat (i16 -15), %va
1317 ret <vscale x 8 x i1> %vc
; va slt 0 becomes va sle -1 (negative test).
1320 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
1321 ; CHECK-LABEL: icmp_slt_vi_nxv8i16_2:
1323 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1324 ; CHECK-NEXT: vmsle.vi v0, v8, -1
1326 %vc = icmp slt <vscale x 8 x i16> %va, splat (i16 0)
1327 ret <vscale x 8 x i1> %vc
; va slt 16 becomes va sle 15.
1330 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
1331 ; CHECK-LABEL: icmp_slt_vi_nxv8i16_3:
1333 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1334 ; CHECK-NEXT: vmsle.vi v0, v8, 15
1336 %vc = icmp slt <vscale x 8 x i16> %va, splat (i16 16)
1337 ret <vscale x 8 x i1> %vc
; Section: icmp sle on <vscale x 8 x i16>, mapping directly onto vmsle.
1340 define <vscale x 8 x i1> @icmp_sle_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
1341 ; CHECK-LABEL: icmp_sle_vv_nxv8i16:
1343 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1344 ; CHECK-NEXT: vmsle.vv v0, v8, v10
1346 %vc = icmp sle <vscale x 8 x i16> %va, %vb
1347 ret <vscale x 8 x i1> %vc
1350 define <vscale x 8 x i1> @icmp_sle_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1351 ; CHECK-LABEL: icmp_sle_vx_nxv8i16:
1353 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1354 ; CHECK-NEXT: vmsle.vx v0, v8, a0
1356 %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1357 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1358 %vc = icmp sle <vscale x 8 x i16> %va, %splat
1359 ret <vscale x 8 x i1> %vc
; splat sle va would need a vmsge.vx form, which does not exist, so the splat
; is materialized with vmv.v.x and vmsle.vv is used with swapped operands.
1362 define <vscale x 8 x i1> @icmp_sle_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1363 ; CHECK-LABEL: icmp_sle_xv_nxv8i16:
1365 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
1366 ; CHECK-NEXT: vmv.v.x v10, a0
1367 ; CHECK-NEXT: vmsle.vv v0, v10, v8
1369 %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1370 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1371 %vc = icmp sle <vscale x 8 x i16> %splat, %va
1372 ret <vscale x 8 x i1> %vc
1375 define <vscale x 8 x i1> @icmp_sle_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
1376 ; CHECK-LABEL: icmp_sle_vi_nxv8i16_0:
1378 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
1379 ; CHECK-NEXT: vmsle.vi v0, v8, 5
1381 %vc = icmp sle <vscale x 8 x i16> %va, splat (i16 5)
1382 ret <vscale x 8 x i1> %vc
; Section: icmp eq on <vscale x 8 x i32> (e32/m4). eq is symmetric, so vv/vx/
; xv and both immediate operand orders all lower to the same vmseq form.
1385 define <vscale x 8 x i1> @icmp_eq_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1386 ; CHECK-LABEL: icmp_eq_vv_nxv8i32:
1388 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1389 ; CHECK-NEXT: vmseq.vv v0, v8, v12
1391 %vc = icmp eq <vscale x 8 x i32> %va, %vb
1392 ret <vscale x 8 x i1> %vc
1395 define <vscale x 8 x i1> @icmp_eq_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1396 ; CHECK-LABEL: icmp_eq_vx_nxv8i32:
1398 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1399 ; CHECK-NEXT: vmseq.vx v0, v8, a0
1401 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1402 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1403 %vc = icmp eq <vscale x 8 x i32> %va, %splat
1404 ret <vscale x 8 x i1> %vc
1407 define <vscale x 8 x i1> @icmp_eq_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1408 ; CHECK-LABEL: icmp_eq_xv_nxv8i32:
1410 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1411 ; CHECK-NEXT: vmseq.vx v0, v8, a0
1413 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1414 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1415 %vc = icmp eq <vscale x 8 x i32> %splat, %va
1416 ret <vscale x 8 x i1> %vc
1419 define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1420 ; CHECK-LABEL: icmp_eq_vi_nxv8i32_0:
1422 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1423 ; CHECK-NEXT: vmseq.vi v0, v8, 0
1425 %vc = icmp eq <vscale x 8 x i32> %va, splat (i32 0)
1426 ret <vscale x 8 x i1> %vc
1429 define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
1430 ; CHECK-LABEL: icmp_eq_vi_nxv8i32_1:
1432 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1433 ; CHECK-NEXT: vmseq.vi v0, v8, 5
1435 %vc = icmp eq <vscale x 8 x i32> %va, splat (i32 5)
1436 ret <vscale x 8 x i1> %vc
1439 define <vscale x 8 x i1> @icmp_eq_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
1440 ; CHECK-LABEL: icmp_eq_iv_nxv8i32_1:
1442 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1443 ; CHECK-NEXT: vmseq.vi v0, v8, 5
1445 %vc = icmp eq <vscale x 8 x i32> splat (i32 5), %va
1446 ret <vscale x 8 x i1> %vc
; Section: icmp ne on <vscale x 8 x i32>. Like eq, ne is symmetric and maps
; directly onto vmsne for all operand orders.
1449 define <vscale x 8 x i1> @icmp_ne_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1450 ; CHECK-LABEL: icmp_ne_vv_nxv8i32:
1452 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1453 ; CHECK-NEXT: vmsne.vv v0, v8, v12
1455 %vc = icmp ne <vscale x 8 x i32> %va, %vb
1456 ret <vscale x 8 x i1> %vc
1459 define <vscale x 8 x i1> @icmp_ne_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1460 ; CHECK-LABEL: icmp_ne_vx_nxv8i32:
1462 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1463 ; CHECK-NEXT: vmsne.vx v0, v8, a0
1465 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1466 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1467 %vc = icmp ne <vscale x 8 x i32> %va, %splat
1468 ret <vscale x 8 x i1> %vc
1471 define <vscale x 8 x i1> @icmp_ne_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1472 ; CHECK-LABEL: icmp_ne_xv_nxv8i32:
1474 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1475 ; CHECK-NEXT: vmsne.vx v0, v8, a0
1477 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1478 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1479 %vc = icmp ne <vscale x 8 x i32> %splat, %va
1480 ret <vscale x 8 x i1> %vc
1483 define <vscale x 8 x i1> @icmp_ne_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1484 ; CHECK-LABEL: icmp_ne_vi_nxv8i32_0:
1486 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1487 ; CHECK-NEXT: vmsne.vi v0, v8, 5
1489 %vc = icmp ne <vscale x 8 x i32> %va, splat (i32 5)
1490 ret <vscale x 8 x i1> %vc
; Section: icmp ugt on <vscale x 8 x i32>. vv form uses vmsltu with swapped
; operands; scalar/immediate forms use vmsgtu directly.
1493 define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1494 ; CHECK-LABEL: icmp_ugt_vv_nxv8i32:
1496 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1497 ; CHECK-NEXT: vmsltu.vv v0, v12, v8
1499 %vc = icmp ugt <vscale x 8 x i32> %va, %vb
1500 ret <vscale x 8 x i1> %vc
1503 define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1504 ; CHECK-LABEL: icmp_ugt_vx_nxv8i32:
1506 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1507 ; CHECK-NEXT: vmsgtu.vx v0, v8, a0
1509 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1510 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1511 %vc = icmp ugt <vscale x 8 x i32> %va, %splat
1512 ret <vscale x 8 x i1> %vc
; splat ugt va flips to va ultu splat (vmsltu.vx).
1515 define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1516 ; CHECK-LABEL: icmp_ugt_xv_nxv8i32:
1518 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1519 ; CHECK-NEXT: vmsltu.vx v0, v8, a0
1521 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1522 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1523 %vc = icmp ugt <vscale x 8 x i32> %splat, %va
1524 ret <vscale x 8 x i1> %vc
1527 define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1528 ; CHECK-LABEL: icmp_ugt_vi_nxv8i32_0:
1530 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1531 ; CHECK-NEXT: vmsgtu.vi v0, v8, 5
1533 %vc = icmp ugt <vscale x 8 x i32> %va, splat (i32 5)
1534 ret <vscale x 8 x i1> %vc
; Section: icmp uge on <vscale x 8 x i32>. Mirrors the nxv8i16 uge cases at
; e32/m4: no vmsgeu form, so swap into vmsleu, decrement into vmsgtu.vi, or
; materialize the splat; uge 0 folds to an all-true mask.
1537 define <vscale x 8 x i1> @icmp_uge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1538 ; CHECK-LABEL: icmp_uge_vv_nxv8i32:
1540 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1541 ; CHECK-NEXT: vmsleu.vv v0, v12, v8
1543 %vc = icmp uge <vscale x 8 x i32> %va, %vb
1544 ret <vscale x 8 x i1> %vc
; va uge scalar-splat: no vmsgeu.vx, so the splat is built with vmv.v.x.
1547 define <vscale x 8 x i1> @icmp_uge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1548 ; CHECK-LABEL: icmp_uge_vx_nxv8i32:
1550 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1551 ; CHECK-NEXT: vmv.v.x v12, a0
1552 ; CHECK-NEXT: vmsleu.vv v0, v12, v8
1554 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1555 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1556 %vc = icmp uge <vscale x 8 x i32> %va, %splat
1557 ret <vscale x 8 x i1> %vc
; splat uge va flips to va ule splat (vmsleu.vx).
1560 define <vscale x 8 x i1> @icmp_uge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1561 ; CHECK-LABEL: icmp_uge_xv_nxv8i32:
1563 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1564 ; CHECK-NEXT: vmsleu.vx v0, v8, a0
1566 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1567 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1568 %vc = icmp uge <vscale x 8 x i32> %splat, %va
1569 ret <vscale x 8 x i1> %vc
; va uge -16: imm-1 would leave simm5, so the splat is materialized.
1572 define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1573 ; CHECK-LABEL: icmp_uge_vi_nxv8i32_0:
1575 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1576 ; CHECK-NEXT: vmv.v.i v12, -16
1577 ; CHECK-NEXT: vmsleu.vv v0, v12, v8
1579 %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 -16)
1580 ret <vscale x 8 x i1> %vc
; va uge 15 becomes va ugt 14.
1583 define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
1584 ; CHECK-LABEL: icmp_uge_vi_nxv8i32_1:
1586 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1587 ; CHECK-NEXT: vmsgtu.vi v0, v8, 14
1589 %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 15)
1590 ret <vscale x 8 x i1> %vc
; 15 uge va is va ule 15.
1593 define <vscale x 8 x i1> @icmp_uge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
1594 ; CHECK-LABEL: icmp_uge_iv_nxv8i32_1:
1596 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1597 ; CHECK-NEXT: vmsleu.vi v0, v8, 15
1599 %vc = icmp uge <vscale x 8 x i32> splat (i32 15), %va
1600 ret <vscale x 8 x i1> %vc
; Unsigned x uge 0 is always true: vmset.m with mask-typed vsetvli.
1603 define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
1604 ; CHECK-LABEL: icmp_uge_vi_nxv8i32_2:
1606 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
1607 ; CHECK-NEXT: vmset.m v0
1609 %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 0)
1610 ret <vscale x 8 x i1> %vc
; va uge 1 becomes va ugt 0.
1613 define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
1614 ; CHECK-LABEL: icmp_uge_vi_nxv8i32_3:
1616 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1617 ; CHECK-NEXT: vmsgtu.vi v0, v8, 0
1619 %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 1)
1620 ret <vscale x 8 x i1> %vc
; va uge -15 becomes va ugt -16.
1623 define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
1624 ; CHECK-LABEL: icmp_uge_vi_nxv8i32_4:
1626 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1627 ; CHECK-NEXT: vmsgtu.vi v0, v8, -16
1629 %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 -15)
1630 ret <vscale x 8 x i1> %vc
; va uge 16 becomes va ugt 15.
1633 define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_5(<vscale x 8 x i32> %va) {
1634 ; CHECK-LABEL: icmp_uge_vi_nxv8i32_5:
1636 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1637 ; CHECK-NEXT: vmsgtu.vi v0, v8, 15
1639 %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 16)
1640 ret <vscale x 8 x i1> %vc
; Section: icmp ult on <vscale x 8 x i32>. Mirrors the nxv8i16 ult cases:
; no vmsltu.vi form, so immediates fold into vmsleu.vi/vmseq.vi or fall back
; to a scalar register; ult 0 folds to an all-false mask.
1643 define <vscale x 8 x i1> @icmp_ult_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1644 ; CHECK-LABEL: icmp_ult_vv_nxv8i32:
1646 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1647 ; CHECK-NEXT: vmsltu.vv v0, v8, v12
1649 %vc = icmp ult <vscale x 8 x i32> %va, %vb
1650 ret <vscale x 8 x i1> %vc
1653 define <vscale x 8 x i1> @icmp_ult_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1654 ; CHECK-LABEL: icmp_ult_vx_nxv8i32:
1656 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1657 ; CHECK-NEXT: vmsltu.vx v0, v8, a0
1659 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1660 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1661 %vc = icmp ult <vscale x 8 x i32> %va, %splat
1662 ret <vscale x 8 x i1> %vc
; splat ult va flips to va ugt splat (vmsgtu.vx).
1665 define <vscale x 8 x i1> @icmp_ult_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1666 ; CHECK-LABEL: icmp_ult_xv_nxv8i32:
1668 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1669 ; CHECK-NEXT: vmsgtu.vx v0, v8, a0
1671 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1672 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1673 %vc = icmp ult <vscale x 8 x i32> %splat, %va
1674 ret <vscale x 8 x i1> %vc
; va ult -16: the ule fold would need -17 (outside simm5), so li + vmsltu.vx.
1677 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1678 ; CHECK-LABEL: icmp_ult_vi_nxv8i32_0:
1680 ; CHECK-NEXT: li a0, -16
1681 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1682 ; CHECK-NEXT: vmsltu.vx v0, v8, a0
1684 %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 -16)
1685 ret <vscale x 8 x i1> %vc
; va ult -15 becomes va ule -16.
1688 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
1689 ; CHECK-LABEL: icmp_ult_vi_nxv8i32_1:
1691 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1692 ; CHECK-NEXT: vmsleu.vi v0, v8, -16
1694 %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 -15)
1695 ret <vscale x 8 x i1> %vc
; -15 ult va flips to va ugt -15.
1698 define <vscale x 8 x i1> @icmp_ult_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
1699 ; CHECK-LABEL: icmp_ult_iv_nxv8i32_1:
1701 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1702 ; CHECK-NEXT: vmsgtu.vi v0, v8, -15
1704 %vc = icmp ult <vscale x 8 x i32> splat (i32 -15), %va
1705 ret <vscale x 8 x i1> %vc
; Unsigned x ult 0 is always false: vmclr.m with mask-typed vsetvli.
1708 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
1709 ; CHECK-LABEL: icmp_ult_vi_nxv8i32_2:
1711 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
1712 ; CHECK-NEXT: vmclr.m v0
1714 %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 0)
1715 ret <vscale x 8 x i1> %vc
; va ult 1 is equivalent to va == 0.
1718 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
1719 ; CHECK-LABEL: icmp_ult_vi_nxv8i32_3:
1721 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1722 ; CHECK-NEXT: vmseq.vi v0, v8, 0
1724 %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 1)
1725 ret <vscale x 8 x i1> %vc
; va ult 16 becomes va ule 15.
1728 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
1729 ; CHECK-LABEL: icmp_ult_vi_nxv8i32_4:
1731 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1732 ; CHECK-NEXT: vmsleu.vi v0, v8, 15
1734 %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 16)
1735 ret <vscale x 8 x i1> %vc
; Section: icmp ule on <vscale x 8 x i32>, mapping directly onto vmsleu.
1738 define <vscale x 8 x i1> @icmp_ule_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1739 ; CHECK-LABEL: icmp_ule_vv_nxv8i32:
1741 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1742 ; CHECK-NEXT: vmsleu.vv v0, v8, v12
1744 %vc = icmp ule <vscale x 8 x i32> %va, %vb
1745 ret <vscale x 8 x i1> %vc
1748 define <vscale x 8 x i1> @icmp_ule_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1749 ; CHECK-LABEL: icmp_ule_vx_nxv8i32:
1751 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1752 ; CHECK-NEXT: vmsleu.vx v0, v8, a0
1754 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1755 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1756 %vc = icmp ule <vscale x 8 x i32> %va, %splat
1757 ret <vscale x 8 x i1> %vc
; splat ule va would need a vmsgeu.vx form, which does not exist, so the splat
; is materialized with vmv.v.x and vmsleu.vv is used with swapped operands.
1760 define <vscale x 8 x i1> @icmp_ule_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1761 ; CHECK-LABEL: icmp_ule_xv_nxv8i32:
1763 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1764 ; CHECK-NEXT: vmv.v.x v12, a0
1765 ; CHECK-NEXT: vmsleu.vv v0, v12, v8
1767 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1768 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1769 %vc = icmp ule <vscale x 8 x i32> %splat, %va
1770 ret <vscale x 8 x i1> %vc
1773 define <vscale x 8 x i1> @icmp_ule_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1774 ; CHECK-LABEL: icmp_ule_vi_nxv8i32_0:
1776 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1777 ; CHECK-NEXT: vmsleu.vi v0, v8, 5
1779 %vc = icmp ule <vscale x 8 x i32> %va, splat (i32 5)
1780 ret <vscale x 8 x i1> %vc
; Section: icmp sgt on <vscale x 8 x i32>. vv form uses vmslt with swapped
; operands; scalar/immediate forms use vmsgt directly.
1783 define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1784 ; CHECK-LABEL: icmp_sgt_vv_nxv8i32:
1786 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1787 ; CHECK-NEXT: vmslt.vv v0, v12, v8
1789 %vc = icmp sgt <vscale x 8 x i32> %va, %vb
1790 ret <vscale x 8 x i1> %vc
1793 define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1794 ; CHECK-LABEL: icmp_sgt_vx_nxv8i32:
1796 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1797 ; CHECK-NEXT: vmsgt.vx v0, v8, a0
1799 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1800 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1801 %vc = icmp sgt <vscale x 8 x i32> %va, %splat
1802 ret <vscale x 8 x i1> %vc
; splat sgt va flips to va slt splat (vmslt.vx).
1805 define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1806 ; CHECK-LABEL: icmp_sgt_xv_nxv8i32:
1808 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1809 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1811 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1812 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1813 %vc = icmp sgt <vscale x 8 x i32> %splat, %va
1814 ret <vscale x 8 x i1> %vc
1817 define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1818 ; CHECK-LABEL: icmp_sgt_vi_nxv8i32_0:
1820 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1821 ; CHECK-NEXT: vmsgt.vi v0, v8, 5
1823 %vc = icmp sgt <vscale x 8 x i32> %va, splat (i32 5)
1824 ret <vscale x 8 x i1> %vc
; Section: icmp sge on <vscale x 8 x i32>. Mirrors the nxv8i16 sge cases:
; no vmsge form, so swap into vmsle, decrement into vmsgt.vi, or materialize
; the splat when the adjusted immediate leaves the simm5 range.
1827 define <vscale x 8 x i1> @icmp_sge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1828 ; CHECK-LABEL: icmp_sge_vv_nxv8i32:
1830 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1831 ; CHECK-NEXT: vmsle.vv v0, v12, v8
1833 %vc = icmp sge <vscale x 8 x i32> %va, %vb
1834 ret <vscale x 8 x i1> %vc
; va sge scalar-splat: no vmsge.vx, so vmv.v.x + vmsle.vv.
1837 define <vscale x 8 x i1> @icmp_sge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1838 ; CHECK-LABEL: icmp_sge_vx_nxv8i32:
1840 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1841 ; CHECK-NEXT: vmv.v.x v12, a0
1842 ; CHECK-NEXT: vmsle.vv v0, v12, v8
1844 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1845 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1846 %vc = icmp sge <vscale x 8 x i32> %va, %splat
1847 ret <vscale x 8 x i1> %vc
; splat sge va flips to va sle splat (vmsle.vx).
1850 define <vscale x 8 x i1> @icmp_sge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1851 ; CHECK-LABEL: icmp_sge_xv_nxv8i32:
1853 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1854 ; CHECK-NEXT: vmsle.vx v0, v8, a0
1856 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1857 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1858 %vc = icmp sge <vscale x 8 x i32> %splat, %va
1859 ret <vscale x 8 x i1> %vc
; va sge -16: imm-1 would be -17 (outside simm5), so the splat is materialized.
1862 define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1863 ; CHECK-LABEL: icmp_sge_vi_nxv8i32_0:
1865 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1866 ; CHECK-NEXT: vmv.v.i v12, -16
1867 ; CHECK-NEXT: vmsle.vv v0, v12, v8
1869 %vc = icmp sge <vscale x 8 x i32> %va, splat (i32 -16)
1870 ret <vscale x 8 x i1> %vc
; va sge -15 becomes va sgt -16.
1873 define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
1874 ; CHECK-LABEL: icmp_sge_vi_nxv8i32_1:
1876 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1877 ; CHECK-NEXT: vmsgt.vi v0, v8, -16
1879 %vc = icmp sge <vscale x 8 x i32> %va, splat (i32 -15)
1880 ret <vscale x 8 x i1> %vc
; -15 sge va is va sle -15.
1883 define <vscale x 8 x i1> @icmp_sge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
1884 ; CHECK-LABEL: icmp_sge_iv_nxv8i32_1:
1886 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1887 ; CHECK-NEXT: vmsle.vi v0, v8, -15
1889 %vc = icmp sge <vscale x 8 x i32> splat (i32 -15), %va
1890 ret <vscale x 8 x i1> %vc
; va sge 0 becomes va sgt -1 (non-negative test).
1893 define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
1894 ; CHECK-LABEL: icmp_sge_vi_nxv8i32_2:
1896 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1897 ; CHECK-NEXT: vmsgt.vi v0, v8, -1
1899 %vc = icmp sge <vscale x 8 x i32> %va, splat (i32 0)
1900 ret <vscale x 8 x i1> %vc
; va sge 16 becomes va sgt 15.
1903 define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
1904 ; CHECK-LABEL: icmp_sge_vi_nxv8i32_3:
1906 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1907 ; CHECK-NEXT: vmsgt.vi v0, v8, 15
1909 %vc = icmp sge <vscale x 8 x i32> %va, splat (i32 16)
1910 ret <vscale x 8 x i1> %vc
; slt cases for nxv8i32: vector-vector uses vmslt.vv, vector-scalar uses
; vmslt.vx, and a splat on the LHS commutes to vmsgt.vx.
1913 define <vscale x 8 x i1> @icmp_slt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1914 ; CHECK-LABEL: icmp_slt_vv_nxv8i32:
1916 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1917 ; CHECK-NEXT: vmslt.vv v0, v8, v12
1919 %vc = icmp slt <vscale x 8 x i32> %va, %vb
1920 ret <vscale x 8 x i1> %vc
1923 define <vscale x 8 x i1> @icmp_slt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1924 ; CHECK-LABEL: icmp_slt_vx_nxv8i32:
1926 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1927 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1929 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1930 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1931 %vc = icmp slt <vscale x 8 x i32> %va, %splat
1932 ret <vscale x 8 x i1> %vc
1935 define <vscale x 8 x i1> @icmp_slt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1936 ; CHECK-LABEL: icmp_slt_xv_nxv8i32:
1938 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1939 ; CHECK-NEXT: vmsgt.vx v0, v8, a0
1941 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1942 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1943 %vc = icmp slt <vscale x 8 x i32> %splat, %va
1944 ret <vscale x 8 x i1> %vc
; slt -16: the usual fold to vmsle.vi with c-1 would need -17, which is out of
; the vi immediate range, so the constant goes through a scalar register.
1947 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1948 ; CHECK-LABEL: icmp_slt_vi_nxv8i32_0:
1950 ; CHECK-NEXT: li a0, -16
1951 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
1952 ; CHECK-NEXT: vmslt.vx v0, v8, a0
1954 %vc = icmp slt <vscale x 8 x i32> %va, splat (i32 -16)
1955 ret <vscale x 8 x i1> %vc
; slt with immediate c folds to vmsle.vi with c-1 (x < c  ==  x <= c-1).
1958 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
1959 ; CHECK-LABEL: icmp_slt_vi_nxv8i32_1:
1961 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1962 ; CHECK-NEXT: vmsle.vi v0, v8, -16
1964 %vc = icmp slt <vscale x 8 x i32> %va, splat (i32 -15)
1965 ret <vscale x 8 x i1> %vc
; Immediate on the LHS commutes to sgt and uses vmsgt.vi directly.
1968 define <vscale x 8 x i1> @icmp_slt_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
1969 ; CHECK-LABEL: icmp_slt_iv_nxv8i32_1:
1971 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1972 ; CHECK-NEXT: vmsgt.vi v0, v8, -15
1974 %vc = icmp slt <vscale x 8 x i32> splat (i32 -15), %va
1975 ret <vscale x 8 x i1> %vc
1978 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
1979 ; CHECK-LABEL: icmp_slt_vi_nxv8i32_2:
1981 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1982 ; CHECK-NEXT: vmsle.vi v0, v8, -1
1984 %vc = icmp slt <vscale x 8 x i32> %va, splat (i32 0)
1985 ret <vscale x 8 x i1> %vc
1988 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
1989 ; CHECK-LABEL: icmp_slt_vi_nxv8i32_3:
1991 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
1992 ; CHECK-NEXT: vmsle.vi v0, v8, 15
1994 %vc = icmp slt <vscale x 8 x i32> %va, splat (i32 16)
1995 ret <vscale x 8 x i1> %vc
; sle cases for nxv8i32: vmsle.vv / vmsle.vx map directly; a splat on the LHS
; has no vx form with swapped operands, so the splat is materialized.
1998 define <vscale x 8 x i1> @icmp_sle_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1999 ; CHECK-LABEL: icmp_sle_vv_nxv8i32:
2001 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
2002 ; CHECK-NEXT: vmsle.vv v0, v8, v12
2004 %vc = icmp sle <vscale x 8 x i32> %va, %vb
2005 ret <vscale x 8 x i1> %vc
2008 define <vscale x 8 x i1> @icmp_sle_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
2009 ; CHECK-LABEL: icmp_sle_vx_nxv8i32:
2011 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
2012 ; CHECK-NEXT: vmsle.vx v0, v8, a0
2014 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
2015 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
2016 %vc = icmp sle <vscale x 8 x i32> %va, %splat
2017 ret <vscale x 8 x i1> %vc
; splat <= va: scalar is broadcast with vmv.v.x, then vmsle.vv.
2020 define <vscale x 8 x i1> @icmp_sle_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
2021 ; CHECK-LABEL: icmp_sle_xv_nxv8i32:
2023 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
2024 ; CHECK-NEXT: vmv.v.x v12, a0
2025 ; CHECK-NEXT: vmsle.vv v0, v12, v8
2027 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
2028 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
2029 %vc = icmp sle <vscale x 8 x i32> %splat, %va
2030 ret <vscale x 8 x i1> %vc
2033 define <vscale x 8 x i1> @icmp_sle_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
2034 ; CHECK-LABEL: icmp_sle_vi_nxv8i32_0:
2036 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
2037 ; CHECK-NEXT: vmsle.vi v0, v8, 5
2039 %vc = icmp sle <vscale x 8 x i32> %va, splat (i32 5)
2040 ret <vscale x 8 x i1> %vc
; eq cases for nxv8i64 (m8). Scalar-operand tests split per target: on RV32 an
; i64 scalar arrives as a GPR pair (a0/a1), is stored to the stack, and is
; splatted with a zero-stride vlse64.v; RV64 uses vmseq.vx directly.
2043 define <vscale x 8 x i1> @icmp_eq_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2044 ; CHECK-LABEL: icmp_eq_vv_nxv8i64:
2046 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2047 ; CHECK-NEXT: vmseq.vv v0, v8, v16
2049 %vc = icmp eq <vscale x 8 x i64> %va, %vb
2050 ret <vscale x 8 x i1> %vc
2053 define <vscale x 8 x i1> @icmp_eq_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2054 ; RV32-LABEL: icmp_eq_vx_nxv8i64:
2056 ; RV32-NEXT: addi sp, sp, -16
2057 ; RV32-NEXT: .cfi_def_cfa_offset 16
2058 ; RV32-NEXT: sw a1, 12(sp)
2059 ; RV32-NEXT: sw a0, 8(sp)
2060 ; RV32-NEXT: addi a0, sp, 8
2061 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2062 ; RV32-NEXT: vlse64.v v16, (a0), zero
2063 ; RV32-NEXT: vmseq.vv v0, v8, v16
2064 ; RV32-NEXT: addi sp, sp, 16
2067 ; RV64-LABEL: icmp_eq_vx_nxv8i64:
2069 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2070 ; RV64-NEXT: vmseq.vx v0, v8, a0
2072 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2073 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2074 %vc = icmp eq <vscale x 8 x i64> %va, %splat
2075 ret <vscale x 8 x i1> %vc
; eq is symmetric, so splat-on-LHS produces the same vmseq.vx on RV64.
2078 define <vscale x 8 x i1> @icmp_eq_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2079 ; RV32-LABEL: icmp_eq_xv_nxv8i64:
2081 ; RV32-NEXT: addi sp, sp, -16
2082 ; RV32-NEXT: .cfi_def_cfa_offset 16
2083 ; RV32-NEXT: sw a1, 12(sp)
2084 ; RV32-NEXT: sw a0, 8(sp)
2085 ; RV32-NEXT: addi a0, sp, 8
2086 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2087 ; RV32-NEXT: vlse64.v v16, (a0), zero
2088 ; RV32-NEXT: vmseq.vv v0, v16, v8
2089 ; RV32-NEXT: addi sp, sp, 16
2092 ; RV64-LABEL: icmp_eq_xv_nxv8i64:
2094 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2095 ; RV64-NEXT: vmseq.vx v0, v8, a0
2097 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2098 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2099 %vc = icmp eq <vscale x 8 x i64> %splat, %va
2100 ret <vscale x 8 x i1> %vc
2103 define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2104 ; CHECK-LABEL: icmp_eq_vi_nxv8i64_0:
2106 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2107 ; CHECK-NEXT: vmseq.vi v0, v8, 0
2109 %vc = icmp eq <vscale x 8 x i64> %va, splat (i64 0)
2110 ret <vscale x 8 x i1> %vc
2113 define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
2114 ; CHECK-LABEL: icmp_eq_vi_nxv8i64_1:
2116 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2117 ; CHECK-NEXT: vmseq.vi v0, v8, 5
2119 %vc = icmp eq <vscale x 8 x i64> %va, splat (i64 5)
2120 ret <vscale x 8 x i1> %vc
2123 define <vscale x 8 x i1> @icmp_eq_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
2124 ; CHECK-LABEL: icmp_eq_iv_nxv8i64_1:
2126 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2127 ; CHECK-NEXT: vmseq.vi v0, v8, 5
2129 %vc = icmp eq <vscale x 8 x i64> splat (i64 5), %va
2130 ret <vscale x 8 x i1> %vc
; ne cases for nxv8i64: vmsne.vv / vmsne.vx / vmsne.vi. ne is symmetric, so the
; xv variant matches the vx codegen; RV32 splats the i64 scalar via the stack.
2133 define <vscale x 8 x i1> @icmp_ne_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2134 ; CHECK-LABEL: icmp_ne_vv_nxv8i64:
2136 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2137 ; CHECK-NEXT: vmsne.vv v0, v8, v16
2139 %vc = icmp ne <vscale x 8 x i64> %va, %vb
2140 ret <vscale x 8 x i1> %vc
2143 define <vscale x 8 x i1> @icmp_ne_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2144 ; RV32-LABEL: icmp_ne_vx_nxv8i64:
2146 ; RV32-NEXT: addi sp, sp, -16
2147 ; RV32-NEXT: .cfi_def_cfa_offset 16
2148 ; RV32-NEXT: sw a1, 12(sp)
2149 ; RV32-NEXT: sw a0, 8(sp)
2150 ; RV32-NEXT: addi a0, sp, 8
2151 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2152 ; RV32-NEXT: vlse64.v v16, (a0), zero
2153 ; RV32-NEXT: vmsne.vv v0, v8, v16
2154 ; RV32-NEXT: addi sp, sp, 16
2157 ; RV64-LABEL: icmp_ne_vx_nxv8i64:
2159 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2160 ; RV64-NEXT: vmsne.vx v0, v8, a0
2162 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2163 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2164 %vc = icmp ne <vscale x 8 x i64> %va, %splat
2165 ret <vscale x 8 x i1> %vc
2168 define <vscale x 8 x i1> @icmp_ne_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2169 ; RV32-LABEL: icmp_ne_xv_nxv8i64:
2171 ; RV32-NEXT: addi sp, sp, -16
2172 ; RV32-NEXT: .cfi_def_cfa_offset 16
2173 ; RV32-NEXT: sw a1, 12(sp)
2174 ; RV32-NEXT: sw a0, 8(sp)
2175 ; RV32-NEXT: addi a0, sp, 8
2176 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2177 ; RV32-NEXT: vlse64.v v16, (a0), zero
2178 ; RV32-NEXT: vmsne.vv v0, v16, v8
2179 ; RV32-NEXT: addi sp, sp, 16
2182 ; RV64-LABEL: icmp_ne_xv_nxv8i64:
2184 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2185 ; RV64-NEXT: vmsne.vx v0, v8, a0
2187 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2188 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2189 %vc = icmp ne <vscale x 8 x i64> %splat, %va
2190 ret <vscale x 8 x i1> %vc
2193 define <vscale x 8 x i1> @icmp_ne_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2194 ; CHECK-LABEL: icmp_ne_vi_nxv8i64_0:
2196 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2197 ; CHECK-NEXT: vmsne.vi v0, v8, 5
2199 %vc = icmp ne <vscale x 8 x i64> %va, splat (i64 5)
2200 ret <vscale x 8 x i1> %vc
; ugt cases for nxv8i64: vv form swaps operands into vmsltu.vv; vx uses
; vmsgtu.vx on RV64; splat-on-LHS (splat > va == va < splat) uses vmsltu.vx.
2203 define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2204 ; CHECK-LABEL: icmp_ugt_vv_nxv8i64:
2206 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2207 ; CHECK-NEXT: vmsltu.vv v0, v16, v8
2209 %vc = icmp ugt <vscale x 8 x i64> %va, %vb
2210 ret <vscale x 8 x i1> %vc
2213 define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2214 ; RV32-LABEL: icmp_ugt_vx_nxv8i64:
2216 ; RV32-NEXT: addi sp, sp, -16
2217 ; RV32-NEXT: .cfi_def_cfa_offset 16
2218 ; RV32-NEXT: sw a1, 12(sp)
2219 ; RV32-NEXT: sw a0, 8(sp)
2220 ; RV32-NEXT: addi a0, sp, 8
2221 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2222 ; RV32-NEXT: vlse64.v v16, (a0), zero
2223 ; RV32-NEXT: vmsltu.vv v0, v16, v8
2224 ; RV32-NEXT: addi sp, sp, 16
2227 ; RV64-LABEL: icmp_ugt_vx_nxv8i64:
2229 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2230 ; RV64-NEXT: vmsgtu.vx v0, v8, a0
2232 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2233 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2234 %vc = icmp ugt <vscale x 8 x i64> %va, %splat
2235 ret <vscale x 8 x i1> %vc
2238 define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2239 ; RV32-LABEL: icmp_ugt_xv_nxv8i64:
2241 ; RV32-NEXT: addi sp, sp, -16
2242 ; RV32-NEXT: .cfi_def_cfa_offset 16
2243 ; RV32-NEXT: sw a1, 12(sp)
2244 ; RV32-NEXT: sw a0, 8(sp)
2245 ; RV32-NEXT: addi a0, sp, 8
2246 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2247 ; RV32-NEXT: vlse64.v v16, (a0), zero
2248 ; RV32-NEXT: vmsltu.vv v0, v8, v16
2249 ; RV32-NEXT: addi sp, sp, 16
2252 ; RV64-LABEL: icmp_ugt_xv_nxv8i64:
2254 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2255 ; RV64-NEXT: vmsltu.vx v0, v8, a0
2257 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2258 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2259 %vc = icmp ugt <vscale x 8 x i64> %splat, %va
2260 ret <vscale x 8 x i1> %vc
2263 define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2264 ; CHECK-LABEL: icmp_ugt_vi_nxv8i64_0:
2266 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2267 ; CHECK-NEXT: vmsgtu.vi v0, v8, 5
2269 %vc = icmp ugt <vscale x 8 x i64> %va, splat (i64 5)
2270 ret <vscale x 8 x i1> %vc
; uge cases for nxv8i64: a >= b is lowered as b <= a via vmsleu with swapped
; operands; scalar uge has no vx form here, so the splat is materialized.
2273 define <vscale x 8 x i1> @icmp_uge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2274 ; CHECK-LABEL: icmp_uge_vv_nxv8i64:
2276 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2277 ; CHECK-NEXT: vmsleu.vv v0, v16, v8
2279 %vc = icmp uge <vscale x 8 x i64> %va, %vb
2280 ret <vscale x 8 x i1> %vc
2283 define <vscale x 8 x i1> @icmp_uge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2284 ; RV32-LABEL: icmp_uge_vx_nxv8i64:
2286 ; RV32-NEXT: addi sp, sp, -16
2287 ; RV32-NEXT: .cfi_def_cfa_offset 16
2288 ; RV32-NEXT: sw a1, 12(sp)
2289 ; RV32-NEXT: sw a0, 8(sp)
2290 ; RV32-NEXT: addi a0, sp, 8
2291 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2292 ; RV32-NEXT: vlse64.v v16, (a0), zero
2293 ; RV32-NEXT: vmsleu.vv v0, v16, v8
2294 ; RV32-NEXT: addi sp, sp, 16
2297 ; RV64-LABEL: icmp_uge_vx_nxv8i64:
2299 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2300 ; RV64-NEXT: vmv.v.x v16, a0
2301 ; RV64-NEXT: vmsleu.vv v0, v16, v8
2303 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2304 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2305 %vc = icmp uge <vscale x 8 x i64> %va, %splat
2306 ret <vscale x 8 x i1> %vc
; splat >= va commutes to va <= splat and selects vmsleu.vx on RV64.
2309 define <vscale x 8 x i1> @icmp_uge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2310 ; RV32-LABEL: icmp_uge_xv_nxv8i64:
2312 ; RV32-NEXT: addi sp, sp, -16
2313 ; RV32-NEXT: .cfi_def_cfa_offset 16
2314 ; RV32-NEXT: sw a1, 12(sp)
2315 ; RV32-NEXT: sw a0, 8(sp)
2316 ; RV32-NEXT: addi a0, sp, 8
2317 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2318 ; RV32-NEXT: vlse64.v v16, (a0), zero
2319 ; RV32-NEXT: vmsleu.vv v0, v8, v16
2320 ; RV32-NEXT: addi sp, sp, 16
2323 ; RV64-LABEL: icmp_uge_xv_nxv8i64:
2325 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2326 ; RV64-NEXT: vmsleu.vx v0, v8, a0
2328 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2329 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2330 %vc = icmp uge <vscale x 8 x i64> %splat, %va
2331 ret <vscale x 8 x i1> %vc
; uge -16: c-1 (-17) is not encodable as a vi immediate, so the splat is
; materialized with vmv.v.i and compared with vmsleu.vv.
2334 define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2335 ; CHECK-LABEL: icmp_uge_vi_nxv8i64_0:
2337 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2338 ; CHECK-NEXT: vmv.v.i v16, -16
2339 ; CHECK-NEXT: vmsleu.vv v0, v16, v8
2341 %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 -16)
2342 ret <vscale x 8 x i1> %vc
; uge c folds to vmsgtu.vi with c-1 (x >= c  ==  x > c-1, unsigned).
2345 define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
2346 ; CHECK-LABEL: icmp_uge_vi_nxv8i64_1:
2348 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2349 ; CHECK-NEXT: vmsgtu.vi v0, v8, 14
2351 %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 15)
2352 ret <vscale x 8 x i1> %vc
2355 define <vscale x 8 x i1> @icmp_uge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
2356 ; CHECK-LABEL: icmp_uge_iv_nxv8i64_1:
2358 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2359 ; CHECK-NEXT: vmsleu.vi v0, v8, 15
2361 %vc = icmp uge <vscale x 8 x i64> splat (i64 15), %va
2362 ret <vscale x 8 x i1> %vc
; x uge 0 is always true, so the compare folds to vmset.m.
2365 define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
2366 ; CHECK-LABEL: icmp_uge_vi_nxv8i64_2:
2368 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
2369 ; CHECK-NEXT: vmset.m v0
2371 %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 0)
2372 ret <vscale x 8 x i1> %vc
2375 define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
2376 ; CHECK-LABEL: icmp_uge_vi_nxv8i64_3:
2378 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2379 ; CHECK-NEXT: vmsgtu.vi v0, v8, 0
2381 %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 1)
2382 ret <vscale x 8 x i1> %vc
2385 define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
2386 ; CHECK-LABEL: icmp_uge_vi_nxv8i64_4:
2388 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2389 ; CHECK-NEXT: vmsgtu.vi v0, v8, -16
2391 %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 -15)
2392 ret <vscale x 8 x i1> %vc
2395 define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_5(<vscale x 8 x i64> %va) {
2396 ; CHECK-LABEL: icmp_uge_vi_nxv8i64_5:
2398 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2399 ; CHECK-NEXT: vmsgtu.vi v0, v8, 15
2401 %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 16)
2402 ret <vscale x 8 x i1> %vc
; ult cases for nxv8i64: vmsltu.vv / vmsltu.vx map directly; splat-on-LHS
; commutes to vmsgtu.vx on RV64.
2405 define <vscale x 8 x i1> @icmp_ult_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2406 ; CHECK-LABEL: icmp_ult_vv_nxv8i64:
2408 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2409 ; CHECK-NEXT: vmsltu.vv v0, v8, v16
2411 %vc = icmp ult <vscale x 8 x i64> %va, %vb
2412 ret <vscale x 8 x i1> %vc
2415 define <vscale x 8 x i1> @icmp_ult_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2416 ; RV32-LABEL: icmp_ult_vx_nxv8i64:
2418 ; RV32-NEXT: addi sp, sp, -16
2419 ; RV32-NEXT: .cfi_def_cfa_offset 16
2420 ; RV32-NEXT: sw a1, 12(sp)
2421 ; RV32-NEXT: sw a0, 8(sp)
2422 ; RV32-NEXT: addi a0, sp, 8
2423 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2424 ; RV32-NEXT: vlse64.v v16, (a0), zero
2425 ; RV32-NEXT: vmsltu.vv v0, v8, v16
2426 ; RV32-NEXT: addi sp, sp, 16
2429 ; RV64-LABEL: icmp_ult_vx_nxv8i64:
2431 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2432 ; RV64-NEXT: vmsltu.vx v0, v8, a0
2434 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2435 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2436 %vc = icmp ult <vscale x 8 x i64> %va, %splat
2437 ret <vscale x 8 x i1> %vc
2440 define <vscale x 8 x i1> @icmp_ult_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2441 ; RV32-LABEL: icmp_ult_xv_nxv8i64:
2443 ; RV32-NEXT: addi sp, sp, -16
2444 ; RV32-NEXT: .cfi_def_cfa_offset 16
2445 ; RV32-NEXT: sw a1, 12(sp)
2446 ; RV32-NEXT: sw a0, 8(sp)
2447 ; RV32-NEXT: addi a0, sp, 8
2448 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2449 ; RV32-NEXT: vlse64.v v16, (a0), zero
2450 ; RV32-NEXT: vmsltu.vv v0, v16, v8
2451 ; RV32-NEXT: addi sp, sp, 16
2454 ; RV64-LABEL: icmp_ult_xv_nxv8i64:
2456 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2457 ; RV64-NEXT: vmsgtu.vx v0, v8, a0
2459 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2460 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2461 %vc = icmp ult <vscale x 8 x i64> %splat, %va
2462 ret <vscale x 8 x i1> %vc
; ult -16: c-1 (-17) is out of the vi immediate range, so the constant is
; materialized in a scalar register and vmsltu.vx is used.
2465 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2466 ; CHECK-LABEL: icmp_ult_vi_nxv8i64_0:
2468 ; CHECK-NEXT: li a0, -16
2469 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2470 ; CHECK-NEXT: vmsltu.vx v0, v8, a0
2472 %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 -16)
2473 ret <vscale x 8 x i1> %vc
; ult c folds to vmsleu.vi with c-1 (x < c  ==  x <= c-1, unsigned).
2476 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
2477 ; CHECK-LABEL: icmp_ult_vi_nxv8i64_1:
2479 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2480 ; CHECK-NEXT: vmsleu.vi v0, v8, -16
2482 %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 -15)
2483 ret <vscale x 8 x i1> %vc
2486 define <vscale x 8 x i1> @icmp_ult_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
2487 ; CHECK-LABEL: icmp_ult_iv_nxv8i64_1:
2489 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2490 ; CHECK-NEXT: vmsgtu.vi v0, v8, -15
2492 %vc = icmp ult <vscale x 8 x i64> splat (i64 -15), %va
2493 ret <vscale x 8 x i1> %vc
; x ult 0 is always false, so the compare folds to vmclr.m.
2496 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
2497 ; CHECK-LABEL: icmp_ult_vi_nxv8i64_2:
2499 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
2500 ; CHECK-NEXT: vmclr.m v0
2502 %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 0)
2503 ret <vscale x 8 x i1> %vc
; x ult 1 is equivalent to x == 0 and folds to vmseq.vi.
2506 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
2507 ; CHECK-LABEL: icmp_ult_vi_nxv8i64_3:
2509 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2510 ; CHECK-NEXT: vmseq.vi v0, v8, 0
2512 %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 1)
2513 ret <vscale x 8 x i1> %vc
2516 define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
2517 ; CHECK-LABEL: icmp_ult_vi_nxv8i64_4:
2519 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2520 ; CHECK-NEXT: vmsleu.vi v0, v8, 15
2522 %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 16)
2523 ret <vscale x 8 x i1> %vc
; ule cases for nxv8i64: vmsleu.vv / vmsleu.vx map directly; splat-on-LHS has
; no swapped vx form here, so RV64 broadcasts the scalar with vmv.v.x first.
2526 define <vscale x 8 x i1> @icmp_ule_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2527 ; CHECK-LABEL: icmp_ule_vv_nxv8i64:
2529 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2530 ; CHECK-NEXT: vmsleu.vv v0, v8, v16
2532 %vc = icmp ule <vscale x 8 x i64> %va, %vb
2533 ret <vscale x 8 x i1> %vc
2536 define <vscale x 8 x i1> @icmp_ule_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2537 ; RV32-LABEL: icmp_ule_vx_nxv8i64:
2539 ; RV32-NEXT: addi sp, sp, -16
2540 ; RV32-NEXT: .cfi_def_cfa_offset 16
2541 ; RV32-NEXT: sw a1, 12(sp)
2542 ; RV32-NEXT: sw a0, 8(sp)
2543 ; RV32-NEXT: addi a0, sp, 8
2544 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2545 ; RV32-NEXT: vlse64.v v16, (a0), zero
2546 ; RV32-NEXT: vmsleu.vv v0, v8, v16
2547 ; RV32-NEXT: addi sp, sp, 16
2550 ; RV64-LABEL: icmp_ule_vx_nxv8i64:
2552 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2553 ; RV64-NEXT: vmsleu.vx v0, v8, a0
2555 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2556 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2557 %vc = icmp ule <vscale x 8 x i64> %va, %splat
2558 ret <vscale x 8 x i1> %vc
2561 define <vscale x 8 x i1> @icmp_ule_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2562 ; RV32-LABEL: icmp_ule_xv_nxv8i64:
2564 ; RV32-NEXT: addi sp, sp, -16
2565 ; RV32-NEXT: .cfi_def_cfa_offset 16
2566 ; RV32-NEXT: sw a1, 12(sp)
2567 ; RV32-NEXT: sw a0, 8(sp)
2568 ; RV32-NEXT: addi a0, sp, 8
2569 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2570 ; RV32-NEXT: vlse64.v v16, (a0), zero
2571 ; RV32-NEXT: vmsleu.vv v0, v16, v8
2572 ; RV32-NEXT: addi sp, sp, 16
2575 ; RV64-LABEL: icmp_ule_xv_nxv8i64:
2577 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2578 ; RV64-NEXT: vmv.v.x v16, a0
2579 ; RV64-NEXT: vmsleu.vv v0, v16, v8
2581 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2582 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2583 %vc = icmp ule <vscale x 8 x i64> %splat, %va
2584 ret <vscale x 8 x i1> %vc
2587 define <vscale x 8 x i1> @icmp_ule_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2588 ; CHECK-LABEL: icmp_ule_vi_nxv8i64_0:
2590 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2591 ; CHECK-NEXT: vmsleu.vi v0, v8, 5
2593 %vc = icmp ule <vscale x 8 x i64> %va, splat (i64 5)
2594 ret <vscale x 8 x i1> %vc
; sgt cases for nxv8i64: vv swaps operands into vmslt.vv; vx uses vmsgt.vx on
; RV64; splat-on-LHS (splat > va == va < splat) uses vmslt.vx.
2597 define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2598 ; CHECK-LABEL: icmp_sgt_vv_nxv8i64:
2600 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2601 ; CHECK-NEXT: vmslt.vv v0, v16, v8
2603 %vc = icmp sgt <vscale x 8 x i64> %va, %vb
2604 ret <vscale x 8 x i1> %vc
2607 define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2608 ; RV32-LABEL: icmp_sgt_vx_nxv8i64:
2610 ; RV32-NEXT: addi sp, sp, -16
2611 ; RV32-NEXT: .cfi_def_cfa_offset 16
2612 ; RV32-NEXT: sw a1, 12(sp)
2613 ; RV32-NEXT: sw a0, 8(sp)
2614 ; RV32-NEXT: addi a0, sp, 8
2615 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2616 ; RV32-NEXT: vlse64.v v16, (a0), zero
2617 ; RV32-NEXT: vmslt.vv v0, v16, v8
2618 ; RV32-NEXT: addi sp, sp, 16
2621 ; RV64-LABEL: icmp_sgt_vx_nxv8i64:
2623 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2624 ; RV64-NEXT: vmsgt.vx v0, v8, a0
2626 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2627 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2628 %vc = icmp sgt <vscale x 8 x i64> %va, %splat
2629 ret <vscale x 8 x i1> %vc
2632 define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2633 ; RV32-LABEL: icmp_sgt_xv_nxv8i64:
2635 ; RV32-NEXT: addi sp, sp, -16
2636 ; RV32-NEXT: .cfi_def_cfa_offset 16
2637 ; RV32-NEXT: sw a1, 12(sp)
2638 ; RV32-NEXT: sw a0, 8(sp)
2639 ; RV32-NEXT: addi a0, sp, 8
2640 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2641 ; RV32-NEXT: vlse64.v v16, (a0), zero
2642 ; RV32-NEXT: vmslt.vv v0, v8, v16
2643 ; RV32-NEXT: addi sp, sp, 16
2646 ; RV64-LABEL: icmp_sgt_xv_nxv8i64:
2648 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2649 ; RV64-NEXT: vmslt.vx v0, v8, a0
2651 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2652 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2653 %vc = icmp sgt <vscale x 8 x i64> %splat, %va
2654 ret <vscale x 8 x i1> %vc
2657 define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2658 ; CHECK-LABEL: icmp_sgt_vi_nxv8i64_0:
2660 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2661 ; CHECK-NEXT: vmsgt.vi v0, v8, 5
2663 %vc = icmp sgt <vscale x 8 x i64> %va, splat (i64 5)
2664 ret <vscale x 8 x i1> %vc
; sge cases for nxv8i64: a >= b is lowered as b <= a via vmsle with swapped
; operands; immediate cases fold to vmsgt.vi with c-1 when c-1 is encodable.
2667 define <vscale x 8 x i1> @icmp_sge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2668 ; CHECK-LABEL: icmp_sge_vv_nxv8i64:
2670 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2671 ; CHECK-NEXT: vmsle.vv v0, v16, v8
2673 %vc = icmp sge <vscale x 8 x i64> %va, %vb
2674 ret <vscale x 8 x i1> %vc
2677 define <vscale x 8 x i1> @icmp_sge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2678 ; RV32-LABEL: icmp_sge_vx_nxv8i64:
2680 ; RV32-NEXT: addi sp, sp, -16
2681 ; RV32-NEXT: .cfi_def_cfa_offset 16
2682 ; RV32-NEXT: sw a1, 12(sp)
2683 ; RV32-NEXT: sw a0, 8(sp)
2684 ; RV32-NEXT: addi a0, sp, 8
2685 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2686 ; RV32-NEXT: vlse64.v v16, (a0), zero
2687 ; RV32-NEXT: vmsle.vv v0, v16, v8
2688 ; RV32-NEXT: addi sp, sp, 16
2691 ; RV64-LABEL: icmp_sge_vx_nxv8i64:
2693 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2694 ; RV64-NEXT: vmv.v.x v16, a0
2695 ; RV64-NEXT: vmsle.vv v0, v16, v8
2697 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2698 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2699 %vc = icmp sge <vscale x 8 x i64> %va, %splat
2700 ret <vscale x 8 x i1> %vc
; splat >= va commutes to va <= splat and selects vmsle.vx on RV64.
2703 define <vscale x 8 x i1> @icmp_sge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2704 ; RV32-LABEL: icmp_sge_xv_nxv8i64:
2706 ; RV32-NEXT: addi sp, sp, -16
2707 ; RV32-NEXT: .cfi_def_cfa_offset 16
2708 ; RV32-NEXT: sw a1, 12(sp)
2709 ; RV32-NEXT: sw a0, 8(sp)
2710 ; RV32-NEXT: addi a0, sp, 8
2711 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2712 ; RV32-NEXT: vlse64.v v16, (a0), zero
2713 ; RV32-NEXT: vmsle.vv v0, v8, v16
2714 ; RV32-NEXT: addi sp, sp, 16
2717 ; RV64-LABEL: icmp_sge_xv_nxv8i64:
2719 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2720 ; RV64-NEXT: vmsle.vx v0, v8, a0
2722 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2723 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2724 %vc = icmp sge <vscale x 8 x i64> %splat, %va
2725 ret <vscale x 8 x i1> %vc
; sge -16: c-1 (-17) is not encodable, so the splat is materialized.
2728 define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2729 ; CHECK-LABEL: icmp_sge_vi_nxv8i64_0:
2731 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2732 ; CHECK-NEXT: vmv.v.i v16, -16
2733 ; CHECK-NEXT: vmsle.vv v0, v16, v8
2735 %vc = icmp sge <vscale x 8 x i64> %va, splat (i64 -16)
2736 ret <vscale x 8 x i1> %vc
2739 define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
2740 ; CHECK-LABEL: icmp_sge_vi_nxv8i64_1:
2742 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2743 ; CHECK-NEXT: vmsgt.vi v0, v8, -16
2745 %vc = icmp sge <vscale x 8 x i64> %va, splat (i64 -15)
2746 ret <vscale x 8 x i1> %vc
2749 define <vscale x 8 x i1> @icmp_sge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
2750 ; CHECK-LABEL: icmp_sge_iv_nxv8i64_1:
2752 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2753 ; CHECK-NEXT: vmsle.vi v0, v8, -15
2755 %vc = icmp sge <vscale x 8 x i64> splat (i64 -15), %va
2756 ret <vscale x 8 x i1> %vc
2759 define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
2760 ; CHECK-LABEL: icmp_sge_vi_nxv8i64_2:
2762 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2763 ; CHECK-NEXT: vmsgt.vi v0, v8, -1
2765 %vc = icmp sge <vscale x 8 x i64> %va, splat (i64 0)
2766 ret <vscale x 8 x i1> %vc
2769 define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
2770 ; CHECK-LABEL: icmp_sge_vi_nxv8i64_3:
2772 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2773 ; CHECK-NEXT: vmsgt.vi v0, v8, 15
2775 %vc = icmp sge <vscale x 8 x i64> %va, splat (i64 16)
2776 ret <vscale x 8 x i1> %vc
2779 define <vscale x 8 x i1> @icmp_slt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2780 ; CHECK-LABEL: icmp_slt_vv_nxv8i64:
2782 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2783 ; CHECK-NEXT: vmslt.vv v0, v8, v16
2785 %vc = icmp slt <vscale x 8 x i64> %va, %vb
2786 ret <vscale x 8 x i1> %vc
2789 define <vscale x 8 x i1> @icmp_slt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2790 ; RV32-LABEL: icmp_slt_vx_nxv8i64:
2792 ; RV32-NEXT: addi sp, sp, -16
2793 ; RV32-NEXT: .cfi_def_cfa_offset 16
2794 ; RV32-NEXT: sw a1, 12(sp)
2795 ; RV32-NEXT: sw a0, 8(sp)
2796 ; RV32-NEXT: addi a0, sp, 8
2797 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2798 ; RV32-NEXT: vlse64.v v16, (a0), zero
2799 ; RV32-NEXT: vmslt.vv v0, v8, v16
2800 ; RV32-NEXT: addi sp, sp, 16
2803 ; RV64-LABEL: icmp_slt_vx_nxv8i64:
2805 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2806 ; RV64-NEXT: vmslt.vx v0, v8, a0
2808 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2809 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2810 %vc = icmp slt <vscale x 8 x i64> %va, %splat
2811 ret <vscale x 8 x i1> %vc
2814 define <vscale x 8 x i1> @icmp_slt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2815 ; RV32-LABEL: icmp_slt_xv_nxv8i64:
2817 ; RV32-NEXT: addi sp, sp, -16
2818 ; RV32-NEXT: .cfi_def_cfa_offset 16
2819 ; RV32-NEXT: sw a1, 12(sp)
2820 ; RV32-NEXT: sw a0, 8(sp)
2821 ; RV32-NEXT: addi a0, sp, 8
2822 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2823 ; RV32-NEXT: vlse64.v v16, (a0), zero
2824 ; RV32-NEXT: vmslt.vv v0, v16, v8
2825 ; RV32-NEXT: addi sp, sp, 16
2828 ; RV64-LABEL: icmp_slt_xv_nxv8i64:
2830 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2831 ; RV64-NEXT: vmsgt.vx v0, v8, a0
2833 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2834 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2835 %vc = icmp slt <vscale x 8 x i64> %splat, %va
2836 ret <vscale x 8 x i1> %vc
2839 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2840 ; CHECK-LABEL: icmp_slt_vi_nxv8i64_0:
2842 ; CHECK-NEXT: li a0, -16
2843 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2844 ; CHECK-NEXT: vmslt.vx v0, v8, a0
2846 %vc = icmp slt <vscale x 8 x i64> %va, splat (i64 -16)
2847 ret <vscale x 8 x i1> %vc
2850 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
2851 ; CHECK-LABEL: icmp_slt_vi_nxv8i64_1:
2853 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2854 ; CHECK-NEXT: vmsle.vi v0, v8, -16
2856 %vc = icmp slt <vscale x 8 x i64> %va, splat (i64 -15)
2857 ret <vscale x 8 x i1> %vc
; Immediate splat on the LHS: splat(-15) slt %va is commuted to
; %va sgt -15 and emitted as vmsgt.vi.
2860 define <vscale x 8 x i1> @icmp_slt_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
2861 ; CHECK-LABEL: icmp_slt_iv_nxv8i64_1:
2863 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2864 ; CHECK-NEXT: vmsgt.vi v0, v8, -15
2866 %vc = icmp slt <vscale x 8 x i64> splat (i64 -15), %va
2867 ret <vscale x 8 x i1> %vc
; slt splat(0) is rewritten as sle splat(-1) and emitted as vmsle.vi.
2870 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
2871 ; CHECK-LABEL: icmp_slt_vi_nxv8i64_2:
2873 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2874 ; CHECK-NEXT: vmsle.vi v0, v8, -1
2876 %vc = icmp slt <vscale x 8 x i64> %va, splat (i64 0)
2877 ret <vscale x 8 x i1> %vc
; slt splat(16): 16 itself is out of simm5 range, but the equivalent
; sle splat(15) fits vmsle.vi.
2880 define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
2881 ; CHECK-LABEL: icmp_slt_vi_nxv8i64_3:
2883 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2884 ; CHECK-NEXT: vmsle.vi v0, v8, 15
2886 %vc = icmp slt <vscale x 8 x i64> %va, splat (i64 16)
2887 ret <vscale x 8 x i1> %vc
; Vector-vector signed sle maps directly to a single vmsle.vv.
2890 define <vscale x 8 x i1> @icmp_sle_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2891 ; CHECK-LABEL: icmp_sle_vv_nxv8i64:
2893 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2894 ; CHECK-NEXT: vmsle.vv v0, v8, v16
2896 %vc = icmp sle <vscale x 8 x i64> %va, %vb
2897 ret <vscale x 8 x i1> %vc
; Vector-scalar sle. On RV32 the i64 scalar (in a0/a1) is stored to the stack
; and splatted with a zero-stride vlse64, then compared with vmsle.vv; on
; RV64 a single vmsle.vx suffices.
2900 define <vscale x 8 x i1> @icmp_sle_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2901 ; RV32-LABEL: icmp_sle_vx_nxv8i64:
2903 ; RV32-NEXT: addi sp, sp, -16
2904 ; RV32-NEXT: .cfi_def_cfa_offset 16
2905 ; RV32-NEXT: sw a1, 12(sp)
2906 ; RV32-NEXT: sw a0, 8(sp)
2907 ; RV32-NEXT: addi a0, sp, 8
2908 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2909 ; RV32-NEXT: vlse64.v v16, (a0), zero
2910 ; RV32-NEXT: vmsle.vv v0, v8, v16
2911 ; RV32-NEXT: addi sp, sp, 16
2914 ; RV64-LABEL: icmp_sle_vx_nxv8i64:
2916 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2917 ; RV64-NEXT: vmsle.vx v0, v8, a0
2919 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2920 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2921 %vc = icmp sle <vscale x 8 x i64> %va, %splat
2922 ret <vscale x 8 x i1> %vc
; Splat on the LHS: (splat %b) sle %va. RV32 stores the a0/a1 pair to the
; stack and splats it with a zero-stride vlse64. On RV64 there is no vmsge.vx
; to commute into, so the scalar is splatted with vmv.v.x and compared with
; vmsle.vv (splat as the first operand).
2925 define <vscale x 8 x i1> @icmp_sle_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2926 ; RV32-LABEL: icmp_sle_xv_nxv8i64:
2928 ; RV32-NEXT: addi sp, sp, -16
2929 ; RV32-NEXT: .cfi_def_cfa_offset 16
2930 ; RV32-NEXT: sw a1, 12(sp)
2931 ; RV32-NEXT: sw a0, 8(sp)
2932 ; RV32-NEXT: addi a0, sp, 8
2933 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2934 ; RV32-NEXT: vlse64.v v16, (a0), zero
2935 ; RV32-NEXT: vmsle.vv v0, v16, v8
2936 ; RV32-NEXT: addi sp, sp, 16
2939 ; RV64-LABEL: icmp_sle_xv_nxv8i64:
2941 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
2942 ; RV64-NEXT: vmv.v.x v16, a0
2943 ; RV64-NEXT: vmsle.vv v0, v16, v8
2945 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2946 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2947 %vc = icmp sle <vscale x 8 x i64> %splat, %va
2948 ret <vscale x 8 x i1> %vc
; sle with an in-range immediate splat (5) uses vmsle.vi directly.
2951 define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2952 ; CHECK-LABEL: icmp_sle_vi_nxv8i64_0:
2954 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
2955 ; CHECK-NEXT: vmsle.vi v0, v8, 5
2957 %vc = icmp sle <vscale x 8 x i64> %va, splat (i64 5)
2958 ret <vscale x 8 x i1> %vc
2961 ; Check a setcc with two constant splats, which would previously get stuck in
2962 ; an infinite loop. DAGCombine isn't clever enough to constant-fold
2963 ; splat_vectors but could continuously swap the operands, trying to put the
2965 define <vscale x 8 x i1> @icmp_eq_ii_nxv8i8() {
2966 ; CHECK-LABEL: icmp_eq_ii_nxv8i8:
2968 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; 5 == 2 is never true, so the whole compare folds to an all-zeros mask.
2969 ; CHECK-NEXT: vmclr.m v0
2971 %vc = icmp eq <vscale x 8 x i8> splat (i8 5), splat (i8 2)
2972 ret <vscale x 8 x i1> %vc
2975 ; This icmp/setcc is split and so we find a scalable-vector mask CONCAT_VECTOR
2976 ; node. Ensure we correctly (custom) lower this.
; The nxv16i64 operand spans two m8 register groups (v8 and v16), so the
; compare is split into two vmseq.vi results; the high half (v24) is then
; concatenated onto the low half (v0) with vslideup.vx, using an offset
; a0 = vlenb/8 and VL = 2*a0 at e8/mf4 — presumably the mask registers are
; shifted at byte granularity here; confirm against the mask-insert lowering.
2977 define <vscale x 16 x i1> @icmp_eq_vi_nx16i64(<vscale x 16 x i64> %va) {
2978 ; CHECK-LABEL: icmp_eq_vi_nx16i64:
2980 ; CHECK-NEXT: csrr a0, vlenb
2981 ; CHECK-NEXT: srli a0, a0, 3
2982 ; CHECK-NEXT: add a1, a0, a0
2983 ; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma
2984 ; CHECK-NEXT: vmseq.vi v24, v16, 0
2985 ; CHECK-NEXT: vmseq.vi v0, v8, 0
2986 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2987 ; CHECK-NEXT: vslideup.vx v0, v24, a0
2989 %vc = icmp eq <vscale x 16 x i64> %va, zeroinitializer
2990 ret <vscale x 16 x i1> %vc