; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Signed Comparisons                                                          ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

define <vscale x 16 x i1> @ir_cmpeq_b(<vscale x 16 x i8> %a) {
; CHECK-LABEL: ir_cmpeq_b:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #4
  %out = icmp eq <vscale x 16 x i8> %a, splat(i8 4)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @int_cmpeq_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: int_cmpeq_b:
; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @wide_cmpeq_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: wide_cmpeq_b:
; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> splat(i64 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @ir_cmpeq_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: ir_cmpeq_h:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, #-16
  %out = icmp eq <vscale x 8 x i16> %a, splat(i16 -16)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @int_cmpeq_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: int_cmpeq_h:
; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, #-16
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -16))
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @wide_cmpeq_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: wide_cmpeq_h:
; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, #-16
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> splat(i64 -16))
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @ir_cmpeq_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: ir_cmpeq_s:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, #15
  %out = icmp eq <vscale x 4 x i32> %a, splat(i32 15)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @int_cmpeq_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: int_cmpeq_s:
; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, #15
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 15))
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @wide_cmpeq_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: wide_cmpeq_s:
; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, #15
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> splat(i64 15))
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @ir_cmpeq_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: ir_cmpeq_d:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, #0
  %out = icmp eq <vscale x 2 x i64> %a, zeroinitializer
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @int_cmpeq_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: int_cmpeq_d:
; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, #0
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @ir_cmpge_b(<vscale x 16 x i8> %a) {
; CHECK-LABEL: ir_cmpge_b:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpge p0.b, p0/z, z0.b, #4
  %out = icmp sge <vscale x 16 x i8> %a, splat(i8 4)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @int_cmpge_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: int_cmpge_b:
; CHECK-NEXT:    cmpge p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @wide_cmpge_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: wide_cmpge_b:
; CHECK-NEXT:    cmpge p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> splat(i64 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @ir_cmpge_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: ir_cmpge_h:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpge p0.h, p0/z, z0.h, #-16
  %out = icmp sge <vscale x 8 x i16> %a, splat(i16 -16)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @int_cmpge_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: int_cmpge_h:
; CHECK-NEXT:    cmpge p0.h, p0/z, z0.h, #-16
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -16))
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @wide_cmpge_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: wide_cmpge_h:
; CHECK-NEXT:    cmpge p0.h, p0/z, z0.h, #-16
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> splat(i64 -16))
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @ir_cmpge_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: ir_cmpge_s:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, #15
  %out = icmp sge <vscale x 4 x i32> %a, splat(i32 15)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @int_cmpge_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: int_cmpge_s:
; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, #15
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 15))
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @wide_cmpge_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: wide_cmpge_s:
; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, #15
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> splat(i64 15))
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @ir_cmpge_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: ir_cmpge_d:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpge p0.d, p0/z, z0.d, #0
  %out = icmp sge <vscale x 2 x i64> %a, zeroinitializer
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @int_cmpge_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: int_cmpge_d:
; CHECK-NEXT:    cmpge p0.d, p0/z, z0.d, #0
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @ir_cmpgt_b(<vscale x 16 x i8> %a) {
; CHECK-LABEL: ir_cmpgt_b:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z0.b, #4
  %out = icmp sgt <vscale x 16 x i8> %a, splat(i8 4)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @int_cmpgt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: int_cmpgt_b:
; CHECK-NEXT:    cmpgt p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @wide_cmpgt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: wide_cmpgt_b:
; CHECK-NEXT:    cmpgt p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> splat(i64 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @ir_cmpgt_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: ir_cmpgt_h:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z0.h, #-16
  %out = icmp sgt <vscale x 8 x i16> %a, splat(i16 -16)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @int_cmpgt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: int_cmpgt_h:
; CHECK-NEXT:    cmpgt p0.h, p0/z, z0.h, #-16
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -16))
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @wide_cmpgt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: wide_cmpgt_h:
; CHECK-NEXT:    cmpgt p0.h, p0/z, z0.h, #-16
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> splat(i64 -16))
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @ir_cmpgt_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: ir_cmpgt_s:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, #15
  %out = icmp sgt <vscale x 4 x i32> %a, splat(i32 15)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @int_cmpgt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: int_cmpgt_s:
; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, #15
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 15))
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @wide_cmpgt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: wide_cmpgt_s:
; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, #15
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> splat(i64 15))
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @ir_cmpgt_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: ir_cmpgt_d:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z0.d, #0
  %out = icmp sgt <vscale x 2 x i64> %a, zeroinitializer
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @int_cmpgt_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: int_cmpgt_d:
; CHECK-NEXT:    cmpgt p0.d, p0/z, z0.d, #0
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @ir_cmple_b(<vscale x 16 x i8> %a) {
; CHECK-LABEL: ir_cmple_b:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmple p0.b, p0/z, z0.b, #4
  %out = icmp sle <vscale x 16 x i8> %a, splat(i8 4)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @int_cmple_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: int_cmple_b:
; CHECK-NEXT:    cmple p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> splat(i8 4), <vscale x 16 x i8> %a)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @wide_cmple_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: wide_cmple_b:
; CHECK-NEXT:    cmple p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmple.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> splat(i64 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @ir_cmple_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: ir_cmple_h:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmple p0.h, p0/z, z0.h, #-16
  %out = icmp sle <vscale x 8 x i16> %a, splat(i16 -16)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @int_cmple_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: int_cmple_h:
; CHECK-NEXT:    cmple p0.h, p0/z, z0.h, #-16
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> splat(i16 -16), <vscale x 8 x i16> %a)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @wide_cmple_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: wide_cmple_h:
; CHECK-NEXT:    cmple p0.h, p0/z, z0.h, #-16
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmple.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> splat(i64 -16))
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @ir_cmple_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: ir_cmple_s:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmple p0.s, p0/z, z0.s, #15
  %out = icmp sle <vscale x 4 x i32> %a, splat(i32 15)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @int_cmple_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: int_cmple_s:
; CHECK-NEXT:    cmple p0.s, p0/z, z0.s, #15
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> splat(i32 15), <vscale x 4 x i32> %a)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @wide_cmple_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: wide_cmple_s:
; CHECK-NEXT:    cmple p0.s, p0/z, z0.s, #15
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> splat(i64 15))
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @ir_cmple_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: ir_cmple_d:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmple p0.d, p0/z, z0.d, #0
  %out = icmp sle <vscale x 2 x i64> %a, zeroinitializer
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @int_cmple_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: int_cmple_d:
; CHECK-NEXT:    cmple p0.d, p0/z, z0.d, #0
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %a)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @ir_cmplt_b(<vscale x 16 x i8> %a) {
; CHECK-LABEL: ir_cmplt_b:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmplt p0.b, p0/z, z0.b, #4
  %out = icmp slt <vscale x 16 x i8> %a, splat(i8 4)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @int_cmplt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: int_cmplt_b:
; CHECK-NEXT:    cmplt p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> splat(i8 4), <vscale x 16 x i8> %a)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @wide_cmplt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: wide_cmplt_b:
; CHECK-NEXT:    cmplt p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplt.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> splat(i64 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @ir_cmplt_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: ir_cmplt_h:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmplt p0.h, p0/z, z0.h, #-16
  %out = icmp slt <vscale x 8 x i16> %a, splat(i16 -16)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @int_cmplt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: int_cmplt_h:
; CHECK-NEXT:    cmplt p0.h, p0/z, z0.h, #-16
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> splat(i16 -16), <vscale x 8 x i16> %a)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @wide_cmplt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: wide_cmplt_h:
; CHECK-NEXT:    cmplt p0.h, p0/z, z0.h, #-16
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> splat(i64 -16))
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @ir_cmplt_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: ir_cmplt_s:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmplt p0.s, p0/z, z0.s, #15
  %out = icmp slt <vscale x 4 x i32> %a, splat(i32 15)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @int_cmplt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: int_cmplt_s:
; CHECK-NEXT:    cmplt p0.s, p0/z, z0.s, #15
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> splat(i32 15), <vscale x 4 x i32> %a)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @wide_cmplt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: wide_cmplt_s:
; CHECK-NEXT:    cmplt p0.s, p0/z, z0.s, #15
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> splat(i64 15))
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @ir_cmplt_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: ir_cmplt_d:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmplt p0.d, p0/z, z0.d, #0
  %out = icmp slt <vscale x 2 x i64> %a, zeroinitializer
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @int_cmplt_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: int_cmplt_d:
; CHECK-NEXT:    cmplt p0.d, p0/z, z0.d, #0
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> zeroinitializer, <vscale x 2 x i64> %a)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @ir_cmpne_b(<vscale x 16 x i8> %a) {
; CHECK-LABEL: ir_cmpne_b:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, #4
  %out = icmp ne <vscale x 16 x i8> %a, splat(i8 4)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @int_cmpne_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: int_cmpne_b:
; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @wide_cmpne_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: wide_cmpne_b:
; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> splat(i64 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @ir_cmpne_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: ir_cmpne_h:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #-16
  %out = icmp ne <vscale x 8 x i16> %a, splat(i16 -16)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @int_cmpne_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: int_cmpne_h:
; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #-16
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat(i16 -16))
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @wide_cmpne_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: wide_cmpne_h:
; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #-16
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> splat(i64 -16))
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @ir_cmpne_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: ir_cmpne_s:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #15
  %out = icmp ne <vscale x 4 x i32> %a, splat(i32 15)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @int_cmpne_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: int_cmpne_s:
; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #15
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 15))
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @wide_cmpne_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: wide_cmpne_s:
; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #15
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> splat(i64 15))
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @ir_cmpne_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: ir_cmpne_d:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
  %out = icmp ne <vscale x 2 x i64> %a, zeroinitializer
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @int_cmpne_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: int_cmpne_d:
; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)
  ret <vscale x 2 x i1> %out
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Unsigned Comparisons                                                        ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

define <vscale x 16 x i1> @ir_cmphi_b(<vscale x 16 x i8> %a) {
; CHECK-LABEL: ir_cmphi_b:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmphi p0.b, p0/z, z0.b, #4
  %out = icmp ugt <vscale x 16 x i8> %a, splat(i8 4)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @int_cmphi_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: int_cmphi_b:
; CHECK-NEXT:    cmphi p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @wide_cmphi_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: wide_cmphi_b:
; CHECK-NEXT:    cmphi p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> splat(i64 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @ir_cmphi_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: ir_cmphi_h:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
  %out = icmp ugt <vscale x 8 x i16> %a, zeroinitializer
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @int_cmphi_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: int_cmphi_h:
; CHECK-NEXT:    cmphi p0.h, p0/z, z0.h, #0
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @wide_cmphi_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: wide_cmphi_h:
; CHECK-NEXT:    cmphi p0.h, p0/z, z0.h, #0
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> zeroinitializer)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @ir_cmphi_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: ir_cmphi_s:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmphi p0.s, p0/z, z0.s, #68
  %out = icmp ugt <vscale x 4 x i32> %a, splat(i32 68)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @int_cmphi_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: int_cmphi_s:
; CHECK-NEXT:    cmphi p0.s, p0/z, z0.s, #68
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 68))
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @wide_cmphi_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: wide_cmphi_s:
; CHECK-NEXT:    cmphi p0.s, p0/z, z0.s, #68
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> splat(i64 68))
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @ir_cmphi_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: ir_cmphi_d:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmphi p0.d, p0/z, z0.d, #127
  %out = icmp ugt <vscale x 2 x i64> %a, splat(i64 127)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @int_cmphi_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: int_cmphi_d:
; CHECK-NEXT:    cmphi p0.d, p0/z, z0.d, #127
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 127))
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @ir_cmphs_b(<vscale x 16 x i8> %a) {
; CHECK-LABEL: ir_cmphs_b:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmphs p0.b, p0/z, z0.b, #4
  %out = icmp uge <vscale x 16 x i8> %a, splat(i8 4)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @int_cmphs_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: int_cmphs_b:
; CHECK-NEXT:    cmphs p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat(i8 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @wide_cmphs_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: wide_cmphs_b:
; CHECK-NEXT:    cmphs p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> splat(i64 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @ir_cmphs_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: ir_cmphs_h:
; CHECK-NEXT:    ptrue p0.h
  %out = icmp uge <vscale x 8 x i16> %a, zeroinitializer
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @int_cmphs_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: int_cmphs_h:
; CHECK-NEXT:    cmphs p0.h, p0/z, z0.h, #0
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @wide_cmphs_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: wide_cmphs_h:
; CHECK-NEXT:    cmphs p0.h, p0/z, z0.h, #0
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> zeroinitializer)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @ir_cmphs_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: ir_cmphs_s:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmphs p0.s, p0/z, z0.s, #68
  %out = icmp uge <vscale x 4 x i32> %a, splat(i32 68)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @int_cmphs_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: int_cmphs_s:
; CHECK-NEXT:    cmphs p0.s, p0/z, z0.s, #68
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> splat(i32 68))
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @wide_cmphs_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: wide_cmphs_s:
; CHECK-NEXT:    cmphs p0.s, p0/z, z0.s, #68
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> splat(i64 68))
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @ir_cmphs_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: ir_cmphs_d:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmphs p0.d, p0/z, z0.d, #127
  %out = icmp uge <vscale x 2 x i64> %a, splat(i64 127)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @int_cmphs_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: int_cmphs_d:
; CHECK-NEXT:    cmphs p0.d, p0/z, z0.d, #127
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat(i64 127))
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @ir_cmplo_b(<vscale x 16 x i8> %a) {
; CHECK-LABEL: ir_cmplo_b:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmplo p0.b, p0/z, z0.b, #4
  %out = icmp ult <vscale x 16 x i8> %a, splat(i8 4)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @int_cmplo_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: int_cmplo_b:
; CHECK-NEXT:    cmplo p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> splat(i8 4), <vscale x 16 x i8> %a)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @wide_cmplo_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: wide_cmplo_b:
; CHECK-NEXT:    cmplo p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> splat(i64 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @ir_cmplo_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: ir_cmplo_h:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmplo p0.h, p0/z, z0.h, #2
  %out = icmp ult <vscale x 8 x i16> %a, splat(i16 2)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @int_cmplo_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: int_cmplo_h:
; CHECK-NEXT:    cmplo p0.h, p0/z, z0.h, #3
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> splat(i16 3), <vscale x 8 x i16> %a)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @wide_cmplo_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: wide_cmplo_h:
; CHECK-NEXT:    cmplo p0.h, p0/z, z0.h, #4
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> splat(i64 4))
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @ir_cmplo_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: ir_cmplo_s:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmplo p0.s, p0/z, z0.s, #68
  %out = icmp ult <vscale x 4 x i32> %a, splat(i32 68)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @int_cmplo_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: int_cmplo_s:
; CHECK-NEXT:    cmplo p0.s, p0/z, z0.s, #68
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> splat(i32 68), <vscale x 4 x i32> %a)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @wide_cmplo_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: wide_cmplo_s:
; CHECK-NEXT:    cmplo p0.s, p0/z, z0.s, #68
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> splat(i64 68))
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @ir_cmplo_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: ir_cmplo_d:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmplo p0.d, p0/z, z0.d, #127
  %out = icmp ult <vscale x 2 x i64> %a, splat(i64 127)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @int_cmplo_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: int_cmplo_d:
; CHECK-NEXT:    cmplo p0.d, p0/z, z0.d, #127
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> splat(i64 127), <vscale x 2 x i64> %a)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @ir_cmpls_b(<vscale x 16 x i8> %a) {
; CHECK-LABEL: ir_cmpls_b:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpls p0.b, p0/z, z0.b, #4
  %out = icmp ule <vscale x 16 x i8> %a, splat(i8 4)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @int_cmpls_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: int_cmpls_b:
; CHECK-NEXT:    cmpls p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> splat(i8 4), <vscale x 16 x i8> %a)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @wide_cmpls_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: wide_cmpls_b:
; CHECK-NEXT:    cmpls p0.b, p0/z, z0.b, #4
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpls.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> splat(i64 4))
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @ir_cmpls_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: ir_cmpls_h:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpls p0.h, p0/z, z0.h, #0
  %out = icmp ule <vscale x 8 x i16> %a, zeroinitializer
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @int_cmpls_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: int_cmpls_h:
; CHECK-NEXT:    cmpls p0.h, p0/z, z0.h, #0
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> %a)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @wide_cmpls_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: wide_cmpls_h:
; CHECK-NEXT:    cmpls p0.h, p0/z, z0.h, #0
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpls.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> zeroinitializer)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @ir_cmpls_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: ir_cmpls_s:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpls p0.s, p0/z, z0.s, #68
  %out = icmp ule <vscale x 4 x i32> %a, splat(i32 68)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @int_cmpls_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: int_cmpls_s:
; CHECK-NEXT:    cmpls p0.s, p0/z, z0.s, #68
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> splat(i32 68), <vscale x 4 x i32> %a)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @wide_cmpls_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: wide_cmpls_s:
; CHECK-NEXT:    cmpls p0.s, p0/z, z0.s, #68
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> splat(i64 68))
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @ir_cmpls_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: ir_cmpls_d:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpls p0.d, p0/z, z0.d, #127
  %out = icmp ule <vscale x 2 x i64> %a, splat(i64 127)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @int_cmpls_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: int_cmpls_d:
; CHECK-NEXT:    cmpls p0.d, p0/z, z0.d, #127
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> splat(i64 127), <vscale x 2 x i64> %a)
  ret <vscale x 2 x i1> %out
}

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmple.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmple.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpls.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpls.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmplt.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)