; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; CMPEQ
;
define <vscale x 16 x i1> @cmpeq_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmpeq_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpeq_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmpeq_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpeq_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmpeq_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmpeq_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpeq_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @cmpeq_wide_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpeq_wide_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpeq_wide_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpeq_wide_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpeq_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpeq_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i1> %out
}

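; Verify an unpredicated icmp lowers to a compare predicated on an all-active
; ptrue.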
define <vscale x 16 x i1> @cmpeq_ir_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmpeq_ir_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = icmp eq <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpeq_ir_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmpeq_ir_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpeq p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = icmp eq <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpeq_ir_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmpeq_ir_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = icmp eq <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmpeq_ir_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpeq_ir_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = icmp eq <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i1> %out
}

;
; CMPGE
;

define <vscale x 16 x i1> @cmpge_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmpge_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpge_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmpge_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpge_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmpge_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmpge_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpge_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @cmpge_wide_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpge_wide_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.b, p0/z, z0.b, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpge_wide_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpge_wide_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.h, p0/z, z0.h, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpge_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpge_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 16 x i1> @cmpge_ir_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmpge_ir_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpge p0.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = icmp sge <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpge_ir_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmpge_ir_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpge p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = icmp sge <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpge_ir_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmpge_ir_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = icmp sge <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmpge_ir_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpge_ir_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpge p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = icmp sge <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i1> %out
}

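; Verify sle lowers to cmpge with the operands commuted; the slt, ult and ule
; tests below commute to cmpgt, cmphi and cmphs in the same way.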
define <vscale x 16 x i1> @cmpge_ir_comm_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmpge_ir_comm_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpge p0.b, p0/z, z1.b, z0.b
; CHECK-NEXT:    ret
  %out = icmp sle <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpge_ir_comm_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmpge_ir_comm_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpge p0.h, p0/z, z1.h, z0.h
; CHECK-NEXT:    ret
  %out = icmp sle <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpge_ir_comm_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmpge_ir_comm_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpge p0.s, p0/z, z1.s, z0.s
; CHECK-NEXT:    ret
  %out = icmp sle <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmpge_ir_comm_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpge_ir_comm_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpge p0.d, p0/z, z1.d, z0.d
; CHECK-NEXT:    ret
  %out = icmp sle <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i1> %out
}

;
; CMPGT
;

define <vscale x 16 x i1> @cmpgt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmpgt_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpgt p0.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpgt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmpgt_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpgt p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpgt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmpgt_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmpgt_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpgt_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpgt p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @cmpgt_wide_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpgt_wide_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpgt p0.b, p0/z, z0.b, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpgt_wide_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpgt_wide_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpgt p0.h, p0/z, z0.h, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpgt_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpgt_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 16 x i1> @cmpgt_ir_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmpgt_ir_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = icmp sgt <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpgt_ir_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmpgt_ir_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = icmp sgt <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpgt_ir_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmpgt_ir_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = icmp sgt <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmpgt_ir_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpgt_ir_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = icmp sgt <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @cmpgt_ir_comm_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmpgt_ir_comm_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z1.b, z0.b
; CHECK-NEXT:    ret
  %out = icmp slt <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpgt_ir_comm_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmpgt_ir_comm_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z1.h, z0.h
; CHECK-NEXT:    ret
  %out = icmp slt <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpgt_ir_comm_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmpgt_ir_comm_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z1.s, z0.s
; CHECK-NEXT:    ret
  %out = icmp slt <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmpgt_ir_comm_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpgt_ir_comm_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z1.d, z0.d
; CHECK-NEXT:    ret
  %out = icmp slt <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i1> %out
}

;
; CMPHI
;

define <vscale x 16 x i1> @cmphi_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmphi_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphi p0.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmphi_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmphi_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphi p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmphi_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmphi_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphi p0.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmphi_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphi_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphi p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @cmphi_wide_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphi_wide_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphi p0.b, p0/z, z0.b, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmphi_wide_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphi_wide_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphi p0.h, p0/z, z0.h, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmphi_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphi_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphi p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 16 x i1> @cmphi_ir_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmphi_ir_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmphi p0.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = icmp ugt <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmphi_ir_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmphi_ir_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmphi p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = icmp ugt <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmphi_ir_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmphi_ir_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmphi p0.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = icmp ugt <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmphi_ir_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphi_ir_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmphi p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = icmp ugt <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @cmphi_ir_comm_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmphi_ir_comm_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmphi p0.b, p0/z, z1.b, z0.b
; CHECK-NEXT:    ret
  %out = icmp ult <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmphi_ir_comm_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmphi_ir_comm_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmphi p0.h, p0/z, z1.h, z0.h
; CHECK-NEXT:    ret
  %out = icmp ult <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmphi_ir_comm_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmphi_ir_comm_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmphi p0.s, p0/z, z1.s, z0.s
; CHECK-NEXT:    ret
  %out = icmp ult <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmphi_ir_comm_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphi_ir_comm_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmphi p0.d, p0/z, z1.d, z0.d
; CHECK-NEXT:    ret
  %out = icmp ult <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i1> %out
}

;
; CMPHS
;

define <vscale x 16 x i1> @cmphs_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmphs_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphs p0.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmphs_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmphs_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphs p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmphs_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmphs_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphs p0.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmphs_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphs_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphs p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @cmphs_wide_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphs_wide_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphs p0.b, p0/z, z0.b, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmphs_wide_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphs_wide_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphs p0.h, p0/z, z0.h, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmphs_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphs_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphs p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 16 x i1> @cmphs_ir_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmphs_ir_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmphs p0.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = icmp uge <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmphs_ir_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmphs_ir_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmphs p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = icmp uge <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmphs_ir_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmphs_ir_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmphs p0.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = icmp uge <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmphs_ir_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphs_ir_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmphs p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = icmp uge <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @cmphs_ir_comm_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmphs_ir_comm_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmphs p0.b, p0/z, z1.b, z0.b
; CHECK-NEXT:    ret
  %out = icmp ule <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmphs_ir_comm_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmphs_ir_comm_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmphs p0.h, p0/z, z1.h, z0.h
; CHECK-NEXT:    ret
  %out = icmp ule <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmphs_ir_comm_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmphs_ir_comm_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmphs p0.s, p0/z, z1.s, z0.s
; CHECK-NEXT:    ret
  %out = icmp ule <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmphs_ir_comm_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphs_ir_comm_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmphs p0.d, p0/z, z1.d, z0.d
; CHECK-NEXT:    ret
  %out = icmp ule <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i1> %out
}

;
; CMPLE
;

define <vscale x 16 x i1> @cmple_wide_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmple_wide_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmple p0.b, p0/z, z0.b, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmple.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmple_wide_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmple_wide_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmple p0.h, p0/z, z0.h, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmple.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmple_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmple_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmple p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i1> %out
}

;
; CMPLO
;

define <vscale x 16 x i1> @cmplo_wide_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmplo_wide_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmplo p0.b, p0/z, z0.b, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmplo_wide_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmplo_wide_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmplo p0.h, p0/z, z0.h, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmplo_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmplo_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmplo p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i1> %out
}

;
; CMPLS
;

define <vscale x 16 x i1> @cmpls_wide_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpls_wide_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpls p0.b, p0/z, z0.b, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpls.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpls_wide_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpls_wide_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpls p0.h, p0/z, z0.h, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpls.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpls_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpls_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpls p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i1> %out
}

;
; CMPLT
;

define <vscale x 16 x i1> @cmplt_wide_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmplt_wide_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmplt p0.b, p0/z, z0.b, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplt.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmplt_wide_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmplt_wide_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmplt p0.h, p0/z, z0.h, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmplt_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmplt_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmplt p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i1> %out
}

;
; CMPNE
;

define <vscale x 16 x i1> @cmpne_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmpne_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpne_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmpne_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpne_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmpne_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmpne_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpne_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @cmpne_wide_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpne_wide_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpne_wide_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpne_wide_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpne_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpne_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 16 x i1> @cmpne_ir_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: cmpne_ir_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = icmp ne <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @cmpne_ir_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: cmpne_ir_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = icmp ne <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @cmpne_ir_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: cmpne_ir_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = icmp ne <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @cmpne_ir_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpne_ir_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = icmp ne <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i1> %out
}

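; Verify the illegal <vscale x 1 x i64> compare is performed as a full .d
; compare, with punpklo converting the result back to <vscale x 1 x i1>.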
define <vscale x 1 x i1> @cmpne_ir_q(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b) {
; CHECK-LABEL: cmpne_ir_q:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    punpklo p0.h, p0.b
; CHECK-NEXT:    ret
  %out = icmp ne <vscale x 1 x i64> %a, %b
  ret <vscale x 1 x i1> %out
}

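; Verify a scalar splatted via the dup intrinsic is materialised with a single
; mov and consumed directly as the 64-bit operand of the wide compare.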
define <vscale x 16 x i1> @cmpgt_wide_splat_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, i64 %b) {
; CHECK-LABEL: cmpgt_wide_splat_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.d, x0
; CHECK-NEXT:    cmpgt p0.b, p0/z, z0.b, z1.d
; CHECK-NEXT:    ret
  %splat = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %b)
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 16 x i1> %out
}

define <vscale x 4 x i1> @cmpls_wide_splat_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, i64 %b) {
; CHECK-LABEL: cmpls_wide_splat_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.d, x0
; CHECK-NEXT:    cmpls p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %splat = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %b)
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %splat)
  ret <vscale x 4 x i1> %out
}

; Verify the general predicate is folded into the compare.
define <vscale x 4 x i1> @predicated_icmp(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: predicated_icmp:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    cmpge p0.s, p0/z, z2.s, z1.s
; CHECK-NEXT:    ret
  %icmp1 = icmp sgt <vscale x 4 x i32> %a, %b
  %icmp2 = icmp sle <vscale x 4 x i32> %b, %c
  %and = and <vscale x 4 x i1> %icmp1, %icmp2
  ret <vscale x 4 x i1> %and
}

define <vscale x 4 x i1> @predicated_icmp_unknown_lhs(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: predicated_icmp_unknown_lhs:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.s, p0/z, z1.s, z0.s
; CHECK-NEXT:    ret
  %icmp = icmp sle <vscale x 4 x i32> %b, %c
  %and = and <vscale x 4 x i1> %a, %icmp
  ret <vscale x 4 x i1> %and
}

define <vscale x 4 x i1> @predicated_icmp_unknown_rhs(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: predicated_icmp_unknown_rhs:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.s, p0/z, z1.s, z0.s
; CHECK-NEXT:    ret
  %icmp = icmp sle <vscale x 4 x i32> %b, %c
  %and = and <vscale x 4 x i1> %icmp, %a
  ret <vscale x 4 x i1> %and
}

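; Verify compares against splats of in-range constants use the immediate forms
; of the instructions.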
define <vscale x 16 x i1> @predicated_icmp_eq_imm(<vscale x 16 x i1> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: predicated_icmp_eq_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
; CHECK-NEXT:    ret
  %imm = shufflevector <vscale x 16 x i8> insertelement (<vscale x 16 x i8> undef, i8 0, i64 0), <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %icmp = icmp eq <vscale x 16 x i8> %b, %imm
  %and = and <vscale x 16 x i1> %a, %icmp
  ret <vscale x 16 x i1> %and
}

define <vscale x 8 x i1> @predicated_icmp_ne_imm(<vscale x 8 x i1> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: predicated_icmp_ne_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #-16
; CHECK-NEXT:    ret
  %imm = shufflevector <vscale x 8 x i16> insertelement (<vscale x 8 x i16> undef, i16 -16, i64 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %icmp = icmp ne <vscale x 8 x i16> %b, %imm
  %and = and <vscale x 8 x i1> %a, %icmp
  ret <vscale x 8 x i1> %and
}

define <vscale x 4 x i1> @predicated_icmp_sge_imm(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: predicated_icmp_sge_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, #1
; CHECK-NEXT:    ret
  %imm = shufflevector <vscale x 4 x i32> insertelement (<vscale x 4 x i32> undef, i32 1, i64 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %icmp = icmp sge <vscale x 4 x i32> %b, %imm
  %and = and <vscale x 4 x i1> %a, %icmp
  ret <vscale x 4 x i1> %and
}

define <vscale x 2 x i1> @predicated_icmp_sgt_imm(<vscale x 2 x i1> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: predicated_icmp_sgt_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpgt p0.d, p0/z, z0.d, #2
; CHECK-NEXT:    ret
  %imm = shufflevector <vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 2, i64 0), <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %icmp = icmp sgt <vscale x 2 x i64> %b, %imm
  %and = and <vscale x 2 x i1> %a, %icmp
  ret <vscale x 2 x i1> %and
}

define <vscale x 16 x i1> @predicated_icmp_sle_imm(<vscale x 16 x i1> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: predicated_icmp_sle_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmple p0.b, p0/z, z0.b, #-1
; CHECK-NEXT:    ret
  %imm = shufflevector <vscale x 16 x i8> insertelement (<vscale x 16 x i8> undef, i8 -1, i64 0), <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %icmp = icmp sle <vscale x 16 x i8> %b, %imm
  %and = and <vscale x 16 x i1> %a, %icmp
  ret <vscale x 16 x i1> %and
}

define <vscale x 8 x i1> @predicated_icmp_slt_imm(<vscale x 8 x i1> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: predicated_icmp_slt_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmplt p0.h, p0/z, z0.h, #-2
; CHECK-NEXT:    ret
  %imm = shufflevector <vscale x 8 x i16> insertelement (<vscale x 8 x i16> undef, i16 -2, i64 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %icmp = icmp slt <vscale x 8 x i16> %b, %imm
  %and = and <vscale x 8 x i1> %a, %icmp
  ret <vscale x 8 x i1> %and
}

define <vscale x 4 x i1> @predicated_icmp_uge_imm(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: predicated_icmp_uge_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphs p0.s, p0/z, z0.s, #1
; CHECK-NEXT:    ret
  %imm = shufflevector <vscale x 4 x i32> insertelement (<vscale x 4 x i32> undef, i32 1, i64 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %icmp = icmp uge <vscale x 4 x i32> %b, %imm
  %and = and <vscale x 4 x i1> %a, %icmp
  ret <vscale x 4 x i1> %and
}

define <vscale x 2 x i1> @predicated_icmp_ugt_imm(<vscale x 2 x i1> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: predicated_icmp_ugt_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphi p0.d, p0/z, z0.d, #2
; CHECK-NEXT:    ret
  %imm = shufflevector <vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 2, i64 0), <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %icmp = icmp ugt <vscale x 2 x i64> %b, %imm
  %and = and <vscale x 2 x i1> %a, %icmp
  ret <vscale x 2 x i1> %and
}

define <vscale x 16 x i1> @predicated_icmp_ule_imm(<vscale x 16 x i1> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: predicated_icmp_ule_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpls p0.b, p0/z, z0.b, #3
; CHECK-NEXT:    ret
  %imm = shufflevector <vscale x 16 x i8> insertelement (<vscale x 16 x i8> undef, i8 3, i64 0), <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %icmp = icmp ule <vscale x 16 x i8> %b, %imm
  %and = and <vscale x 16 x i1> %a, %icmp
  ret <vscale x 16 x i1> %and
}

define <vscale x 8 x i1> @predicated_icmp_ult_imm(<vscale x 8 x i1> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: predicated_icmp_ult_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmplo p0.h, p0/z, z0.h, #127
; CHECK-NEXT:    ret
  %imm = shufflevector <vscale x 8 x i16> insertelement (<vscale x 8 x i16> undef, i16 127, i64 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %icmp = icmp ult <vscale x 8 x i16> %b, %imm
  %and = and <vscale x 8 x i1> %a, %icmp
  ret <vscale x 8 x i1> %and
}

%svboolx2 = type { <vscale x 4 x i1>, <vscale x 4 x i1> }

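; Verify the predicate is not folded into the compare when the compare has
; other uses; an explicit AND of the predicates is emitted instead.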
define %svboolx2 @and_of_multiuse_icmp_sle(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: and_of_multiuse_icmp_sle:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p1.s
; CHECK-NEXT:    cmpge p1.s, p1/z, z1.s, z0.s
; CHECK-NEXT:    and p0.b, p0/z, p0.b, p1.b
; CHECK-NEXT:    ret
  %cmp = icmp sle <vscale x 4 x i32> %b, %c
  %and = and <vscale x 4 x i1> %a, %cmp
  %ins.1 = insertvalue %svboolx2 poison, <vscale x 4 x i1> %and, 0
  %ins.2 = insertvalue %svboolx2 %ins.1, <vscale x 4 x i1> %cmp, 1
  ret %svboolx2 %ins.2
}

define %svboolx2 @and_of_multiuse_icmp_sle_imm(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: and_of_multiuse_icmp_sle_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p1.s
; CHECK-NEXT:    cmple p1.s, p1/z, z0.s, #1
; CHECK-NEXT:    and p0.b, p0/z, p0.b, p1.b
; CHECK-NEXT:    ret
  %imm = shufflevector <vscale x 4 x i32> insertelement (<vscale x 4 x i32> undef, i32 1, i64 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %cmp = icmp sle <vscale x 4 x i32> %b, %imm
  %and = and <vscale x 4 x i1> %a, %cmp
  %ins.1 = insertvalue %svboolx2 poison, <vscale x 4 x i1> %and, 0
  %ins.2 = insertvalue %svboolx2 %ins.1, <vscale x 4 x i1> %cmp, 1
  ret %svboolx2 %ins.2
}

define %svboolx2 @and_of_multiuse_icmp_ugt(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: and_of_multiuse_icmp_ugt:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p1.s
; CHECK-NEXT:    cmphi p1.s, p1/z, z0.s, z1.s
; CHECK-NEXT:    and p0.b, p0/z, p0.b, p1.b
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 4 x i32> %b, %c
  %and = and <vscale x 4 x i1> %a, %cmp
  %ins.1 = insertvalue %svboolx2 poison, <vscale x 4 x i1> %and, 0
  %ins.2 = insertvalue %svboolx2 %ins.1, <vscale x 4 x i1> %cmp, 1
  ret %svboolx2 %ins.2
}

define %svboolx2 @and_of_multiuse_icmp_ugt_imm(<vscale x 4 x i1> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: and_of_multiuse_icmp_ugt_imm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p1.s
; CHECK-NEXT:    cmphi p1.s, p1/z, z0.s, #1
; CHECK-NEXT:    and p0.b, p0/z, p0.b, p1.b
; CHECK-NEXT:    ret
  %imm = shufflevector <vscale x 4 x i32> insertelement (<vscale x 4 x i32> undef, i32 1, i64 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 4 x i32> %b, %imm
  %and = and <vscale x 4 x i1> %a, %cmp
  %ins.1 = insertvalue %svboolx2 poison, <vscale x 4 x i1> %and, 0
  %ins.2 = insertvalue %svboolx2 %ins.1, <vscale x 4 x i1> %cmp, 1
  ret %svboolx2 %ins.2
}

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmple.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmple.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpls.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpls.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmplt.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64)