; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;                            Signed Comparisons                               ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
12 define <vscale x 16 x i1> @ir_cmpeq_b(<vscale x 16 x i8> %a) {
13 ; CHECK-LABEL: ir_cmpeq_b:
15 ; CHECK-NEXT: ptrue p0.b
16 ; CHECK-NEXT: cmpeq p0.b, p0/z, z0.b, #4
18 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
19 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
20 %out = icmp eq <vscale x 16 x i8> %a, %splat
21 ret <vscale x 16 x i1> %out
24 define <vscale x 16 x i1> @int_cmpeq_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
25 ; CHECK-LABEL: int_cmpeq_b:
27 ; CHECK-NEXT: cmpeq p0.b, p0/z, z0.b, #4
29 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
30 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
31 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %pg,
32 <vscale x 16 x i8> %a,
33 <vscale x 16 x i8> %splat)
34 ret <vscale x 16 x i1> %out
37 define <vscale x 16 x i1> @wide_cmpeq_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
38 ; CHECK-LABEL: wide_cmpeq_b:
40 ; CHECK-NEXT: cmpeq p0.b, p0/z, z0.b, #4
42 %elt = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
43 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
44 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv16i8(<vscale x 16 x i1> %pg,
45 <vscale x 16 x i8> %a,
46 <vscale x 2 x i64> %splat)
47 ret <vscale x 16 x i1> %out
50 define <vscale x 8 x i1> @ir_cmpeq_h(<vscale x 8 x i16> %a) {
51 ; CHECK-LABEL: ir_cmpeq_h:
53 ; CHECK-NEXT: ptrue p0.h
54 ; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, #-16
56 %elt = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
57 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
58 %out = icmp eq <vscale x 8 x i16> %a, %splat
59 ret <vscale x 8 x i1> %out
62 define <vscale x 8 x i1> @int_cmpeq_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
63 ; CHECK-LABEL: int_cmpeq_h:
65 ; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, #-16
67 %elt = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
68 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
69 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1> %pg,
70 <vscale x 8 x i16> %a,
71 <vscale x 8 x i16> %splat)
72 ret <vscale x 8 x i1> %out
75 define <vscale x 8 x i1> @wide_cmpeq_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
76 ; CHECK-LABEL: wide_cmpeq_h:
78 ; CHECK-NEXT: cmpeq p0.h, p0/z, z0.h, #-16
80 %elt = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
81 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
82 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv8i16(<vscale x 8 x i1> %pg,
83 <vscale x 8 x i16> %a,
84 <vscale x 2 x i64> %splat)
85 ret <vscale x 8 x i1> %out
88 define <vscale x 4 x i1> @ir_cmpeq_s(<vscale x 4 x i32> %a) {
89 ; CHECK-LABEL: ir_cmpeq_s:
91 ; CHECK-NEXT: ptrue p0.s
92 ; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, #15
94 %elt = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
95 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
96 %out = icmp eq <vscale x 4 x i32> %a, %splat
97 ret <vscale x 4 x i1> %out
100 define <vscale x 4 x i1> @int_cmpeq_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
101 ; CHECK-LABEL: int_cmpeq_s:
103 ; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, #15
105 %elt = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
106 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
107 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1> %pg,
108 <vscale x 4 x i32> %a,
109 <vscale x 4 x i32> %splat)
110 ret <vscale x 4 x i1> %out
113 define <vscale x 4 x i1> @wide_cmpeq_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
114 ; CHECK-LABEL: wide_cmpeq_s:
116 ; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, #15
118 %elt = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
119 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
120 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1> %pg,
121 <vscale x 4 x i32> %a,
122 <vscale x 2 x i64> %splat)
123 ret <vscale x 4 x i1> %out
126 define <vscale x 2 x i1> @ir_cmpeq_d(<vscale x 2 x i64> %a) {
127 ; CHECK-LABEL: ir_cmpeq_d:
129 ; CHECK-NEXT: ptrue p0.d
130 ; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, #0
132 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
133 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
134 %out = icmp eq <vscale x 2 x i64> %a, %splat
135 ret <vscale x 2 x i1> %out
138 define <vscale x 2 x i1> @int_cmpeq_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
139 ; CHECK-LABEL: int_cmpeq_d:
141 ; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, #0
143 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
144 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
145 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1> %pg,
146 <vscale x 2 x i64> %a,
147 <vscale x 2 x i64> %splat)
148 ret <vscale x 2 x i1> %out
155 define <vscale x 16 x i1> @ir_cmpge_b(<vscale x 16 x i8> %a) {
156 ; CHECK-LABEL: ir_cmpge_b:
158 ; CHECK-NEXT: ptrue p0.b
159 ; CHECK-NEXT: cmpge p0.b, p0/z, z0.b, #4
161 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
162 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
163 %out = icmp sge <vscale x 16 x i8> %a, %splat
164 ret <vscale x 16 x i1> %out
167 define <vscale x 16 x i1> @int_cmpge_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
168 ; CHECK-LABEL: int_cmpge_b:
170 ; CHECK-NEXT: cmpge p0.b, p0/z, z0.b, #4
172 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
173 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
174 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1> %pg,
175 <vscale x 16 x i8> %a,
176 <vscale x 16 x i8> %splat)
177 ret <vscale x 16 x i1> %out
180 define <vscale x 16 x i1> @wide_cmpge_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
181 ; CHECK-LABEL: wide_cmpge_b:
183 ; CHECK-NEXT: cmpge p0.b, p0/z, z0.b, #4
185 %elt = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
186 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
187 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.wide.nxv16i8(<vscale x 16 x i1> %pg,
188 <vscale x 16 x i8> %a,
189 <vscale x 2 x i64> %splat)
190 ret <vscale x 16 x i1> %out
193 define <vscale x 8 x i1> @ir_cmpge_h(<vscale x 8 x i16> %a) {
194 ; CHECK-LABEL: ir_cmpge_h:
196 ; CHECK-NEXT: ptrue p0.h
197 ; CHECK-NEXT: cmpge p0.h, p0/z, z0.h, #-16
199 %elt = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
200 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
201 %out = icmp sge <vscale x 8 x i16> %a, %splat
202 ret <vscale x 8 x i1> %out
205 define <vscale x 8 x i1> @int_cmpge_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
206 ; CHECK-LABEL: int_cmpge_h:
208 ; CHECK-NEXT: cmpge p0.h, p0/z, z0.h, #-16
210 %elt = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
211 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
212 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1> %pg,
213 <vscale x 8 x i16> %a,
214 <vscale x 8 x i16> %splat)
215 ret <vscale x 8 x i1> %out
218 define <vscale x 8 x i1> @wide_cmpge_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
219 ; CHECK-LABEL: wide_cmpge_h:
221 ; CHECK-NEXT: cmpge p0.h, p0/z, z0.h, #-16
223 %elt = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
224 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
225 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.wide.nxv8i16(<vscale x 8 x i1> %pg,
226 <vscale x 8 x i16> %a,
227 <vscale x 2 x i64> %splat)
228 ret <vscale x 8 x i1> %out
231 define <vscale x 4 x i1> @ir_cmpge_s(<vscale x 4 x i32> %a) {
232 ; CHECK-LABEL: ir_cmpge_s:
234 ; CHECK-NEXT: ptrue p0.s
235 ; CHECK-NEXT: cmpge p0.s, p0/z, z0.s, #15
237 %elt = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
238 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
239 %out = icmp sge <vscale x 4 x i32> %a, %splat
240 ret <vscale x 4 x i1> %out
243 define <vscale x 4 x i1> @int_cmpge_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
244 ; CHECK-LABEL: int_cmpge_s:
246 ; CHECK-NEXT: cmpge p0.s, p0/z, z0.s, #15
248 %elt = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
249 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
250 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1> %pg,
251 <vscale x 4 x i32> %a,
252 <vscale x 4 x i32> %splat)
253 ret <vscale x 4 x i1> %out
256 define <vscale x 4 x i1> @wide_cmpge_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
257 ; CHECK-LABEL: wide_cmpge_s:
259 ; CHECK-NEXT: cmpge p0.s, p0/z, z0.s, #15
261 %elt = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
262 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
263 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1> %pg,
264 <vscale x 4 x i32> %a,
265 <vscale x 2 x i64> %splat)
266 ret <vscale x 4 x i1> %out
269 define <vscale x 2 x i1> @ir_cmpge_d(<vscale x 2 x i64> %a) {
270 ; CHECK-LABEL: ir_cmpge_d:
272 ; CHECK-NEXT: ptrue p0.d
273 ; CHECK-NEXT: cmpge p0.d, p0/z, z0.d, #0
275 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
276 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
277 %out = icmp sge <vscale x 2 x i64> %a, %splat
278 ret <vscale x 2 x i1> %out
281 define <vscale x 2 x i1> @int_cmpge_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
282 ; CHECK-LABEL: int_cmpge_d:
284 ; CHECK-NEXT: cmpge p0.d, p0/z, z0.d, #0
286 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
287 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
288 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %pg,
289 <vscale x 2 x i64> %a,
290 <vscale x 2 x i64> %splat)
291 ret <vscale x 2 x i1> %out
298 define <vscale x 16 x i1> @ir_cmpgt_b(<vscale x 16 x i8> %a) {
299 ; CHECK-LABEL: ir_cmpgt_b:
301 ; CHECK-NEXT: ptrue p0.b
302 ; CHECK-NEXT: cmpgt p0.b, p0/z, z0.b, #4
304 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
305 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
306 %out = icmp sgt <vscale x 16 x i8> %a, %splat
307 ret <vscale x 16 x i1> %out
310 define <vscale x 16 x i1> @int_cmpgt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
311 ; CHECK-LABEL: int_cmpgt_b:
313 ; CHECK-NEXT: cmpgt p0.b, p0/z, z0.b, #4
315 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
316 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
317 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1> %pg,
318 <vscale x 16 x i8> %a,
319 <vscale x 16 x i8> %splat)
320 ret <vscale x 16 x i1> %out
323 define <vscale x 16 x i1> @wide_cmpgt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
324 ; CHECK-LABEL: wide_cmpgt_b:
326 ; CHECK-NEXT: cmpgt p0.b, p0/z, z0.b, #4
328 %elt = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
329 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
330 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1> %pg,
331 <vscale x 16 x i8> %a,
332 <vscale x 2 x i64> %splat)
333 ret <vscale x 16 x i1> %out
336 define <vscale x 8 x i1> @ir_cmpgt_h(<vscale x 8 x i16> %a) {
337 ; CHECK-LABEL: ir_cmpgt_h:
339 ; CHECK-NEXT: ptrue p0.h
340 ; CHECK-NEXT: cmpgt p0.h, p0/z, z0.h, #-16
342 %elt = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
343 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
344 %out = icmp sgt <vscale x 8 x i16> %a, %splat
345 ret <vscale x 8 x i1> %out
348 define <vscale x 8 x i1> @int_cmpgt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
349 ; CHECK-LABEL: int_cmpgt_h:
351 ; CHECK-NEXT: cmpgt p0.h, p0/z, z0.h, #-16
353 %elt = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
354 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
355 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1> %pg,
356 <vscale x 8 x i16> %a,
357 <vscale x 8 x i16> %splat)
358 ret <vscale x 8 x i1> %out
361 define <vscale x 8 x i1> @wide_cmpgt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
362 ; CHECK-LABEL: wide_cmpgt_h:
364 ; CHECK-NEXT: cmpgt p0.h, p0/z, z0.h, #-16
366 %elt = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
367 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
368 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv8i16(<vscale x 8 x i1> %pg,
369 <vscale x 8 x i16> %a,
370 <vscale x 2 x i64> %splat)
371 ret <vscale x 8 x i1> %out
374 define <vscale x 4 x i1> @ir_cmpgt_s(<vscale x 4 x i32> %a) {
375 ; CHECK-LABEL: ir_cmpgt_s:
377 ; CHECK-NEXT: ptrue p0.s
378 ; CHECK-NEXT: cmpgt p0.s, p0/z, z0.s, #15
380 %elt = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
381 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
382 %out = icmp sgt <vscale x 4 x i32> %a, %splat
383 ret <vscale x 4 x i1> %out
386 define <vscale x 4 x i1> @int_cmpgt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
387 ; CHECK-LABEL: int_cmpgt_s:
389 ; CHECK-NEXT: cmpgt p0.s, p0/z, z0.s, #15
391 %elt = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
392 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
393 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1> %pg,
394 <vscale x 4 x i32> %a,
395 <vscale x 4 x i32> %splat)
396 ret <vscale x 4 x i1> %out
399 define <vscale x 4 x i1> @wide_cmpgt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
400 ; CHECK-LABEL: wide_cmpgt_s:
402 ; CHECK-NEXT: cmpgt p0.s, p0/z, z0.s, #15
404 %elt = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
405 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
406 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1> %pg,
407 <vscale x 4 x i32> %a,
408 <vscale x 2 x i64> %splat)
409 ret <vscale x 4 x i1> %out
412 define <vscale x 2 x i1> @ir_cmpgt_d(<vscale x 2 x i64> %a) {
413 ; CHECK-LABEL: ir_cmpgt_d:
415 ; CHECK-NEXT: ptrue p0.d
416 ; CHECK-NEXT: cmpgt p0.d, p0/z, z0.d, #0
418 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
419 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
420 %out = icmp sgt <vscale x 2 x i64> %a, %splat
421 ret <vscale x 2 x i1> %out
424 define <vscale x 2 x i1> @int_cmpgt_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
425 ; CHECK-LABEL: int_cmpgt_d:
427 ; CHECK-NEXT: cmpgt p0.d, p0/z, z0.d, #0
429 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
430 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
431 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %pg,
432 <vscale x 2 x i64> %a,
433 <vscale x 2 x i64> %splat)
434 ret <vscale x 2 x i1> %out
441 define <vscale x 16 x i1> @ir_cmple_b(<vscale x 16 x i8> %a) {
442 ; CHECK-LABEL: ir_cmple_b:
444 ; CHECK-NEXT: ptrue p0.b
445 ; CHECK-NEXT: cmple p0.b, p0/z, z0.b, #4
447 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
448 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
449 %out = icmp sle <vscale x 16 x i8> %a, %splat
450 ret <vscale x 16 x i1> %out
453 define <vscale x 16 x i1> @int_cmple_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
454 ; CHECK-LABEL: int_cmple_b:
456 ; CHECK-NEXT: cmple p0.b, p0/z, z0.b, #4
458 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
459 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
460 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1> %pg,
461 <vscale x 16 x i8> %splat,
462 <vscale x 16 x i8> %a)
463 ret <vscale x 16 x i1> %out
466 define <vscale x 16 x i1> @wide_cmple_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
467 ; CHECK-LABEL: wide_cmple_b:
469 ; CHECK-NEXT: cmple p0.b, p0/z, z0.b, #4
471 %elt = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
472 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
473 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmple.wide.nxv16i8(<vscale x 16 x i1> %pg,
474 <vscale x 16 x i8> %a,
475 <vscale x 2 x i64> %splat)
476 ret <vscale x 16 x i1> %out
479 define <vscale x 8 x i1> @ir_cmple_h(<vscale x 8 x i16> %a) {
480 ; CHECK-LABEL: ir_cmple_h:
482 ; CHECK-NEXT: ptrue p0.h
483 ; CHECK-NEXT: cmple p0.h, p0/z, z0.h, #-16
485 %elt = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
486 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
487 %out = icmp sle <vscale x 8 x i16> %a, %splat
488 ret <vscale x 8 x i1> %out
491 define <vscale x 8 x i1> @int_cmple_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
492 ; CHECK-LABEL: int_cmple_h:
494 ; CHECK-NEXT: cmple p0.h, p0/z, z0.h, #-16
496 %elt = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
497 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
498 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1> %pg,
499 <vscale x 8 x i16> %splat,
500 <vscale x 8 x i16> %a)
501 ret <vscale x 8 x i1> %out
504 define <vscale x 8 x i1> @wide_cmple_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
505 ; CHECK-LABEL: wide_cmple_h:
507 ; CHECK-NEXT: cmple p0.h, p0/z, z0.h, #-16
509 %elt = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
510 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
511 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmple.wide.nxv8i16(<vscale x 8 x i1> %pg,
512 <vscale x 8 x i16> %a,
513 <vscale x 2 x i64> %splat)
514 ret <vscale x 8 x i1> %out
517 define <vscale x 4 x i1> @ir_cmple_s(<vscale x 4 x i32> %a) {
518 ; CHECK-LABEL: ir_cmple_s:
520 ; CHECK-NEXT: ptrue p0.s
521 ; CHECK-NEXT: cmple p0.s, p0/z, z0.s, #15
523 %elt = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
524 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
525 %out = icmp sle <vscale x 4 x i32> %a, %splat
526 ret <vscale x 4 x i1> %out
529 define <vscale x 4 x i1> @int_cmple_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
530 ; CHECK-LABEL: int_cmple_s:
532 ; CHECK-NEXT: cmple p0.s, p0/z, z0.s, #15
534 %elt = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
535 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
536 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1> %pg,
537 <vscale x 4 x i32> %splat,
538 <vscale x 4 x i32> %a)
539 ret <vscale x 4 x i1> %out
542 define <vscale x 4 x i1> @wide_cmple_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
543 ; CHECK-LABEL: wide_cmple_s:
545 ; CHECK-NEXT: cmple p0.s, p0/z, z0.s, #15
547 %elt = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
548 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
549 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1> %pg,
550 <vscale x 4 x i32> %a,
551 <vscale x 2 x i64> %splat)
552 ret <vscale x 4 x i1> %out
555 define <vscale x 2 x i1> @ir_cmple_d(<vscale x 2 x i64> %a) {
556 ; CHECK-LABEL: ir_cmple_d:
558 ; CHECK-NEXT: ptrue p0.d
559 ; CHECK-NEXT: cmple p0.d, p0/z, z0.d, #0
561 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
562 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
563 %out = icmp sle <vscale x 2 x i64> %a, %splat
564 ret <vscale x 2 x i1> %out
567 define <vscale x 2 x i1> @int_cmple_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
568 ; CHECK-LABEL: int_cmple_d:
570 ; CHECK-NEXT: cmple p0.d, p0/z, z0.d, #0
572 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
573 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
574 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %pg,
575 <vscale x 2 x i64> %splat,
576 <vscale x 2 x i64> %a)
577 ret <vscale x 2 x i1> %out
584 define <vscale x 16 x i1> @ir_cmplt_b(<vscale x 16 x i8> %a) {
585 ; CHECK-LABEL: ir_cmplt_b:
587 ; CHECK-NEXT: ptrue p0.b
588 ; CHECK-NEXT: cmplt p0.b, p0/z, z0.b, #4
590 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
591 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
592 %out = icmp slt <vscale x 16 x i8> %a, %splat
593 ret <vscale x 16 x i1> %out
596 define <vscale x 16 x i1> @int_cmplt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
597 ; CHECK-LABEL: int_cmplt_b:
599 ; CHECK-NEXT: cmplt p0.b, p0/z, z0.b, #4
601 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
602 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
603 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1> %pg,
604 <vscale x 16 x i8> %splat,
605 <vscale x 16 x i8> %a)
606 ret <vscale x 16 x i1> %out
609 define <vscale x 16 x i1> @wide_cmplt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
610 ; CHECK-LABEL: wide_cmplt_b:
612 ; CHECK-NEXT: cmplt p0.b, p0/z, z0.b, #4
614 %elt = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
615 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
616 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplt.wide.nxv16i8(<vscale x 16 x i1> %pg,
617 <vscale x 16 x i8> %a,
618 <vscale x 2 x i64> %splat)
619 ret <vscale x 16 x i1> %out
622 define <vscale x 8 x i1> @ir_cmplt_h(<vscale x 8 x i16> %a) {
623 ; CHECK-LABEL: ir_cmplt_h:
625 ; CHECK-NEXT: ptrue p0.h
626 ; CHECK-NEXT: cmplt p0.h, p0/z, z0.h, #-16
628 %elt = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
629 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
630 %out = icmp slt <vscale x 8 x i16> %a, %splat
631 ret <vscale x 8 x i1> %out
634 define <vscale x 8 x i1> @int_cmplt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
635 ; CHECK-LABEL: int_cmplt_h:
637 ; CHECK-NEXT: cmplt p0.h, p0/z, z0.h, #-16
639 %elt = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
640 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
641 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1> %pg,
642 <vscale x 8 x i16> %splat,
643 <vscale x 8 x i16> %a)
644 ret <vscale x 8 x i1> %out
647 define <vscale x 8 x i1> @wide_cmplt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
648 ; CHECK-LABEL: wide_cmplt_h:
650 ; CHECK-NEXT: cmplt p0.h, p0/z, z0.h, #-16
652 %elt = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
653 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
654 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1> %pg,
655 <vscale x 8 x i16> %a,
656 <vscale x 2 x i64> %splat)
657 ret <vscale x 8 x i1> %out
660 define <vscale x 4 x i1> @ir_cmplt_s(<vscale x 4 x i32> %a) {
661 ; CHECK-LABEL: ir_cmplt_s:
663 ; CHECK-NEXT: ptrue p0.s
664 ; CHECK-NEXT: cmplt p0.s, p0/z, z0.s, #15
666 %elt = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
667 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
668 %out = icmp slt <vscale x 4 x i32> %a, %splat
669 ret <vscale x 4 x i1> %out
672 define <vscale x 4 x i1> @int_cmplt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
673 ; CHECK-LABEL: int_cmplt_s:
675 ; CHECK-NEXT: cmplt p0.s, p0/z, z0.s, #15
677 %elt = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
678 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
679 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1> %pg,
680 <vscale x 4 x i32> %splat,
681 <vscale x 4 x i32> %a)
682 ret <vscale x 4 x i1> %out
685 define <vscale x 4 x i1> @wide_cmplt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
686 ; CHECK-LABEL: wide_cmplt_s:
688 ; CHECK-NEXT: cmplt p0.s, p0/z, z0.s, #15
690 %elt = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
691 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
692 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1> %pg,
693 <vscale x 4 x i32> %a,
694 <vscale x 2 x i64> %splat)
695 ret <vscale x 4 x i1> %out
698 define <vscale x 2 x i1> @ir_cmplt_d(<vscale x 2 x i64> %a) {
699 ; CHECK-LABEL: ir_cmplt_d:
701 ; CHECK-NEXT: ptrue p0.d
702 ; CHECK-NEXT: cmplt p0.d, p0/z, z0.d, #0
704 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
705 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
706 %out = icmp slt <vscale x 2 x i64> %a, %splat
707 ret <vscale x 2 x i1> %out
710 define <vscale x 2 x i1> @int_cmplt_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
711 ; CHECK-LABEL: int_cmplt_d:
713 ; CHECK-NEXT: cmplt p0.d, p0/z, z0.d, #0
715 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
716 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
717 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %pg,
718 <vscale x 2 x i64> %splat,
719 <vscale x 2 x i64> %a)
720 ret <vscale x 2 x i1> %out
727 define <vscale x 16 x i1> @ir_cmpne_b(<vscale x 16 x i8> %a) {
728 ; CHECK-LABEL: ir_cmpne_b:
730 ; CHECK-NEXT: ptrue p0.b
731 ; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #4
733 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
734 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
735 %out = icmp ne <vscale x 16 x i8> %a, %splat
736 ret <vscale x 16 x i1> %out
739 define <vscale x 16 x i1> @int_cmpne_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
740 ; CHECK-LABEL: int_cmpne_b:
742 ; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #4
744 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
745 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
746 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1> %pg,
747 <vscale x 16 x i8> %a,
748 <vscale x 16 x i8> %splat)
749 ret <vscale x 16 x i1> %out
752 define <vscale x 16 x i1> @wide_cmpne_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
753 ; CHECK-LABEL: wide_cmpne_b:
755 ; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #4
757 %elt = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
758 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
759 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %pg,
760 <vscale x 16 x i8> %a,
761 <vscale x 2 x i64> %splat)
762 ret <vscale x 16 x i1> %out
765 define <vscale x 8 x i1> @ir_cmpne_h(<vscale x 8 x i16> %a) {
766 ; CHECK-LABEL: ir_cmpne_h:
768 ; CHECK-NEXT: ptrue p0.h
769 ; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #-16
771 %elt = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
772 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
773 %out = icmp ne <vscale x 8 x i16> %a, %splat
774 ret <vscale x 8 x i1> %out
777 define <vscale x 8 x i1> @int_cmpne_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
778 ; CHECK-LABEL: int_cmpne_h:
780 ; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #-16
782 %elt = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
783 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
784 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %pg,
785 <vscale x 8 x i16> %a,
786 <vscale x 8 x i16> %splat)
787 ret <vscale x 8 x i1> %out
790 define <vscale x 8 x i1> @wide_cmpne_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
791 ; CHECK-LABEL: wide_cmpne_h:
793 ; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #-16
795 %elt = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
796 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
797 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %pg,
798 <vscale x 8 x i16> %a,
799 <vscale x 2 x i64> %splat)
800 ret <vscale x 8 x i1> %out
; CMPNE (signed/unsigned-agnostic inequality).
; Pattern per element width: `ir_` tests check that a generic `icmp ne` against
; a splatted in-range constant selects the immediate form of the SVE compare;
; `int_` tests check the same selection for the @llvm.aarch64.sve.cmpne
; intrinsic fed a splat operand; `wide_` tests use the 64-bit-element (.wide)
; intrinsic variant. CHECK lines are autogenerated — do not edit by hand.
803 define <vscale x 4 x i1> @ir_cmpne_s(<vscale x 4 x i32> %a) {
804 ; CHECK-LABEL: ir_cmpne_s:
806 ; CHECK-NEXT: ptrue p0.s
807 ; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #15
809 %elt = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
810 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
811 %out = icmp ne <vscale x 4 x i32> %a, %splat
812 ret <vscale x 4 x i1> %out
; Intrinsic form: predicate comes from %pg rather than an implicit ptrue.
815 define <vscale x 4 x i1> @int_cmpne_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
816 ; CHECK-LABEL: int_cmpne_s:
818 ; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #15
820 %elt = insertelement <vscale x 4 x i32> undef, i32 15, i32 0
821 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
822 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %pg,
823 <vscale x 4 x i32> %a,
824 <vscale x 4 x i32> %splat)
825 ret <vscale x 4 x i1> %out
; Wide form: second operand is an nxv2i64 splat compared element-wise against
; the narrower first operand; still expected to fold to the immediate encoding.
828 define <vscale x 4 x i1> @wide_cmpne_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
829 ; CHECK-LABEL: wide_cmpne_s:
831 ; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #15
833 %elt = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
834 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
835 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %pg,
836 <vscale x 4 x i32> %a,
837 <vscale x 2 x i64> %splat)
838 ret <vscale x 4 x i1> %out
; Doubleword tests use immediate #0 (boundary of the valid immediate range).
841 define <vscale x 2 x i1> @ir_cmpne_d(<vscale x 2 x i64> %a) {
842 ; CHECK-LABEL: ir_cmpne_d:
844 ; CHECK-NEXT: ptrue p0.d
845 ; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0
847 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
848 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
849 %out = icmp ne <vscale x 2 x i64> %a, %splat
850 ret <vscale x 2 x i1> %out
853 define <vscale x 2 x i1> @int_cmpne_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
854 ; CHECK-LABEL: int_cmpne_d:
856 ; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0
858 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
859 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
860 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %pg,
861 <vscale x 2 x i64> %a,
862 <vscale x 2 x i64> %splat)
863 ret <vscale x 2 x i1> %out
866 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
867 ;; Unsigned Comparisons ;;
868 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; CMPHI (unsigned higher, i.e. `icmp ugt`).
; `ir_` = generic IR compare vs splatted constant; `int_` = direct intrinsic;
; `wide_` = 64-bit-element (.wide) intrinsic variant. Immediates chosen per
; width (#4, #0, #68, #127) to probe the unsigned 7-bit immediate range.
; CHECK lines are autogenerated — do not edit by hand.
874 define <vscale x 16 x i1> @ir_cmphi_b(<vscale x 16 x i8> %a) {
875 ; CHECK-LABEL: ir_cmphi_b:
877 ; CHECK-NEXT: ptrue p0.b
878 ; CHECK-NEXT: cmphi p0.b, p0/z, z0.b, #4
880 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
881 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
882 %out = icmp ugt <vscale x 16 x i8> %a, %splat
883 ret <vscale x 16 x i1> %out
886 define <vscale x 16 x i1> @int_cmphi_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
887 ; CHECK-LABEL: int_cmphi_b:
889 ; CHECK-NEXT: cmphi p0.b, p0/z, z0.b, #4
891 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
892 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
893 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1> %pg,
894 <vscale x 16 x i8> %a,
895 <vscale x 16 x i8> %splat)
896 ret <vscale x 16 x i1> %out
899 define <vscale x 16 x i1> @wide_cmphi_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
900 ; CHECK-LABEL: wide_cmphi_b:
902 ; CHECK-NEXT: cmphi p0.b, p0/z, z0.b, #4
904 %elt = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
905 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
906 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.wide.nxv16i8(<vscale x 16 x i1> %pg,
907 <vscale x 16 x i8> %a,
908 <vscale x 2 x i64> %splat)
909 ret <vscale x 16 x i1> %out
; NOTE: `ugt #0` is equivalent to `ne #0` for unsigned values, so the expected
; output here is a CMPNE — this fold is intentional (see the CHECK line below).
912 define <vscale x 8 x i1> @ir_cmphi_h(<vscale x 8 x i16> %a) {
913 ; CHECK-LABEL: ir_cmphi_h:
915 ; CHECK-NEXT: ptrue p0.h
916 ; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0
918 %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
919 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
920 %out = icmp ugt <vscale x 8 x i16> %a, %splat
921 ret <vscale x 8 x i1> %out
; The intrinsic form is NOT folded to cmpne: the intrinsic's semantics are kept
; as written, so a cmphi with immediate #0 is expected here.
924 define <vscale x 8 x i1> @int_cmphi_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
925 ; CHECK-LABEL: int_cmphi_h:
927 ; CHECK-NEXT: cmphi p0.h, p0/z, z0.h, #0
929 %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
930 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
931 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1> %pg,
932 <vscale x 8 x i16> %a,
933 <vscale x 8 x i16> %splat)
934 ret <vscale x 8 x i1> %out
937 define <vscale x 8 x i1> @wide_cmphi_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
938 ; CHECK-LABEL: wide_cmphi_h:
940 ; CHECK-NEXT: cmphi p0.h, p0/z, z0.h, #0
942 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
943 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
944 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.wide.nxv8i16(<vscale x 8 x i1> %pg,
945 <vscale x 8 x i16> %a,
946 <vscale x 2 x i64> %splat)
947 ret <vscale x 8 x i1> %out
950 define <vscale x 4 x i1> @ir_cmphi_s(<vscale x 4 x i32> %a) {
951 ; CHECK-LABEL: ir_cmphi_s:
953 ; CHECK-NEXT: ptrue p0.s
954 ; CHECK-NEXT: cmphi p0.s, p0/z, z0.s, #68
956 %elt = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
957 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
958 %out = icmp ugt <vscale x 4 x i32> %a, %splat
959 ret <vscale x 4 x i1> %out
962 define <vscale x 4 x i1> @int_cmphi_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
963 ; CHECK-LABEL: int_cmphi_s:
965 ; CHECK-NEXT: cmphi p0.s, p0/z, z0.s, #68
967 %elt = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
968 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
969 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1> %pg,
970 <vscale x 4 x i32> %a,
971 <vscale x 4 x i32> %splat)
972 ret <vscale x 4 x i1> %out
975 define <vscale x 4 x i1> @wide_cmphi_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
976 ; CHECK-LABEL: wide_cmphi_s:
978 ; CHECK-NEXT: cmphi p0.s, p0/z, z0.s, #68
980 %elt = insertelement <vscale x 2 x i64> undef, i64 68, i32 0
981 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
982 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1> %pg,
983 <vscale x 4 x i32> %a,
984 <vscale x 2 x i64> %splat)
985 ret <vscale x 4 x i1> %out
; Doubleword tests use #127, the top of the unsigned immediate range. There is
; no .wide variant for .d (the element is already 64-bit).
988 define <vscale x 2 x i1> @ir_cmphi_d(<vscale x 2 x i64> %a) {
989 ; CHECK-LABEL: ir_cmphi_d:
991 ; CHECK-NEXT: ptrue p0.d
992 ; CHECK-NEXT: cmphi p0.d, p0/z, z0.d, #127
994 %elt = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
995 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
996 %out = icmp ugt <vscale x 2 x i64> %a, %splat
997 ret <vscale x 2 x i1> %out
1000 define <vscale x 2 x i1> @int_cmphi_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
1001 ; CHECK-LABEL: int_cmphi_d:
1003 ; CHECK-NEXT: cmphi p0.d, p0/z, z0.d, #127
1005 %elt = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
1006 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1007 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %pg,
1008 <vscale x 2 x i64> %a,
1009 <vscale x 2 x i64> %splat)
1010 ret <vscale x 2 x i1> %out
; CMPHS (unsigned higher-or-same, i.e. `icmp uge`).
; Same ir_/int_/wide_ structure as the CMPHI tests above; CHECK lines are
; autogenerated — do not edit by hand.
1017 define <vscale x 16 x i1> @ir_cmphs_b(<vscale x 16 x i8> %a) {
1018 ; CHECK-LABEL: ir_cmphs_b:
1020 ; CHECK-NEXT: ptrue p0.b
1021 ; CHECK-NEXT: cmphs p0.b, p0/z, z0.b, #4
1023 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
1024 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
1025 %out = icmp uge <vscale x 16 x i8> %a, %splat
1026 ret <vscale x 16 x i1> %out
1029 define <vscale x 16 x i1> @int_cmphs_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
1030 ; CHECK-LABEL: int_cmphs_b:
1032 ; CHECK-NEXT: cmphs p0.b, p0/z, z0.b, #4
1034 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
1035 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
1036 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1> %pg,
1037 <vscale x 16 x i8> %a,
1038 <vscale x 16 x i8> %splat)
1039 ret <vscale x 16 x i1> %out
1042 define <vscale x 16 x i1> @wide_cmphs_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
1043 ; CHECK-LABEL: wide_cmphs_b:
1045 ; CHECK-NEXT: cmphs p0.b, p0/z, z0.b, #4
1047 %elt = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
1048 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1049 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.wide.nxv16i8(<vscale x 16 x i1> %pg,
1050 <vscale x 16 x i8> %a,
1051 <vscale x 2 x i64> %splat)
1052 ret <vscale x 16 x i1> %out
; NOTE: `uge #0` is trivially true for unsigned values, so the compare folds
; away entirely and only the ptrue is expected — the absence of a compare
; CHECK line here is intentional, not a missing assertion.
1055 define <vscale x 8 x i1> @ir_cmphs_h(<vscale x 8 x i16> %a) {
1056 ; CHECK-LABEL: ir_cmphs_h:
1058 ; CHECK-NEXT: ptrue p0.h
1060 %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
1061 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
1062 %out = icmp uge <vscale x 8 x i16> %a, %splat
1063 ret <vscale x 8 x i1> %out
; Intrinsic semantics are preserved as written, so cmphs #0 is still emitted.
1066 define <vscale x 8 x i1> @int_cmphs_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
1067 ; CHECK-LABEL: int_cmphs_h:
1069 ; CHECK-NEXT: cmphs p0.h, p0/z, z0.h, #0
1071 %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
1072 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
1073 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1> %pg,
1074 <vscale x 8 x i16> %a,
1075 <vscale x 8 x i16> %splat)
1076 ret <vscale x 8 x i1> %out
1079 define <vscale x 8 x i1> @wide_cmphs_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
1080 ; CHECK-LABEL: wide_cmphs_h:
1082 ; CHECK-NEXT: cmphs p0.h, p0/z, z0.h, #0
1084 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
1085 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1086 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.wide.nxv8i16(<vscale x 8 x i1> %pg,
1087 <vscale x 8 x i16> %a,
1088 <vscale x 2 x i64> %splat)
1089 ret <vscale x 8 x i1> %out
1092 define <vscale x 4 x i1> @ir_cmphs_s(<vscale x 4 x i32> %a) {
1093 ; CHECK-LABEL: ir_cmphs_s:
1095 ; CHECK-NEXT: ptrue p0.s
1096 ; CHECK-NEXT: cmphs p0.s, p0/z, z0.s, #68
1098 %elt = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
1099 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
1100 %out = icmp uge <vscale x 4 x i32> %a, %splat
1101 ret <vscale x 4 x i1> %out
1104 define <vscale x 4 x i1> @int_cmphs_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
1105 ; CHECK-LABEL: int_cmphs_s:
1107 ; CHECK-NEXT: cmphs p0.s, p0/z, z0.s, #68
1109 %elt = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
1110 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
1111 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1> %pg,
1112 <vscale x 4 x i32> %a,
1113 <vscale x 4 x i32> %splat)
1114 ret <vscale x 4 x i1> %out
1117 define <vscale x 4 x i1> @wide_cmphs_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
1118 ; CHECK-LABEL: wide_cmphs_s:
1120 ; CHECK-NEXT: cmphs p0.s, p0/z, z0.s, #68
1122 %elt = insertelement <vscale x 2 x i64> undef, i64 68, i32 0
1123 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1124 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1> %pg,
1125 <vscale x 4 x i32> %a,
1126 <vscale x 2 x i64> %splat)
1127 ret <vscale x 4 x i1> %out
1130 define <vscale x 2 x i1> @ir_cmphs_d(<vscale x 2 x i64> %a) {
1131 ; CHECK-LABEL: ir_cmphs_d:
1133 ; CHECK-NEXT: ptrue p0.d
1134 ; CHECK-NEXT: cmphs p0.d, p0/z, z0.d, #127
1136 %elt = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
1137 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1138 %out = icmp uge <vscale x 2 x i64> %a, %splat
1139 ret <vscale x 2 x i1> %out
1142 define <vscale x 2 x i1> @int_cmphs_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
1143 ; CHECK-LABEL: int_cmphs_d:
1145 ; CHECK-NEXT: cmphs p0.d, p0/z, z0.d, #127
1147 %elt = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
1148 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1149 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %pg,
1150 <vscale x 2 x i64> %a,
1151 <vscale x 2 x i64> %splat)
1152 ret <vscale x 2 x i1> %out
; CMPLO (unsigned lower, i.e. `icmp ult`).
; There is no cmplo-with-register intrinsic, so the `int_` tests call the
; CMPHI intrinsic with its operands swapped (splat first) — selection is
; expected to commute this into the cmplo immediate form. `wide_` tests use
; the dedicated cmplo .wide intrinsic. CHECK lines are autogenerated —
; do not edit by hand.
1159 define <vscale x 16 x i1> @ir_cmplo_b(<vscale x 16 x i8> %a) {
1160 ; CHECK-LABEL: ir_cmplo_b:
1162 ; CHECK-NEXT: ptrue p0.b
1163 ; CHECK-NEXT: cmplo p0.b, p0/z, z0.b, #4
1165 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
1166 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
1167 %out = icmp ult <vscale x 16 x i8> %a, %splat
1168 ret <vscale x 16 x i1> %out
; cmphi(splat, a) == cmplo(a, splat); checks the commuted selection.
1171 define <vscale x 16 x i1> @int_cmplo_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
1172 ; CHECK-LABEL: int_cmplo_b:
1174 ; CHECK-NEXT: cmplo p0.b, p0/z, z0.b, #4
1176 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
1177 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
1178 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1> %pg,
1179 <vscale x 16 x i8> %splat,
1180 <vscale x 16 x i8> %a)
1181 ret <vscale x 16 x i1> %out
1184 define <vscale x 16 x i1> @wide_cmplo_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
1185 ; CHECK-LABEL: wide_cmplo_b:
1187 ; CHECK-NEXT: cmplo p0.b, p0/z, z0.b, #4
1189 %elt = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
1190 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1191 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1> %pg,
1192 <vscale x 16 x i8> %a,
1193 <vscale x 2 x i64> %splat)
1194 ret <vscale x 16 x i1> %out
1197 define <vscale x 8 x i1> @ir_cmplo_h(<vscale x 8 x i16> %a) {
1198 ; CHECK-LABEL: ir_cmplo_h:
1200 ; CHECK-NEXT: ptrue p0.h
1201 ; CHECK-NEXT: cmplo p0.h, p0/z, z0.h, #2
1203 %elt = insertelement <vscale x 8 x i16> undef, i16 2, i32 0
1204 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
1205 %out = icmp ult <vscale x 8 x i16> %a, %splat
1206 ret <vscale x 8 x i1> %out
1209 define <vscale x 8 x i1> @int_cmplo_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
1210 ; CHECK-LABEL: int_cmplo_h:
1212 ; CHECK-NEXT: cmplo p0.h, p0/z, z0.h, #3
1214 %elt = insertelement <vscale x 8 x i16> undef, i16 3, i32 0
1215 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
1216 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1> %pg,
1217 <vscale x 8 x i16> %splat,
1218 <vscale x 8 x i16> %a)
1219 ret <vscale x 8 x i1> %out
1222 define <vscale x 8 x i1> @wide_cmplo_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
1223 ; CHECK-LABEL: wide_cmplo_h:
1225 ; CHECK-NEXT: cmplo p0.h, p0/z, z0.h, #4
1227 %elt = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
1228 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1229 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1> %pg,
1230 <vscale x 8 x i16> %a,
1231 <vscale x 2 x i64> %splat)
1232 ret <vscale x 8 x i1> %out
1235 define <vscale x 4 x i1> @ir_cmplo_s(<vscale x 4 x i32> %a) {
1236 ; CHECK-LABEL: ir_cmplo_s:
1238 ; CHECK-NEXT: ptrue p0.s
1239 ; CHECK-NEXT: cmplo p0.s, p0/z, z0.s, #68
1241 %elt = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
1242 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
1243 %out = icmp ult <vscale x 4 x i32> %a, %splat
1244 ret <vscale x 4 x i1> %out
1247 define <vscale x 4 x i1> @int_cmplo_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
1248 ; CHECK-LABEL: int_cmplo_s:
1250 ; CHECK-NEXT: cmplo p0.s, p0/z, z0.s, #68
1252 %elt = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
1253 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
1254 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1> %pg,
1255 <vscale x 4 x i32> %splat,
1256 <vscale x 4 x i32> %a)
1257 ret <vscale x 4 x i1> %out
1260 define <vscale x 4 x i1> @wide_cmplo_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
1261 ; CHECK-LABEL: wide_cmplo_s:
1263 ; CHECK-NEXT: cmplo p0.s, p0/z, z0.s, #68
1265 %elt = insertelement <vscale x 2 x i64> undef, i64 68, i32 0
1266 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1267 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1> %pg,
1268 <vscale x 4 x i32> %a,
1269 <vscale x 2 x i64> %splat)
1270 ret <vscale x 4 x i1> %out
1273 define <vscale x 2 x i1> @ir_cmplo_d(<vscale x 2 x i64> %a) {
1274 ; CHECK-LABEL: ir_cmplo_d:
1276 ; CHECK-NEXT: ptrue p0.d
1277 ; CHECK-NEXT: cmplo p0.d, p0/z, z0.d, #127
1279 %elt = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
1280 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1281 %out = icmp ult <vscale x 2 x i64> %a, %splat
1282 ret <vscale x 2 x i1> %out
1285 define <vscale x 2 x i1> @int_cmplo_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
1286 ; CHECK-LABEL: int_cmplo_d:
1288 ; CHECK-NEXT: cmplo p0.d, p0/z, z0.d, #127
1290 %elt = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
1291 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1292 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %pg,
1293 <vscale x 2 x i64> %splat,
1294 <vscale x 2 x i64> %a)
1295 ret <vscale x 2 x i1> %out
; CMPLS (unsigned lower-or-same, i.e. `icmp ule`).
; As with CMPLO, there is no cmpls-with-register intrinsic: the `int_` tests
; call the CMPHS intrinsic with operands swapped (splat first), expecting the
; commuted cmpls immediate form; `wide_` tests use the cmpls .wide intrinsic.
; CHECK lines are autogenerated — do not edit by hand.
1302 define <vscale x 16 x i1> @ir_cmpls_b(<vscale x 16 x i8> %a) {
1303 ; CHECK-LABEL: ir_cmpls_b:
1305 ; CHECK-NEXT: ptrue p0.b
1306 ; CHECK-NEXT: cmpls p0.b, p0/z, z0.b, #4
1308 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
1309 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
1310 %out = icmp ule <vscale x 16 x i8> %a, %splat
1311 ret <vscale x 16 x i1> %out
; cmphs(splat, a) == cmpls(a, splat); checks the commuted selection.
1314 define <vscale x 16 x i1> @int_cmpls_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
1315 ; CHECK-LABEL: int_cmpls_b:
1317 ; CHECK-NEXT: cmpls p0.b, p0/z, z0.b, #4
1319 %elt = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
1320 %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
1321 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1> %pg,
1322 <vscale x 16 x i8> %splat,
1323 <vscale x 16 x i8> %a)
1324 ret <vscale x 16 x i1> %out
1327 define <vscale x 16 x i1> @wide_cmpls_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
1328 ; CHECK-LABEL: wide_cmpls_b:
1330 ; CHECK-NEXT: cmpls p0.b, p0/z, z0.b, #4
1332 %elt = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
1333 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1334 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpls.wide.nxv16i8(<vscale x 16 x i1> %pg,
1335 <vscale x 16 x i8> %a,
1336 <vscale x 2 x i64> %splat)
1337 ret <vscale x 16 x i1> %out
; `ule #0` is NOT folded away (it is equivalent to `eq #0`, still a compare),
; so a cmpls with immediate #0 is expected for the .h tests below.
1340 define <vscale x 8 x i1> @ir_cmpls_h(<vscale x 8 x i16> %a) {
1341 ; CHECK-LABEL: ir_cmpls_h:
1343 ; CHECK-NEXT: ptrue p0.h
1344 ; CHECK-NEXT: cmpls p0.h, p0/z, z0.h, #0
1346 %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
1347 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
1348 %out = icmp ule <vscale x 8 x i16> %a, %splat
1349 ret <vscale x 8 x i1> %out
1352 define <vscale x 8 x i1> @int_cmpls_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
1353 ; CHECK-LABEL: int_cmpls_h:
1355 ; CHECK-NEXT: cmpls p0.h, p0/z, z0.h, #0
1357 %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
1358 %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
1359 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1> %pg,
1360 <vscale x 8 x i16> %splat,
1361 <vscale x 8 x i16> %a)
1362 ret <vscale x 8 x i1> %out
1365 define <vscale x 8 x i1> @wide_cmpls_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
1366 ; CHECK-LABEL: wide_cmpls_h:
1368 ; CHECK-NEXT: cmpls p0.h, p0/z, z0.h, #0
1370 %elt = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
1371 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1372 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpls.wide.nxv8i16(<vscale x 8 x i1> %pg,
1373 <vscale x 8 x i16> %a,
1374 <vscale x 2 x i64> %splat)
1375 ret <vscale x 8 x i1> %out
1378 define <vscale x 4 x i1> @ir_cmpls_s(<vscale x 4 x i32> %a) {
1379 ; CHECK-LABEL: ir_cmpls_s:
1381 ; CHECK-NEXT: ptrue p0.s
1382 ; CHECK-NEXT: cmpls p0.s, p0/z, z0.s, #68
1384 %elt = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
1385 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
1386 %out = icmp ule <vscale x 4 x i32> %a, %splat
1387 ret <vscale x 4 x i1> %out
1390 define <vscale x 4 x i1> @int_cmpls_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
1391 ; CHECK-LABEL: int_cmpls_s:
1393 ; CHECK-NEXT: cmpls p0.s, p0/z, z0.s, #68
1395 %elt = insertelement <vscale x 4 x i32> undef, i32 68, i32 0
1396 %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
1397 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1> %pg,
1398 <vscale x 4 x i32> %splat,
1399 <vscale x 4 x i32> %a)
1400 ret <vscale x 4 x i1> %out
1403 define <vscale x 4 x i1> @wide_cmpls_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
1404 ; CHECK-LABEL: wide_cmpls_s:
1406 ; CHECK-NEXT: cmpls p0.s, p0/z, z0.s, #68
1408 %elt = insertelement <vscale x 2 x i64> undef, i64 68, i32 0
1409 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1410 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %pg,
1411 <vscale x 4 x i32> %a,
1412 <vscale x 2 x i64> %splat)
1413 ret <vscale x 4 x i1> %out
1416 define <vscale x 2 x i1> @ir_cmpls_d(<vscale x 2 x i64> %a) {
1417 ; CHECK-LABEL: ir_cmpls_d:
1419 ; CHECK-NEXT: ptrue p0.d
1420 ; CHECK-NEXT: cmpls p0.d, p0/z, z0.d, #127
1422 %elt = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
1423 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1424 %out = icmp ule <vscale x 2 x i64> %a, %splat
1425 ret <vscale x 2 x i1> %out
1428 define <vscale x 2 x i1> @int_cmpls_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
1429 ; CHECK-LABEL: int_cmpls_d:
1431 ; CHECK-NEXT: cmpls p0.d, p0/z, z0.d, #127
1433 %elt = insertelement <vscale x 2 x i64> undef, i64 127, i32 0
1434 %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
1435 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %pg,
1436 <vscale x 2 x i64> %splat,
1437 <vscale x 2 x i64> %a)
1438 ret <vscale x 2 x i1> %out
1441 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
1442 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
1443 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1444 declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
1445 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
1446 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
1447 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
1449 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
1450 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
1451 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1452 declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
1453 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
1454 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
1455 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
1457 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
1458 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
1459 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1460 declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
1461 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
1462 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
1463 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
1465 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
1466 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
1467 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1468 declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
1469 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
1470 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
1471 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
1473 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
1474 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
1475 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1476 declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
1477 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
1478 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
1479 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
1481 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmple.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
1482 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmple.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
1483 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
1485 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
1486 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
1487 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
1489 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpls.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
1490 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpls.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
1491 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
1493 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmplt.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
1494 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
1495 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
1497 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
1498 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
1499 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
1500 declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
1501 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
1502 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
1503 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)