; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -force-streaming < %s | FileCheck %s
;
; WHILELE
;

define <vscale x 16 x i1> @whilele_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilele_b_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32 %a, i32 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilele_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilele_b_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.b, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64 %a, i64 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @whilele_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilele_h_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i32(i32 %a, i32 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @whilele_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilele_h_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.h, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i64(i64 %a, i64 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @whilele_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilele_s_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32 %a, i32 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @whilele_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilele_s_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.s, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i64(i64 %a, i64 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @whilele_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilele_d_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.d, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32 %a, i32 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilele_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilele_d_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64 %a, i64 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilele_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilele_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #3 // =0x3
; CHECK-NEXT:    whilele p0.d, xzr, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64 0, i64 3)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @whilele_b_ii() {
; CHECK-LABEL: whilele_b_ii:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl6
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64 -2, i64 3)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilele_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilele_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #9 // =0x9
; CHECK-NEXT:    whilele p0.b, xzr, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64 0, i64 9)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilele_b_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilele_b_vl_maximum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64 0, i64 255)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilele_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilele_b_ii_dont_fold_to_ptrue_overflow:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #2 // =0x2
; CHECK-NEXT:    mov w9, #2147483647 // =0x7fffffff
; CHECK-NEXT:    movk w8, #32768, lsl #16
; CHECK-NEXT:    whilele p0.b, w9, w8
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32 2147483647, i32 -2147483646)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilele_b_ii_known_always_true() {
; CHECK-LABEL: whilele_b_ii_known_always_true:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32 2147483646, i32 2147483647)
  ret <vscale x 16 x i1> %out
}
;
; WHILELO
;

define <vscale x 16 x i1> @whilelo_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelo_b_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 %a, i32 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelo_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelo_b_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.b, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 %a, i64 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @whilelo_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelo_h_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32 %a, i32 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @whilelo_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelo_h_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.h, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i64(i64 %a, i64 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @whilelo_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelo_s_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32 %a, i32 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @whilelo_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelo_s_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.s, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64 %a, i64 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @whilelo_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelo_d_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.d, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 %a, i32 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilelo_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelo_d_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64 %a, i64 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilelo_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilelo_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #3 // =0x3
; CHECK-NEXT:    whilelo p0.d, xzr, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64 0, i64 3)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @whilelo_b_ii() {
; CHECK-LABEL: whilelo_b_ii:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl6
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 2, i64 8)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelo_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilelo_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #9 // =0x9
; CHECK-NEXT:    whilelo p0.b, xzr, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 0, i64 9)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelo_b_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilelo_b_vl_maximum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 0, i64 256)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelo_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilelo_b_ii_dont_fold_to_ptrue_overflow:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #6 // =0x6
; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
; CHECK-NEXT:    whilelo p0.b, w9, w8
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 4294967295, i32 6)
  ret <vscale x 16 x i1> %out
}
;
; WHILELS
;

define <vscale x 16 x i1> @whilels_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilels_b_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32 %a, i32 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilels_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilels_b_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.b, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64 %a, i64 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @whilels_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilels_h_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i32(i32 %a, i32 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @whilels_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilels_h_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.h, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i64(i64 %a, i64 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @whilels_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilels_s_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i32(i32 %a, i32 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @whilels_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilels_s_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.s, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i64(i64 %a, i64 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @whilels_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilels_d_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.d, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32 %a, i32 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilels_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilels_d_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64 %a, i64 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilels_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilels_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #3 // =0x3
; CHECK-NEXT:    whilels p0.d, xzr, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64 0, i64 3)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @whilels_b_ii() {
; CHECK-LABEL: whilels_b_ii:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl7
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64 2, i64 8)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilels_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilels_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #9 // =0x9
; CHECK-NEXT:    whilels p0.b, xzr, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64 0, i64 9)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilels_b_ii_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilels_b_ii_vl_maximum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64 0, i64 255)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilels_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilels_b_ii_dont_fold_to_ptrue_overflow:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #6 // =0x6
; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
; CHECK-NEXT:    whilels p0.b, w9, w8
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32 4294967295, i32 6)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilels_b_ii_known_always_true() {
; CHECK-LABEL: whilels_b_ii_known_always_true:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32 4294967294, i32 4294967295)
  ret <vscale x 16 x i1> %out
}
;
; WHILELT
;

define <vscale x 16 x i1> @whilelt_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelt_b_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32 %a, i32 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelt_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelt_b_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.b, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 %a, i64 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @whilelt_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelt_h_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i32(i32 %a, i32 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @whilelt_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelt_h_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.h, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i64(i64 %a, i64 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @whilelt_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelt_s_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i32(i32 %a, i32 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @whilelt_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelt_s_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.s, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64 %a, i64 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @whilelt_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelt_d_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.d, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32 %a, i32 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilelt_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelt_d_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64 %a, i64 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilelt_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilelt_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #3 // =0x3
; CHECK-NEXT:    whilelt p0.d, xzr, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64 0, i64 3)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @whilelt_b_ii() {
; CHECK-LABEL: whilelt_b_ii:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl5
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 -2, i64 3)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelt_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilelt_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #9 // =0x9
; CHECK-NEXT:    whilelt p0.b, xzr, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 0, i64 9)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelt_b_ii_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilelt_b_ii_vl_maximum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 0, i64 256)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelt_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilelt_b_ii_dont_fold_to_ptrue_overflow:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #2 // =0x2
; CHECK-NEXT:    mov w9, #2147483647 // =0x7fffffff
; CHECK-NEXT:    movk w8, #32768, lsl #16
; CHECK-NEXT:    whilelt p0.b, w9, w8
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32 2147483647, i32 -2147483646)
  ret <vscale x 16 x i1> %out
}
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64, i64)

declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64, i64)

declare <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64, i64)

declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64, i64)