; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme < %s | FileCheck %s
;
; WHILELE
;

define <vscale x 16 x i1> @whilele_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilele_b_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32 %a, i32 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilele_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilele_b_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.b, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64 %a, i64 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @whilele_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilele_h_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i32(i32 %a, i32 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @whilele_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilele_h_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.h, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i64(i64 %a, i64 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @whilele_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilele_s_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32 %a, i32 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @whilele_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilele_s_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.s, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i64(i64 %a, i64 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @whilele_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilele_d_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.d, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32 %a, i32 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilele_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilele_d_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilele p0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64 %a, i64 %b)
  ret <vscale x 2 x i1> %out
}

; The whilele constant fold only emits a ptrue when the result length fits the
; known-minimum vector; 4 elements > nxv2i1's minimum of 2, so keep the while.
define <vscale x 2 x i1> @whilele_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilele_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #3
; CHECK-NEXT:    whilele p0.d, xzr, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64 0, i64 3)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @whilele_b_ii() {
; CHECK-LABEL: whilele_b_ii:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b, vl6
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64 -2, i64 3)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilele_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilele_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #9
; CHECK-NEXT:    whilele p0.b, xzr, x8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64 0, i64 9)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilele_b_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilele_b_vl_maximum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64 0, i64 255)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilele_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilele_b_ii_dont_fold_to_ptrue_overflow:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #2
; CHECK-NEXT:    mov w9, #2147483647
; CHECK-NEXT:    movk w8, #32768, lsl #16
; CHECK-NEXT:    whilele p0.b, w9, w8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32 2147483647, i32 -2147483646)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilele_b_ii_dont_fold_to_ptrue_increment_overflow() {
; CHECK-LABEL: whilele_b_ii_dont_fold_to_ptrue_increment_overflow:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #2147483647
; CHECK-NEXT:    whilele p0.b, wzr, w8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32 0, i32 2147483647)
  ret <vscale x 16 x i1> %out
}
;
; WHILELO
;

define <vscale x 16 x i1> @whilelo_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelo_b_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 %a, i32 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelo_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelo_b_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.b, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 %a, i64 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @whilelo_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelo_h_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32 %a, i32 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @whilelo_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelo_h_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.h, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i64(i64 %a, i64 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @whilelo_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelo_s_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32 %a, i32 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @whilelo_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelo_s_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.s, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64 %a, i64 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @whilelo_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelo_d_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.d, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 %a, i32 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilelo_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelo_d_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelo p0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64 %a, i64 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilelo_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilelo_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #3
; CHECK-NEXT:    whilelo p0.d, xzr, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64 0, i64 3)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @whilelo_b_ii() {
; CHECK-LABEL: whilelo_b_ii:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b, vl6
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 2, i64 8)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelo_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilelo_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #9
; CHECK-NEXT:    whilelo p0.b, xzr, x8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 0, i64 9)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelo_b_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilelo_b_vl_maximum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 0, i64 256)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelo_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilelo_b_ii_dont_fold_to_ptrue_overflow:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #6
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    whilelo p0.b, w9, w8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 4294967295, i32 6)
  ret <vscale x 16 x i1> %out
}
;
; WHILELS
;

define <vscale x 16 x i1> @whilels_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilels_b_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32 %a, i32 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilels_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilels_b_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.b, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64 %a, i64 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @whilels_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilels_h_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i32(i32 %a, i32 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @whilels_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilels_h_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.h, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i64(i64 %a, i64 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @whilels_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilels_s_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i32(i32 %a, i32 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @whilels_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilels_s_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.s, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i64(i64 %a, i64 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @whilels_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilels_d_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.d, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32 %a, i32 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilels_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilels_d_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilels p0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64 %a, i64 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilels_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilels_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #3
; CHECK-NEXT:    whilels p0.d, xzr, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64 0, i64 3)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @whilels_b_ii() {
; CHECK-LABEL: whilels_b_ii:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b, vl7
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64 2, i64 8)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilels_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilels_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #9
; CHECK-NEXT:    whilels p0.b, xzr, x8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64 0, i64 9)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilels_b_ii_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilels_b_ii_vl_maximum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64 0, i64 255)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilels_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilels_b_ii_dont_fold_to_ptrue_overflow:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #6
; CHECK-NEXT:    mov w9, #-1
; CHECK-NEXT:    whilels p0.b, w9, w8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32 4294967295, i32 6)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilels_b_ii_dont_fold_to_ptrue_increment_overflow() {
; CHECK-LABEL: whilels_b_ii_dont_fold_to_ptrue_increment_overflow:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #-1
; CHECK-NEXT:    whilels p0.b, wzr, w8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32 0, i32 4294967295)
  ret <vscale x 16 x i1> %out
}
;
; WHILELT
;

define <vscale x 16 x i1> @whilelt_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelt_b_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32 %a, i32 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelt_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelt_b_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.b, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 %a, i64 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @whilelt_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelt_h_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i32(i32 %a, i32 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @whilelt_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelt_h_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.h, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i64(i64 %a, i64 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @whilelt_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelt_s_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i32(i32 %a, i32 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @whilelt_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelt_s_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.s, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64 %a, i64 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @whilelt_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilelt_d_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.d, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32 %a, i32 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilelt_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilelt_d_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilelt p0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64 %a, i64 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilelt_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilelt_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #3
; CHECK-NEXT:    whilelt p0.d, xzr, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64 0, i64 3)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @whilelt_b_ii() {
; CHECK-LABEL: whilelt_b_ii:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b, vl5
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 -2, i64 3)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelt_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilelt_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #9
; CHECK-NEXT:    whilelt p0.b, xzr, x8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 0, i64 9)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelt_b_ii_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilelt_b_ii_vl_maximum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 0, i64 256)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilelt_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilelt_b_ii_dont_fold_to_ptrue_overflow:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #2
; CHECK-NEXT:    mov w9, #2147483647
; CHECK-NEXT:    movk w8, #32768, lsl #16
; CHECK-NEXT:    whilelt p0.b, w9, w8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32 2147483647, i32 -2147483646)
  ret <vscale x 16 x i1> %out
}
; Intrinsic prototypes for the four WHILE* comparisons, one per predicate
; element count (nxv16/nxv8/nxv4/nxv2) and scalar operand width (i32/i64).
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64, i64)

declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64, i64)

declare <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64, i64)

declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64, i64)