1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
3 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme < %s | FileCheck %s
;
; WHILEGE (signed, inclusive lower bound)
;

; Scalar register operands must select WHILEGE directly: w-registers for the
; i32 intrinsic forms, x-registers for the i64 forms, for each predicate
; element size (b/h/s/d).
define <vscale x 16 x i1> @whilege_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilege_b_ww:
; CHECK-NEXT: whilege p0.b, w0, w1
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 %a, i32 %b)
ret <vscale x 16 x i1> %out
define <vscale x 16 x i1> @whilege_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilege_b_xx:
; CHECK-NEXT: whilege p0.b, x0, x1
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 %a, i64 %b)
ret <vscale x 16 x i1> %out
define <vscale x 8 x i1> @whilege_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilege_h_ww:
; CHECK-NEXT: whilege p0.h, w0, w1
%out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32 %a, i32 %b)
ret <vscale x 8 x i1> %out
define <vscale x 8 x i1> @whilege_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilege_h_xx:
; CHECK-NEXT: whilege p0.h, x0, x1
%out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i64(i64 %a, i64 %b)
ret <vscale x 8 x i1> %out
define <vscale x 4 x i1> @whilege_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilege_s_ww:
; CHECK-NEXT: whilege p0.s, w0, w1
%out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32 %a, i32 %b)
ret <vscale x 4 x i1> %out
define <vscale x 4 x i1> @whilege_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilege_s_xx:
; CHECK-NEXT: whilege p0.s, x0, x1
%out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64 %a, i64 %b)
ret <vscale x 4 x i1> %out
define <vscale x 2 x i1> @whilege_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilege_d_ww:
; CHECK-NEXT: whilege p0.d, w0, w1
%out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32 %a, i32 %b)
ret <vscale x 2 x i1> %out
define <vscale x 2 x i1> @whilege_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilege_d_xx:
; CHECK-NEXT: whilege p0.d, x0, x1
%out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64 %a, i64 %b)
ret <vscale x 2 x i1> %out
; whilege(3, 0) needs more active lanes than the minimum nxv2i1 vector
; guarantees, so it must stay a WHILEGE rather than fold to a ptrue pattern.
define <vscale x 2 x i1> @whilege_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilege_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK-NEXT: mov w8, #3
; CHECK-NEXT: whilege p0.d, x8, xzr
%out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64 3, i64 0)
ret <vscale x 2 x i1> %out
; whilege(3, -2) covers 6 elements (3 down to -2 inclusive): folds to vl6.
define <vscale x 16 x i1> @whilege_b_ii() {
; CHECK-LABEL: whilege_b_ii:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.b, vl6
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 3, i32 -2)
ret <vscale x 16 x i1> %out
; The element count here has no matching ptrue VL encoding, so no fold.
define <vscale x 16 x i1> @whilege_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilege_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #9
; CHECK-NEXT: whilege p0.b, x8, xzr
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 9, i64 0)
ret <vscale x 16 x i1> %out
; With vscale pinned to 16 an nxv16i1 predicate has exactly 256 lanes, so
; whilege(255, 0) (256 elements) folds to ptrue vl256.
define <vscale x 16 x i1> @whilege_b_ii_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilege_b_ii_vl_maximum:
; CHECK-NEXT: ptrue p0.b, vl256
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 255, i64 0)
ret <vscale x 16 x i1> %out
; INT_MAX - INT_MIN-ish operands: the element-count computation would
; overflow i32, so the fold must not fire.
define <vscale x 16 x i1> @whilege_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilege_b_ii_dont_fold_to_ptrue_overflow:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #2
; CHECK-NEXT: mov w9, #2147483647
; CHECK-NEXT: movk w8, #32768, lsl #16
; CHECK-NEXT: whilege p0.b, w9, w8
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 2147483647, i32 -2147483646)
ret <vscale x 16 x i1> %out
; NOTE(review): presumably the +1 for the inclusive "ge" bound is what
; overflows here — confirm against the fold's arithmetic. Either way, no fold.
define <vscale x 16 x i1> @whilege_b_ii_dont_fold_to_ptrue_increment_overflow() {
; CHECK-LABEL: whilege_b_ii_dont_fold_to_ptrue_increment_overflow:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #2147483647
; CHECK-NEXT: mov w9, #-2147483641
; CHECK-NEXT: whilege p0.b, w9, w8
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 -2147483641, i32 2147483647)
ret <vscale x 16 x i1> %out
;
; WHILEHS (unsigned, inclusive lower bound)
;

; Scalar register operands must select WHILEHS directly for each predicate
; element size; i32 forms use w-registers, i64 forms use x-registers.
define <vscale x 16 x i1> @whilehs_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehs_b_ww:
; CHECK-NEXT: whilehs p0.b, w0, w1
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 %a, i32 %b)
ret <vscale x 16 x i1> %out
define <vscale x 16 x i1> @whilehs_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehs_b_xx:
; CHECK-NEXT: whilehs p0.b, x0, x1
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 %a, i64 %b)
ret <vscale x 16 x i1> %out
define <vscale x 8 x i1> @whilehs_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehs_h_ww:
; CHECK-NEXT: whilehs p0.h, w0, w1
%out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i32(i32 %a, i32 %b)
ret <vscale x 8 x i1> %out
define <vscale x 8 x i1> @whilehs_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehs_h_xx:
; CHECK-NEXT: whilehs p0.h, x0, x1
%out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64 %a, i64 %b)
ret <vscale x 8 x i1> %out
define <vscale x 4 x i1> @whilehs_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehs_s_ww:
; CHECK-NEXT: whilehs p0.s, w0, w1
%out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32 %a, i32 %b)
ret <vscale x 4 x i1> %out
define <vscale x 4 x i1> @whilehs_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehs_s_xx:
; CHECK-NEXT: whilehs p0.s, x0, x1
%out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64 %a, i64 %b)
ret <vscale x 4 x i1> %out
define <vscale x 2 x i1> @whilehs_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehs_d_ww:
; CHECK-NEXT: whilehs p0.d, w0, w1
%out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32 %a, i32 %b)
ret <vscale x 2 x i1> %out
define <vscale x 2 x i1> @whilehs_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehs_d_xx:
; CHECK-NEXT: whilehs p0.d, x0, x1
%out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64 %a, i64 %b)
ret <vscale x 2 x i1> %out
; whilehs(3, 0) needs more active lanes than the minimum nxv2i1 vector
; guarantees, so it must not fold to a ptrue pattern.
define <vscale x 2 x i1> @whilehs_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilehs_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK-NEXT: mov w8, #3
; CHECK-NEXT: whilehs p0.d, x8, xzr
%out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64 3, i64 0)
ret <vscale x 2 x i1> %out
; whilehs(8, 2) covers 7 elements (8 down to 2 inclusive): folds to vl7.
define <vscale x 16 x i1> @whilehs_b_ii() {
; CHECK-LABEL: whilehs_b_ii:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.b, vl7
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 8, i64 2)
ret <vscale x 16 x i1> %out
; The element count here has no matching ptrue VL encoding, so no fold.
define <vscale x 16 x i1> @whilehs_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilehs_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #9
; CHECK-NEXT: whilehs p0.b, x8, xzr
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 9, i64 0)
ret <vscale x 16 x i1> %out
; With vscale pinned to 16 the nxv16i1 predicate has exactly 256 lanes, so
; whilehs(255, 0) (256 elements) folds to ptrue vl256.
define <vscale x 16 x i1> @whilehs_b_ii_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilehs_b_ii_vl_maximum:
; CHECK-NEXT: ptrue p0.b, vl256
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 255, i64 0)
ret <vscale x 16 x i1> %out
; The unsigned element-count computation would overflow i32, so no fold.
define <vscale x 16 x i1> @whilehs_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilehs_b_ii_dont_fold_to_ptrue_overflow:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: mov w9, #6
; CHECK-NEXT: whilehs p0.b, w9, w8
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 6, i32 4294967295)
ret <vscale x 16 x i1> %out
; whilehs(0xFFFFFFFF, 0): the inclusive count 0xFFFFFFFF - 0 + 1 wraps an
; unsigned i32 to 0, so the fold must not fire.
define <vscale x 16 x i1> @whilehs_b_ii_dont_fold_to_ptrue_increment_overflow() {
; CHECK-LABEL: whilehs_b_ii_dont_fold_to_ptrue_increment_overflow:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: whilehs p0.b, w8, wzr
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 4294967295, i32 0)
ret <vscale x 16 x i1> %out
;
; WHILEGT (signed, exclusive lower bound)
;

; Scalar register operands must select WHILEGT directly for each predicate
; element size; i32 forms use w-registers, i64 forms use x-registers.
define <vscale x 16 x i1> @whilegt_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilegt_b_ww:
; CHECK-NEXT: whilegt p0.b, w0, w1
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 %a, i32 %b)
ret <vscale x 16 x i1> %out
define <vscale x 16 x i1> @whilegt_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilegt_b_xx:
; CHECK-NEXT: whilegt p0.b, x0, x1
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 %a, i64 %b)
ret <vscale x 16 x i1> %out
define <vscale x 8 x i1> @whilegt_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilegt_h_ww:
; CHECK-NEXT: whilegt p0.h, w0, w1
%out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i32(i32 %a, i32 %b)
ret <vscale x 8 x i1> %out
define <vscale x 8 x i1> @whilegt_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilegt_h_xx:
; CHECK-NEXT: whilegt p0.h, x0, x1
%out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i64(i64 %a, i64 %b)
ret <vscale x 8 x i1> %out
define <vscale x 4 x i1> @whilegt_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilegt_s_ww:
; CHECK-NEXT: whilegt p0.s, w0, w1
%out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32 %a, i32 %b)
ret <vscale x 4 x i1> %out
define <vscale x 4 x i1> @whilegt_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilegt_s_xx:
; CHECK-NEXT: whilegt p0.s, x0, x1
%out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64 %a, i64 %b)
ret <vscale x 4 x i1> %out
define <vscale x 2 x i1> @whilegt_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilegt_d_ww:
; CHECK-NEXT: whilegt p0.d, w0, w1
%out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32 %a, i32 %b)
ret <vscale x 2 x i1> %out
define <vscale x 2 x i1> @whilegt_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilegt_d_xx:
; CHECK-NEXT: whilegt p0.d, x0, x1
%out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64 %a, i64 %b)
ret <vscale x 2 x i1> %out
; whilegt(3, 0) needs more active lanes than the minimum nxv2i1 vector
; guarantees, so it must not fold to a ptrue pattern.
define <vscale x 2 x i1> @whilegt_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilegt_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK-NEXT: mov w8, #3
; CHECK-NEXT: whilegt p0.d, x8, xzr
%out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64 3, i64 0)
ret <vscale x 2 x i1> %out
; whilegt(3, -2) covers 5 elements (3 down to -1, excluding -2): folds to vl5.
define <vscale x 16 x i1> @whilegt_b_ii() {
; CHECK-LABEL: whilegt_b_ii:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.b, vl5
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 3, i32 -2)
ret <vscale x 16 x i1> %out
; whilegt(9, 0) covers 9 elements (9 down to 1, excluding 0), and there is no
; vl9 ptrue encoding, so the intrinsic must NOT fold: the CHECK lines require
; a materialised WHILEGT. Renamed from "fold_to_ptrue_nonexistent_vl9" to
; "dont_fold_to_ptrue_nonexistent_vl9" — the old name contradicted the
; checked behaviour and the sibling whilege/whilehs/whilehi tests.
define <vscale x 16 x i1> @whilegt_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilegt_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #9
; CHECK-NEXT: whilegt p0.b, x8, xzr
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 9, i64 0)
ret <vscale x 16 x i1> %out
; With vscale pinned to 16 the nxv16i1 predicate has exactly 256 lanes, so
; whilegt(256, 0) (256 elements, exclusive bound) folds to ptrue vl256.
define <vscale x 16 x i1> @whilegt_b_ii_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilegt_b_ii_vl_maximum:
; CHECK-NEXT: ptrue p0.b, vl256
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 256, i64 0)
ret <vscale x 16 x i1> %out
; The signed element-count computation would overflow i32, so no fold.
define <vscale x 16 x i1> @whilegt_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilegt_b_ii_dont_fold_to_ptrue_overflow:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #2147483647
; CHECK-NEXT: mov w9, #-2147483641
; CHECK-NEXT: whilegt p0.b, w9, w8
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 -2147483641, i32 2147483647)
ret <vscale x 16 x i1> %out
;
; WHILEHI (unsigned, exclusive lower bound)
;

; Scalar register operands must select WHILEHI directly for each predicate
; element size; i32 forms use w-registers, i64 forms use x-registers.
define <vscale x 16 x i1> @whilehi_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehi_b_ww:
; CHECK-NEXT: whilehi p0.b, w0, w1
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32 %a, i32 %b)
ret <vscale x 16 x i1> %out
define <vscale x 16 x i1> @whilehi_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehi_b_xx:
; CHECK-NEXT: whilehi p0.b, x0, x1
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 %a, i64 %b)
ret <vscale x 16 x i1> %out
define <vscale x 8 x i1> @whilehi_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehi_h_ww:
; CHECK-NEXT: whilehi p0.h, w0, w1
%out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i32(i32 %a, i32 %b)
ret <vscale x 8 x i1> %out
define <vscale x 8 x i1> @whilehi_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehi_h_xx:
; CHECK-NEXT: whilehi p0.h, x0, x1
%out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i64(i64 %a, i64 %b)
ret <vscale x 8 x i1> %out
define <vscale x 4 x i1> @whilehi_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehi_s_ww:
; CHECK-NEXT: whilehi p0.s, w0, w1
%out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32 %a, i32 %b)
ret <vscale x 4 x i1> %out
define <vscale x 4 x i1> @whilehi_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehi_s_xx:
; CHECK-NEXT: whilehi p0.s, x0, x1
%out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64 %a, i64 %b)
ret <vscale x 4 x i1> %out
define <vscale x 2 x i1> @whilehi_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehi_d_ww:
; CHECK-NEXT: whilehi p0.d, w0, w1
%out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32 %a, i32 %b)
ret <vscale x 2 x i1> %out
define <vscale x 2 x i1> @whilehi_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehi_d_xx:
; CHECK-NEXT: whilehi p0.d, x0, x1
%out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64 %a, i64 %b)
ret <vscale x 2 x i1> %out
; whilehi(3, 0) needs more active lanes than the minimum nxv2i1 vector
; guarantees, so it must not fold to a ptrue pattern.
define <vscale x 2 x i1> @whilehi_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilehi_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK-NEXT: mov w8, #3
; CHECK-NEXT: whilehi p0.d, x8, xzr
%out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64 3, i64 0)
ret <vscale x 2 x i1> %out
; whilehi(8, 2) covers 6 elements (8 down to 3, excluding 2): folds to vl6.
define <vscale x 16 x i1> @whilehi_b_ii() {
; CHECK-LABEL: whilehi_b_ii:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.b, vl6
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 8, i64 2)
ret <vscale x 16 x i1> %out
; The element count here has no matching ptrue VL encoding, so no fold.
define <vscale x 16 x i1> @whilehi_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilehi_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #9
; CHECK-NEXT: whilehi p0.b, x8, xzr
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 9, i64 0)
ret <vscale x 16 x i1> %out
; With vscale pinned to 16 the nxv16i1 predicate has exactly 256 lanes, so
; whilehi(256, 0) (256 elements, exclusive bound) folds to ptrue vl256.
define <vscale x 16 x i1> @whilehi_b_ii_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilehi_b_ii_vl_maximum:
; CHECK-NEXT: ptrue p0.b, vl256
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 256, i64 0)
ret <vscale x 16 x i1> %out
; whilehi(7, 0xFFFFFFFF): the unsigned element-count computation overflows
; i32, so the intrinsic must not fold to a ptrue; the CHECK lines require a
; materialised WHILEHI. Renamed from the misspelled "whilelhi_..." to
; "whilehi_..." for consistency with the rest of the WHILEHI tests.
define <vscale x 16 x i1> @whilehi_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilehi_b_ii_dont_fold_to_ptrue_overflow:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: mov w9, #7
; CHECK-NEXT: whilehi p0.b, w9, w8
%out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32 7, i32 4294967295)
ret <vscale x 16 x i1> %out
541 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32, i32)
542 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64, i64)
543 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32, i32)
544 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i64(i64, i64)
545 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32, i32)
546 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64, i64)
547 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32, i32)
548 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64, i64)
550 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32, i32)
551 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64, i64)
552 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i32(i32, i32)
553 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64, i64)
554 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32, i32)
555 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64, i64)
556 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32, i32)
557 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64, i64)
559 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32, i32)
560 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64, i64)
561 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i32(i32, i32)
562 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i64(i64, i64)
563 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32, i32)
564 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64, i64)
565 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32, i32)
566 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64, i64)
568 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32, i32)
569 declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64, i64)
570 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i32(i32, i32)
571 declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i64(i64, i64)
572 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32, i32)
573 declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64, i64)
574 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32, i32)
575 declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64, i64)