// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
// RUN: -disable-O0-optnone \
// RUN: -emit-llvm -o - %s | opt -S -passes=sroa | FileCheck %s

// REQUIRES: aarch64-registered-target

// The ACLE header declaring the svbool_t/svint*_t/svuint*_t/svfloat*_t
// scalable-vector types used throughout this test.
#include <arm_sve.h>
12 // CHECK-LABEL: @eq_bool(
14 // CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
15 // CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
17 svbool_t
eq_bool(svbool_t a
, svbool_t b
) {
21 // CHECK-LABEL: @eq_i8(
23 // CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
24 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
25 // CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
27 svint8_t
eq_i8(svint8_t a
, svint8_t b
) {
31 // CHECK-LABEL: @eq_i16(
33 // CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
34 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
35 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
37 svint16_t
eq_i16(svint16_t a
, svint16_t b
) {
41 // CHECK-LABEL: @eq_i32(
43 // CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
44 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
45 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
47 svint32_t
eq_i32(svint32_t a
, svint32_t b
) {
51 // CHECK-LABEL: @eq_i64(
53 // CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
54 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
55 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
57 svint64_t
eq_i64(svint64_t a
, svint64_t b
) {
61 // CHECK-LABEL: @eq_u8(
63 // CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
64 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
65 // CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
67 svint8_t
eq_u8(svuint8_t a
, svuint8_t b
) {
71 // CHECK-LABEL: @eq_u16(
73 // CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
74 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
75 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
77 svint16_t
eq_u16(svuint16_t a
, svuint16_t b
) {
81 // CHECK-LABEL: @eq_u32(
83 // CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
84 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
85 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
87 svint32_t
eq_u32(svuint32_t a
, svuint32_t b
) {
91 // CHECK-LABEL: @eq_u64(
93 // CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
94 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
95 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
97 svint64_t
eq_u64(svuint64_t a
, svuint64_t b
) {
101 // CHECK-LABEL: @eq_f16(
102 // CHECK-NEXT: entry:
103 // CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
104 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
105 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
107 svint16_t
eq_f16(svfloat16_t a
, svfloat16_t b
) {
111 // CHECK-LABEL: @eq_f32(
112 // CHECK-NEXT: entry:
113 // CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
114 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
115 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
117 svint32_t
eq_f32(svfloat32_t a
, svfloat32_t b
) {
121 // CHECK-LABEL: @eq_f64(
122 // CHECK-NEXT: entry:
123 // CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
124 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
125 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
127 svint64_t
eq_f64(svfloat64_t a
, svfloat64_t b
) {
133 // CHECK-LABEL: @neq_bool(
134 // CHECK-NEXT: entry:
135 // CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
136 // CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
138 svbool_t
neq_bool(svbool_t a
, svbool_t b
) {
142 // CHECK-LABEL: @neq_i8(
143 // CHECK-NEXT: entry:
144 // CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
145 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
146 // CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
148 svint8_t
neq_i8(svint8_t a
, svint8_t b
) {
152 // CHECK-LABEL: @neq_i16(
153 // CHECK-NEXT: entry:
154 // CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
155 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
156 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
158 svint16_t
neq_i16(svint16_t a
, svint16_t b
) {
162 // CHECK-LABEL: @neq_i32(
163 // CHECK-NEXT: entry:
164 // CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
165 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
166 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
168 svint32_t
neq_i32(svint32_t a
, svint32_t b
) {
172 // CHECK-LABEL: @neq_i64(
173 // CHECK-NEXT: entry:
174 // CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
175 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
176 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
178 svint64_t
neq_i64(svint64_t a
, svint64_t b
) {
182 // CHECK-LABEL: @neq_u8(
183 // CHECK-NEXT: entry:
184 // CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
185 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
186 // CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
188 svint8_t
neq_u8(svuint8_t a
, svuint8_t b
) {
192 // CHECK-LABEL: @neq_u16(
193 // CHECK-NEXT: entry:
194 // CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
195 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
196 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
198 svint16_t
neq_u16(svuint16_t a
, svuint16_t b
) {
202 // CHECK-LABEL: @neq_u32(
203 // CHECK-NEXT: entry:
204 // CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
205 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
206 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
208 svint32_t
neq_u32(svuint32_t a
, svuint32_t b
) {
212 // CHECK-LABEL: @neq_u64(
213 // CHECK-NEXT: entry:
214 // CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
215 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
216 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
218 svint64_t
neq_u64(svuint64_t a
, svuint64_t b
) {
222 // CHECK-LABEL: @neq_f16(
223 // CHECK-NEXT: entry:
224 // CHECK-NEXT: [[CMP:%.*]] = fcmp une <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
225 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
226 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
228 svint16_t
neq_f16(svfloat16_t a
, svfloat16_t b
) {
232 // CHECK-LABEL: @neq_f32(
233 // CHECK-NEXT: entry:
234 // CHECK-NEXT: [[CMP:%.*]] = fcmp une <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
235 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
236 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
238 svint32_t
neq_f32(svfloat32_t a
, svfloat32_t b
) {
242 // CHECK-LABEL: @neq_f64(
243 // CHECK-NEXT: entry:
244 // CHECK-NEXT: [[CMP:%.*]] = fcmp une <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
245 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
246 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
248 svint64_t
neq_f64(svfloat64_t a
, svfloat64_t b
) {
254 // CHECK-LABEL: @lt_bool(
255 // CHECK-NEXT: entry:
256 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
257 // CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
259 svbool_t
lt_bool(svbool_t a
, svbool_t b
) {
263 // CHECK-LABEL: @lt_i8(
264 // CHECK-NEXT: entry:
265 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
266 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
267 // CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
269 svint8_t
lt_i8(svint8_t a
, svint8_t b
) {
273 // CHECK-LABEL: @lt_i16(
274 // CHECK-NEXT: entry:
275 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
276 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
277 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
279 svint16_t
lt_i16(svint16_t a
, svint16_t b
) {
283 // CHECK-LABEL: @lt_i32(
284 // CHECK-NEXT: entry:
285 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
286 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
287 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
289 svint32_t
lt_i32(svint32_t a
, svint32_t b
) {
293 // CHECK-LABEL: @lt_i64(
294 // CHECK-NEXT: entry:
295 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
296 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
297 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
299 svint64_t
lt_i64(svint64_t a
, svint64_t b
) {
303 // CHECK-LABEL: @lt_u8(
304 // CHECK-NEXT: entry:
305 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
306 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
307 // CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
309 svint8_t
lt_u8(svuint8_t a
, svuint8_t b
) {
313 // CHECK-LABEL: @lt_u16(
314 // CHECK-NEXT: entry:
315 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
316 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
317 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
319 svint16_t
lt_u16(svuint16_t a
, svuint16_t b
) {
323 // CHECK-LABEL: @lt_u32(
324 // CHECK-NEXT: entry:
325 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
326 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
327 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
329 svint32_t
lt_u32(svuint32_t a
, svuint32_t b
) {
333 // CHECK-LABEL: @lt_u64(
334 // CHECK-NEXT: entry:
335 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
336 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
337 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
339 svint64_t
lt_u64(svuint64_t a
, svuint64_t b
) {
343 // CHECK-LABEL: @lt_f16(
344 // CHECK-NEXT: entry:
345 // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
346 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
347 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
349 svint16_t
lt_f16(svfloat16_t a
, svfloat16_t b
) {
353 // CHECK-LABEL: @lt_f32(
354 // CHECK-NEXT: entry:
355 // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
356 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
357 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
359 svint32_t
lt_f32(svfloat32_t a
, svfloat32_t b
) {
363 // CHECK-LABEL: @lt_f64(
364 // CHECK-NEXT: entry:
365 // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
366 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
367 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
369 svint64_t
lt_f64(svfloat64_t a
, svfloat64_t b
) {
375 // CHECK-LABEL: @leq_bool(
376 // CHECK-NEXT: entry:
377 // CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
378 // CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
380 svbool_t
leq_bool(svbool_t a
, svbool_t b
) {
384 // CHECK-LABEL: @leq_i8(
385 // CHECK-NEXT: entry:
386 // CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
387 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
388 // CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
390 svint8_t
leq_i8(svint8_t a
, svint8_t b
) {
394 // CHECK-LABEL: @leq_i16(
395 // CHECK-NEXT: entry:
396 // CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
397 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
398 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
400 svint16_t
leq_i16(svint16_t a
, svint16_t b
) {
404 // CHECK-LABEL: @leq_i32(
405 // CHECK-NEXT: entry:
406 // CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
407 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
408 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
410 svint32_t
leq_i32(svint32_t a
, svint32_t b
) {
414 // CHECK-LABEL: @leq_i64(
415 // CHECK-NEXT: entry:
416 // CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
417 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
418 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
420 svint64_t
leq_i64(svint64_t a
, svint64_t b
) {
424 // CHECK-LABEL: @leq_u8(
425 // CHECK-NEXT: entry:
426 // CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
427 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
428 // CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
430 svint8_t
leq_u8(svuint8_t a
, svuint8_t b
) {
434 // CHECK-LABEL: @leq_u16(
435 // CHECK-NEXT: entry:
436 // CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
437 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
438 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
440 svint16_t
leq_u16(svuint16_t a
, svuint16_t b
) {
444 // CHECK-LABEL: @leq_u32(
445 // CHECK-NEXT: entry:
446 // CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
447 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
448 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
450 svint32_t
leq_u32(svuint32_t a
, svuint32_t b
) {
454 // CHECK-LABEL: @leq_u64(
455 // CHECK-NEXT: entry:
456 // CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
457 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
458 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
460 svint64_t
leq_u64(svuint64_t a
, svuint64_t b
) {
464 // CHECK-LABEL: @leq_f16(
465 // CHECK-NEXT: entry:
466 // CHECK-NEXT: [[CMP:%.*]] = fcmp ole <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
467 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
468 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
470 svint16_t
leq_f16(svfloat16_t a
, svfloat16_t b
) {
474 // CHECK-LABEL: @leq_f32(
475 // CHECK-NEXT: entry:
476 // CHECK-NEXT: [[CMP:%.*]] = fcmp ole <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
477 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
478 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
480 svint32_t
leq_f32(svfloat32_t a
, svfloat32_t b
) {
484 // CHECK-LABEL: @leq_f64(
485 // CHECK-NEXT: entry:
486 // CHECK-NEXT: [[CMP:%.*]] = fcmp ole <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
487 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
488 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
490 svint64_t
leq_f64(svfloat64_t a
, svfloat64_t b
) {
496 // CHECK-LABEL: @gt_bool(
497 // CHECK-NEXT: entry:
498 // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
499 // CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
501 svbool_t
gt_bool(svbool_t a
, svbool_t b
) {
505 // CHECK-LABEL: @gt_i8(
506 // CHECK-NEXT: entry:
507 // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
508 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
509 // CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
511 svint8_t
gt_i8(svint8_t a
, svint8_t b
) {
515 // CHECK-LABEL: @gt_i16(
516 // CHECK-NEXT: entry:
517 // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
518 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
519 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
521 svint16_t
gt_i16(svint16_t a
, svint16_t b
) {
525 // CHECK-LABEL: @gt_i32(
526 // CHECK-NEXT: entry:
527 // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
528 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
529 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
531 svint32_t
gt_i32(svint32_t a
, svint32_t b
) {
535 // CHECK-LABEL: @gt_i64(
536 // CHECK-NEXT: entry:
537 // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
538 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
539 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
541 svint64_t
gt_i64(svint64_t a
, svint64_t b
) {
545 // CHECK-LABEL: @gt_u8(
546 // CHECK-NEXT: entry:
547 // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
548 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
549 // CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
551 svint8_t
gt_u8(svuint8_t a
, svuint8_t b
) {
555 // CHECK-LABEL: @gt_u16(
556 // CHECK-NEXT: entry:
557 // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
558 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
559 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
561 svint16_t
gt_u16(svuint16_t a
, svuint16_t b
) {
565 // CHECK-LABEL: @gt_u32(
566 // CHECK-NEXT: entry:
567 // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
568 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
569 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
571 svint32_t
gt_u32(svuint32_t a
, svuint32_t b
) {
575 // CHECK-LABEL: @gt_u64(
576 // CHECK-NEXT: entry:
577 // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
578 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
579 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
581 svint64_t
gt_u64(svuint64_t a
, svuint64_t b
) {
585 // CHECK-LABEL: @gt_f16(
586 // CHECK-NEXT: entry:
587 // CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
588 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
589 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
591 svint16_t
gt_f16(svfloat16_t a
, svfloat16_t b
) {
595 // CHECK-LABEL: @gt_f32(
596 // CHECK-NEXT: entry:
597 // CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
598 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
599 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
601 svint32_t
gt_f32(svfloat32_t a
, svfloat32_t b
) {
605 // CHECK-LABEL: @gt_f64(
606 // CHECK-NEXT: entry:
607 // CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
608 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
609 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
611 svint64_t
gt_f64(svfloat64_t a
, svfloat64_t b
) {
617 // CHECK-LABEL: @geq_bool(
618 // CHECK-NEXT: entry:
619 // CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
620 // CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
622 svbool_t
geq_bool(svbool_t a
, svbool_t b
) {
626 // CHECK-LABEL: @geq_i8(
627 // CHECK-NEXT: entry:
628 // CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
629 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
630 // CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
632 svint8_t
geq_i8(svint8_t a
, svint8_t b
) {
636 // CHECK-LABEL: @geq_i16(
637 // CHECK-NEXT: entry:
638 // CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
639 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
640 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
642 svint16_t
geq_i16(svint16_t a
, svint16_t b
) {
646 // CHECK-LABEL: @geq_i32(
647 // CHECK-NEXT: entry:
648 // CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
649 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
650 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
652 svint32_t
geq_i32(svint32_t a
, svint32_t b
) {
656 // CHECK-LABEL: @geq_i64(
657 // CHECK-NEXT: entry:
658 // CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
659 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
660 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
662 svint64_t
geq_i64(svint64_t a
, svint64_t b
) {
666 // CHECK-LABEL: @geq_u8(
667 // CHECK-NEXT: entry:
668 // CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
669 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
670 // CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
672 svint8_t
geq_u8(svuint8_t a
, svuint8_t b
) {
676 // CHECK-LABEL: @geq_u16(
677 // CHECK-NEXT: entry:
678 // CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
679 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
680 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
682 svint16_t
geq_u16(svuint16_t a
, svuint16_t b
) {
686 // CHECK-LABEL: @geq_u32(
687 // CHECK-NEXT: entry:
688 // CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
689 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
690 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
692 svint32_t
geq_u32(svuint32_t a
, svuint32_t b
) {
696 // CHECK-LABEL: @geq_u64(
697 // CHECK-NEXT: entry:
698 // CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
699 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
700 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
702 svint64_t
geq_u64(svuint64_t a
, svuint64_t b
) {
706 // CHECK-LABEL: @geq_f16(
707 // CHECK-NEXT: entry:
708 // CHECK-NEXT: [[CMP:%.*]] = fcmp oge <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
709 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
710 // CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
712 svint16_t
geq_f16(svfloat16_t a
, svfloat16_t b
) {
716 // CHECK-LABEL: @geq_f32(
717 // CHECK-NEXT: entry:
718 // CHECK-NEXT: [[CMP:%.*]] = fcmp oge <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
719 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
720 // CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
722 svint32_t
geq_f32(svfloat32_t a
, svfloat32_t b
) {
726 // CHECK-LABEL: @geq_f64(
727 // CHECK-NEXT: entry:
728 // CHECK-NEXT: [[CMP:%.*]] = fcmp oge <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
729 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
730 // CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
732 svint64_t
geq_f64(svfloat64_t a
, svfloat64_t b
) {