// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
// RUN:   -disable-O0-optnone \
// RUN:   -emit-llvm -o - %s | opt -S -passes=sroa | FileCheck %s

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>
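
// This test exercises the C/C++ comparison operators (==, !=, <, <=, >, >=)
// on SVE ACLE vector types. Comparisons between svbool_t predicates return a
// <vscale x 16 x i1> value directly, while element comparisons produce an i1
// vector that is widened back to a vector with the operands' element width.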

// EQ

// CHECK-LABEL: @eq_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
//
svbool_t eq_bool(svbool_t a, svbool_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t eq_i8(svint8_t a, svint8_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t eq_i16(svint16_t a, svint16_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t eq_i32(svint32_t a, svint32_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t eq_i64(svint64_t a, svint64_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t eq_u8(svuint8_t a, svuint8_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t eq_u16(svuint16_t a, svuint16_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t eq_u32(svuint32_t a, svuint32_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t eq_u64(svuint64_t a, svuint64_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t eq_f16(svfloat16_t a, svfloat16_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t eq_f32(svfloat32_t a, svfloat32_t b) {
  return a == b;
}

// CHECK-LABEL: @eq_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t eq_f64(svfloat64_t a, svfloat64_t b) {
  return a == b;
}

// NEQ

// CHECK-LABEL: @neq_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
//
svbool_t neq_bool(svbool_t a, svbool_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t neq_i8(svint8_t a, svint8_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t neq_i16(svint16_t a, svint16_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t neq_i32(svint32_t a, svint32_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t neq_i64(svint64_t a, svint64_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t neq_u8(svuint8_t a, svuint8_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t neq_u16(svuint16_t a, svuint16_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t neq_u32(svuint32_t a, svuint32_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t neq_u64(svuint64_t a, svuint64_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp une <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t neq_f16(svfloat16_t a, svfloat16_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp une <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t neq_f32(svfloat32_t a, svfloat32_t b) {
  return a != b;
}

// CHECK-LABEL: @neq_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp une <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t neq_f64(svfloat64_t a, svfloat64_t b) {
  return a != b;
}

// LT

// CHECK-LABEL: @lt_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
//
svbool_t lt_bool(svbool_t a, svbool_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp slt <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t lt_i8(svint8_t a, svint8_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp slt <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t lt_i16(svint16_t a, svint16_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp slt <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t lt_i32(svint32_t a, svint32_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp slt <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t lt_i64(svint64_t a, svint64_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t lt_u8(svuint8_t a, svuint8_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t lt_u16(svuint16_t a, svuint16_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t lt_u32(svuint32_t a, svuint32_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t lt_u64(svuint64_t a, svuint64_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t lt_f16(svfloat16_t a, svfloat16_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t lt_f32(svfloat32_t a, svfloat32_t b) {
  return a < b;
}

// CHECK-LABEL: @lt_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t lt_f64(svfloat64_t a, svfloat64_t b) {
  return a < b;
}

// LEQ

// CHECK-LABEL: @leq_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
//
svbool_t leq_bool(svbool_t a, svbool_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp sle <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t leq_i8(svint8_t a, svint8_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp sle <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t leq_i16(svint16_t a, svint16_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp sle <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t leq_i32(svint32_t a, svint32_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp sle <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t leq_i64(svint64_t a, svint64_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t leq_u8(svuint8_t a, svuint8_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t leq_u16(svuint16_t a, svuint16_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t leq_u32(svuint32_t a, svuint32_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t leq_u64(svuint64_t a, svuint64_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp ole <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t leq_f16(svfloat16_t a, svfloat16_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp ole <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t leq_f32(svfloat32_t a, svfloat32_t b) {
  return a <= b;
}

// CHECK-LABEL: @leq_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp ole <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t leq_f64(svfloat64_t a, svfloat64_t b) {
  return a <= b;
}

// GT

// CHECK-LABEL: @gt_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
//
svbool_t gt_bool(svbool_t a, svbool_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t gt_i8(svint8_t a, svint8_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t gt_i16(svint16_t a, svint16_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t gt_i32(svint32_t a, svint32_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t gt_i64(svint64_t a, svint64_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t gt_u8(svuint8_t a, svuint8_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t gt_u16(svuint16_t a, svuint16_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t gt_u32(svuint32_t a, svuint32_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t gt_u64(svuint64_t a, svuint64_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t gt_f16(svfloat16_t a, svfloat16_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t gt_f32(svfloat32_t a, svfloat32_t b) {
  return a > b;
}

// CHECK-LABEL: @gt_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t gt_f64(svfloat64_t a, svfloat64_t b) {
  return a > b;
}

// GEQ

// CHECK-LABEL: @geq_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: ret <vscale x 16 x i1> [[CMP]]
//
svbool_t geq_bool(svbool_t a, svbool_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp sge <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t geq_i8(svint8_t a, svint8_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp sge <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t geq_i16(svint16_t a, svint16_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp sge <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t geq_i32(svint32_t a, svint32_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp sge <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t geq_i64(svint64_t a, svint64_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
// CHECK-NEXT: ret <vscale x 16 x i8> [[CONV]]
//
svint8_t geq_u8(svuint8_t a, svuint8_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t geq_u16(svuint16_t a, svuint16_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t geq_u32(svuint32_t a, svuint32_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t geq_u64(svuint64_t a, svuint64_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp oge <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
// CHECK-NEXT: ret <vscale x 8 x i16> [[CONV]]
//
svint16_t geq_f16(svfloat16_t a, svfloat16_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp oge <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[CONV]]
//
svint32_t geq_f32(svfloat32_t a, svfloat32_t b) {
  return a >= b;
}

// CHECK-LABEL: @geq_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CMP:%.*]] = fcmp oge <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
// CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[CONV]]
//
svint64_t geq_f64(svfloat64_t a, svfloat64_t b) {
  return a >= b;
}