; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
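
;; Each fcmp condition code is exercised on <8 x float> and <4 x double> LASX
;; vectors; the <8 x i1> / <4 x i1> results are sign-extended and stored. The
;; `fast` variants check that relaxed ordered/unordered comparisons select the
;; same xvfcmp.c* instructions.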

;; TRUE
define void @v8f32_fcmp_true(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_true:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvrepli.b $xr0, -1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp true <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

;; FALSE
define void @v4f64_fcmp_false(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_false:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvrepli.b $xr0, 0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp false <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETOEQ
define void @v8f32_fcmp_oeq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_oeq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.ceq.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp oeq <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_oeq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_oeq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.ceq.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp oeq <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETUEQ
define void @v8f32_fcmp_ueq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ueq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cueq.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ueq <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ueq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ueq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cueq.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ueq <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETEQ
define void @v8f32_fcmp_eq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_eq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.ceq.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp fast oeq <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_eq(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_eq:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.ceq.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp fast ueq <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETOLE
define void @v8f32_fcmp_ole(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ole:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ole <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ole(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ole:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ole <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETULE
define void @v8f32_fcmp_ule(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ule:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cule.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ule <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ule(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ule:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cule.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ule <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETLE
define void @v8f32_fcmp_le(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_le:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp fast ole <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_le(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_le:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp fast ule <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETOLT
define void @v8f32_fcmp_olt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_olt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp olt <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_olt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_olt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp olt <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETULT
define void @v8f32_fcmp_ult(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ult:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cult.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ult <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ult(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ult:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cult.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ult <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETLT
define void @v8f32_fcmp_lt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_lt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp fast olt <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_lt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_lt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp fast ult <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETONE
define void @v8f32_fcmp_one(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cne.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp one <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_one(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cne.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp one <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETUNE
define void @v8f32_fcmp_une(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_une:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cune.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp une <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_une(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_une:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cune.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp une <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETNE
define void @v8f32_fcmp_ne(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ne:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cne.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp fast one <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ne(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ne:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cne.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp fast une <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETO
define void @v8f32_fcmp_ord(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ord:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cor.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ord <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ord(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ord:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cor.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ord <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETUO
define void @v8f32_fcmp_uno(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_uno:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cun.s $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp uno <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_uno(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_uno:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cun.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp uno <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETOGT
define void @v8f32_fcmp_ogt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ogt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.s $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ogt <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ogt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ogt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.d $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ogt <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETUGT
define void @v8f32_fcmp_ugt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ugt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cult.s $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp ugt <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ugt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ugt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cult.d $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp ugt <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETGT
define void @v8f32_fcmp_gt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_gt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.s $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp fast ogt <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_gt(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_gt:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.clt.d $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp fast ugt <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETOGE
define void @v8f32_fcmp_oge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_oge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.s $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp oge <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_oge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_oge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.d $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp oge <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETUGE
define void @v8f32_fcmp_uge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_uge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cule.s $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp uge <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_uge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_uge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cule.d $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp uge <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}

;; SETGE
define void @v8f32_fcmp_ge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v8f32_fcmp_ge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.s $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <8 x float>, ptr %a0
  %v1 = load <8 x float>, ptr %a1
  %cmp = fcmp fast oge <8 x float> %v0, %v1
  %ext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %ext, ptr %res
  ret void
}

define void @v4f64_fcmp_ge(ptr %res, ptr %a0, ptr %a1) nounwind {
; CHECK-LABEL: v4f64_fcmp_ge:
; CHECK:       # %bb.0:
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvld $xr1, $a2, 0
; CHECK-NEXT:    xvfcmp.cle.d $xr0, $xr1, $xr0
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
  %v0 = load <4 x double>, ptr %a0
  %v1 = load <4 x double>, ptr %a1
  %cmp = fcmp fast uge <4 x double> %v0, %v1
  %ext = sext <4 x i1> %cmp to <4 x i64>
  store <4 x i64> %ext, ptr %res
  ret void
}