1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
3 ; RUN: -disable-strictnode-mutation -target-abi=ilp32f \
4 ; RUN: | FileCheck -check-prefix=CHECKIF %s
5 ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
6 ; RUN: -disable-strictnode-mutation -target-abi=lp64f \
7 ; RUN: | FileCheck -check-prefix=CHECKIF %s
8 ; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
9 ; RUN: -disable-strictnode-mutation -target-abi=ilp32 \
10 ; RUN: | FileCheck -check-prefix=CHECKIZFINX %s
11 ; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
12 ; RUN: -disable-strictnode-mutation -target-abi=lp64 \
13 ; RUN: | FileCheck -check-prefix=CHECKIZFINX %s
14 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
15 ; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
16 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
17 ; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s
19 define i32 @fcmp_oeq(float %a, float %b) nounwind strictfp {
20 ; CHECKIF-LABEL: fcmp_oeq:
22 ; CHECKIF-NEXT: feq.s a0, fa0, fa1
25 ; CHECKIZFINX-LABEL: fcmp_oeq:
26 ; CHECKIZFINX: # %bb.0:
27 ; CHECKIZFINX-NEXT: feq.s a0, a0, a1
28 ; CHECKIZFINX-NEXT: ret
30 ; RV32I-LABEL: fcmp_oeq:
32 ; RV32I-NEXT: addi sp, sp, -16
33 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
34 ; RV32I-NEXT: call __eqsf2
35 ; RV32I-NEXT: seqz a0, a0
36 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
37 ; RV32I-NEXT: addi sp, sp, 16
40 ; RV64I-LABEL: fcmp_oeq:
42 ; RV64I-NEXT: addi sp, sp, -16
43 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
44 ; RV64I-NEXT: call __eqsf2
45 ; RV64I-NEXT: seqz a0, a0
46 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
47 ; RV64I-NEXT: addi sp, sp, 16
49 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
50 %2 = zext i1 %1 to i32
; Constrained floating-point compare intrinsic (the "quiet" fcmp variant per the
; LLVM LangRef); every fcmp_* test in this file calls it with a different
; predicate metadata string and "fpexcept.strict" exception behavior.
53 declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
55 define i32 @fcmp_ogt(float %a, float %b) nounwind strictfp {
56 ; CHECKIF-LABEL: fcmp_ogt:
58 ; CHECKIF-NEXT: frflags a1
59 ; CHECKIF-NEXT: flt.s a0, fa1, fa0
60 ; CHECKIF-NEXT: fsflags a1
61 ; CHECKIF-NEXT: feq.s zero, fa1, fa0
64 ; CHECKIZFINX-LABEL: fcmp_ogt:
65 ; CHECKIZFINX: # %bb.0:
66 ; CHECKIZFINX-NEXT: csrr a3, fflags
67 ; CHECKIZFINX-NEXT: flt.s a2, a1, a0
68 ; CHECKIZFINX-NEXT: csrw fflags, a3
69 ; CHECKIZFINX-NEXT: feq.s zero, a1, a0
70 ; CHECKIZFINX-NEXT: mv a0, a2
71 ; CHECKIZFINX-NEXT: ret
73 ; RV32I-LABEL: fcmp_ogt:
75 ; RV32I-NEXT: addi sp, sp, -16
76 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
77 ; RV32I-NEXT: call __gtsf2
78 ; RV32I-NEXT: sgtz a0, a0
79 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
80 ; RV32I-NEXT: addi sp, sp, 16
83 ; RV64I-LABEL: fcmp_ogt:
85 ; RV64I-NEXT: addi sp, sp, -16
86 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
87 ; RV64I-NEXT: call __gtsf2
88 ; RV64I-NEXT: sgtz a0, a0
89 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
90 ; RV64I-NEXT: addi sp, sp, 16
92 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
93 %2 = zext i1 %1 to i32
97 define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
98 ; CHECKIF-LABEL: fcmp_oge:
100 ; CHECKIF-NEXT: frflags a1
101 ; CHECKIF-NEXT: fle.s a0, fa1, fa0
102 ; CHECKIF-NEXT: fsflags a1
103 ; CHECKIF-NEXT: feq.s zero, fa1, fa0
106 ; CHECKIZFINX-LABEL: fcmp_oge:
107 ; CHECKIZFINX: # %bb.0:
108 ; CHECKIZFINX-NEXT: csrr a3, fflags
109 ; CHECKIZFINX-NEXT: fle.s a2, a1, a0
110 ; CHECKIZFINX-NEXT: csrw fflags, a3
111 ; CHECKIZFINX-NEXT: feq.s zero, a1, a0
112 ; CHECKIZFINX-NEXT: mv a0, a2
113 ; CHECKIZFINX-NEXT: ret
115 ; RV32I-LABEL: fcmp_oge:
117 ; RV32I-NEXT: addi sp, sp, -16
118 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
119 ; RV32I-NEXT: call __gesf2
120 ; RV32I-NEXT: slti a0, a0, 0
121 ; RV32I-NEXT: xori a0, a0, 1
122 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
123 ; RV32I-NEXT: addi sp, sp, 16
126 ; RV64I-LABEL: fcmp_oge:
128 ; RV64I-NEXT: addi sp, sp, -16
129 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
130 ; RV64I-NEXT: call __gesf2
131 ; RV64I-NEXT: slti a0, a0, 0
132 ; RV64I-NEXT: xori a0, a0, 1
133 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
134 ; RV64I-NEXT: addi sp, sp, 16
136 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
137 %2 = zext i1 %1 to i32
141 define i32 @fcmp_olt(float %a, float %b) nounwind strictfp {
142 ; CHECKIF-LABEL: fcmp_olt:
144 ; CHECKIF-NEXT: frflags a1
145 ; CHECKIF-NEXT: flt.s a0, fa0, fa1
146 ; CHECKIF-NEXT: fsflags a1
147 ; CHECKIF-NEXT: feq.s zero, fa0, fa1
150 ; CHECKIZFINX-LABEL: fcmp_olt:
151 ; CHECKIZFINX: # %bb.0:
152 ; CHECKIZFINX-NEXT: csrr a3, fflags
153 ; CHECKIZFINX-NEXT: flt.s a2, a0, a1
154 ; CHECKIZFINX-NEXT: csrw fflags, a3
155 ; CHECKIZFINX-NEXT: feq.s zero, a0, a1
156 ; CHECKIZFINX-NEXT: mv a0, a2
157 ; CHECKIZFINX-NEXT: ret
159 ; RV32I-LABEL: fcmp_olt:
161 ; RV32I-NEXT: addi sp, sp, -16
162 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
163 ; RV32I-NEXT: call __ltsf2
164 ; RV32I-NEXT: slti a0, a0, 0
165 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
166 ; RV32I-NEXT: addi sp, sp, 16
169 ; RV64I-LABEL: fcmp_olt:
171 ; RV64I-NEXT: addi sp, sp, -16
172 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
173 ; RV64I-NEXT: call __ltsf2
174 ; RV64I-NEXT: slti a0, a0, 0
175 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
176 ; RV64I-NEXT: addi sp, sp, 16
178 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
179 %2 = zext i1 %1 to i32
183 define i32 @fcmp_ole(float %a, float %b) nounwind strictfp {
184 ; CHECKIF-LABEL: fcmp_ole:
186 ; CHECKIF-NEXT: frflags a1
187 ; CHECKIF-NEXT: fle.s a0, fa0, fa1
188 ; CHECKIF-NEXT: fsflags a1
189 ; CHECKIF-NEXT: feq.s zero, fa0, fa1
192 ; CHECKIZFINX-LABEL: fcmp_ole:
193 ; CHECKIZFINX: # %bb.0:
194 ; CHECKIZFINX-NEXT: csrr a3, fflags
195 ; CHECKIZFINX-NEXT: fle.s a2, a0, a1
196 ; CHECKIZFINX-NEXT: csrw fflags, a3
197 ; CHECKIZFINX-NEXT: feq.s zero, a0, a1
198 ; CHECKIZFINX-NEXT: mv a0, a2
199 ; CHECKIZFINX-NEXT: ret
201 ; RV32I-LABEL: fcmp_ole:
203 ; RV32I-NEXT: addi sp, sp, -16
204 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
205 ; RV32I-NEXT: call __lesf2
206 ; RV32I-NEXT: slti a0, a0, 1
207 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
208 ; RV32I-NEXT: addi sp, sp, 16
211 ; RV64I-LABEL: fcmp_ole:
213 ; RV64I-NEXT: addi sp, sp, -16
214 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
215 ; RV64I-NEXT: call __lesf2
216 ; RV64I-NEXT: slti a0, a0, 1
217 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
218 ; RV64I-NEXT: addi sp, sp, 16
220 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
221 %2 = zext i1 %1 to i32
225 ; FIXME: We only need one frflags before the two flts and one fsflags after the
227 define i32 @fcmp_one(float %a, float %b) nounwind strictfp {
228 ; CHECKIF-LABEL: fcmp_one:
230 ; CHECKIF-NEXT: frflags a0
231 ; CHECKIF-NEXT: flt.s a1, fa0, fa1
232 ; CHECKIF-NEXT: fsflags a0
233 ; CHECKIF-NEXT: feq.s zero, fa0, fa1
234 ; CHECKIF-NEXT: frflags a0
235 ; CHECKIF-NEXT: flt.s a2, fa1, fa0
236 ; CHECKIF-NEXT: fsflags a0
237 ; CHECKIF-NEXT: or a0, a2, a1
238 ; CHECKIF-NEXT: feq.s zero, fa1, fa0
241 ; CHECKIZFINX-LABEL: fcmp_one:
242 ; CHECKIZFINX: # %bb.0:
243 ; CHECKIZFINX-NEXT: csrr a2, fflags
244 ; CHECKIZFINX-NEXT: flt.s a3, a0, a1
245 ; CHECKIZFINX-NEXT: csrw fflags, a2
246 ; CHECKIZFINX-NEXT: feq.s zero, a0, a1
247 ; CHECKIZFINX-NEXT: csrr a2, fflags
248 ; CHECKIZFINX-NEXT: flt.s a4, a1, a0
249 ; CHECKIZFINX-NEXT: csrw fflags, a2
250 ; CHECKIZFINX-NEXT: or a2, a4, a3
251 ; CHECKIZFINX-NEXT: feq.s zero, a1, a0
252 ; CHECKIZFINX-NEXT: mv a0, a2
253 ; CHECKIZFINX-NEXT: ret
255 ; RV32I-LABEL: fcmp_one:
257 ; RV32I-NEXT: addi sp, sp, -16
258 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
259 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
260 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
261 ; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
262 ; RV32I-NEXT: mv s0, a1
263 ; RV32I-NEXT: mv s1, a0
264 ; RV32I-NEXT: call __eqsf2
265 ; RV32I-NEXT: snez s2, a0
266 ; RV32I-NEXT: mv a0, s1
267 ; RV32I-NEXT: mv a1, s0
268 ; RV32I-NEXT: call __unordsf2
269 ; RV32I-NEXT: seqz a0, a0
270 ; RV32I-NEXT: and a0, a0, s2
271 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
272 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
273 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
274 ; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
275 ; RV32I-NEXT: addi sp, sp, 16
278 ; RV64I-LABEL: fcmp_one:
280 ; RV64I-NEXT: addi sp, sp, -32
281 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
282 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
283 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
284 ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
285 ; RV64I-NEXT: mv s0, a1
286 ; RV64I-NEXT: mv s1, a0
287 ; RV64I-NEXT: call __eqsf2
288 ; RV64I-NEXT: snez s2, a0
289 ; RV64I-NEXT: mv a0, s1
290 ; RV64I-NEXT: mv a1, s0
291 ; RV64I-NEXT: call __unordsf2
292 ; RV64I-NEXT: seqz a0, a0
293 ; RV64I-NEXT: and a0, a0, s2
294 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
295 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
296 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
297 ; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
298 ; RV64I-NEXT: addi sp, sp, 32
300 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") strictfp
301 %2 = zext i1 %1 to i32
305 define i32 @fcmp_ord(float %a, float %b) nounwind strictfp {
306 ; CHECKIF-LABEL: fcmp_ord:
308 ; CHECKIF-NEXT: feq.s a0, fa1, fa1
309 ; CHECKIF-NEXT: feq.s a1, fa0, fa0
310 ; CHECKIF-NEXT: and a0, a1, a0
313 ; CHECKIZFINX-LABEL: fcmp_ord:
314 ; CHECKIZFINX: # %bb.0:
315 ; CHECKIZFINX-NEXT: feq.s a1, a1, a1
316 ; CHECKIZFINX-NEXT: feq.s a0, a0, a0
317 ; CHECKIZFINX-NEXT: and a0, a0, a1
318 ; CHECKIZFINX-NEXT: ret
320 ; RV32I-LABEL: fcmp_ord:
322 ; RV32I-NEXT: addi sp, sp, -16
323 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
324 ; RV32I-NEXT: call __unordsf2
325 ; RV32I-NEXT: seqz a0, a0
326 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
327 ; RV32I-NEXT: addi sp, sp, 16
330 ; RV64I-LABEL: fcmp_ord:
332 ; RV64I-NEXT: addi sp, sp, -16
333 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
334 ; RV64I-NEXT: call __unordsf2
335 ; RV64I-NEXT: seqz a0, a0
336 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
337 ; RV64I-NEXT: addi sp, sp, 16
339 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
340 %2 = zext i1 %1 to i32
344 ; FIXME: We only need one frflags before the two flts and one fsflags after the
346 define i32 @fcmp_ueq(float %a, float %b) nounwind strictfp {
347 ; CHECKIF-LABEL: fcmp_ueq:
349 ; CHECKIF-NEXT: frflags a0
350 ; CHECKIF-NEXT: flt.s a1, fa0, fa1
351 ; CHECKIF-NEXT: fsflags a0
352 ; CHECKIF-NEXT: feq.s zero, fa0, fa1
353 ; CHECKIF-NEXT: frflags a0
354 ; CHECKIF-NEXT: flt.s a2, fa1, fa0
355 ; CHECKIF-NEXT: fsflags a0
356 ; CHECKIF-NEXT: or a1, a2, a1
357 ; CHECKIF-NEXT: xori a0, a1, 1
358 ; CHECKIF-NEXT: feq.s zero, fa1, fa0
361 ; CHECKIZFINX-LABEL: fcmp_ueq:
362 ; CHECKIZFINX: # %bb.0:
363 ; CHECKIZFINX-NEXT: csrr a2, fflags
364 ; CHECKIZFINX-NEXT: flt.s a3, a0, a1
365 ; CHECKIZFINX-NEXT: csrw fflags, a2
366 ; CHECKIZFINX-NEXT: feq.s zero, a0, a1
367 ; CHECKIZFINX-NEXT: csrr a2, fflags
368 ; CHECKIZFINX-NEXT: flt.s a4, a1, a0
369 ; CHECKIZFINX-NEXT: csrw fflags, a2
370 ; CHECKIZFINX-NEXT: or a3, a4, a3
371 ; CHECKIZFINX-NEXT: xori a2, a3, 1
372 ; CHECKIZFINX-NEXT: feq.s zero, a1, a0
373 ; CHECKIZFINX-NEXT: mv a0, a2
374 ; CHECKIZFINX-NEXT: ret
376 ; RV32I-LABEL: fcmp_ueq:
378 ; RV32I-NEXT: addi sp, sp, -16
379 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
380 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
381 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
382 ; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
383 ; RV32I-NEXT: mv s0, a1
384 ; RV32I-NEXT: mv s1, a0
385 ; RV32I-NEXT: call __eqsf2
386 ; RV32I-NEXT: seqz s2, a0
387 ; RV32I-NEXT: mv a0, s1
388 ; RV32I-NEXT: mv a1, s0
389 ; RV32I-NEXT: call __unordsf2
390 ; RV32I-NEXT: snez a0, a0
391 ; RV32I-NEXT: or a0, a0, s2
392 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
393 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
394 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
395 ; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
396 ; RV32I-NEXT: addi sp, sp, 16
399 ; RV64I-LABEL: fcmp_ueq:
401 ; RV64I-NEXT: addi sp, sp, -32
402 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
403 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
404 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
405 ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
406 ; RV64I-NEXT: mv s0, a1
407 ; RV64I-NEXT: mv s1, a0
408 ; RV64I-NEXT: call __eqsf2
409 ; RV64I-NEXT: seqz s2, a0
410 ; RV64I-NEXT: mv a0, s1
411 ; RV64I-NEXT: mv a1, s0
412 ; RV64I-NEXT: call __unordsf2
413 ; RV64I-NEXT: snez a0, a0
414 ; RV64I-NEXT: or a0, a0, s2
415 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
416 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
417 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
418 ; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
419 ; RV64I-NEXT: addi sp, sp, 32
421 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
422 %2 = zext i1 %1 to i32
426 define i32 @fcmp_ugt(float %a, float %b) nounwind strictfp {
427 ; CHECKIF-LABEL: fcmp_ugt:
429 ; CHECKIF-NEXT: frflags a0
430 ; CHECKIF-NEXT: fle.s a1, fa0, fa1
431 ; CHECKIF-NEXT: fsflags a0
432 ; CHECKIF-NEXT: xori a0, a1, 1
433 ; CHECKIF-NEXT: feq.s zero, fa0, fa1
436 ; CHECKIZFINX-LABEL: fcmp_ugt:
437 ; CHECKIZFINX: # %bb.0:
438 ; CHECKIZFINX-NEXT: csrr a2, fflags
439 ; CHECKIZFINX-NEXT: fle.s a3, a0, a1
440 ; CHECKIZFINX-NEXT: csrw fflags, a2
441 ; CHECKIZFINX-NEXT: xori a2, a3, 1
442 ; CHECKIZFINX-NEXT: feq.s zero, a0, a1
443 ; CHECKIZFINX-NEXT: mv a0, a2
444 ; CHECKIZFINX-NEXT: ret
446 ; RV32I-LABEL: fcmp_ugt:
448 ; RV32I-NEXT: addi sp, sp, -16
449 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
450 ; RV32I-NEXT: call __lesf2
451 ; RV32I-NEXT: sgtz a0, a0
452 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
453 ; RV32I-NEXT: addi sp, sp, 16
456 ; RV64I-LABEL: fcmp_ugt:
458 ; RV64I-NEXT: addi sp, sp, -16
459 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
460 ; RV64I-NEXT: call __lesf2
461 ; RV64I-NEXT: sgtz a0, a0
462 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
463 ; RV64I-NEXT: addi sp, sp, 16
465 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
466 %2 = zext i1 %1 to i32
470 define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
471 ; CHECKIF-LABEL: fcmp_uge:
473 ; CHECKIF-NEXT: frflags a0
474 ; CHECKIF-NEXT: flt.s a1, fa0, fa1
475 ; CHECKIF-NEXT: fsflags a0
476 ; CHECKIF-NEXT: xori a0, a1, 1
477 ; CHECKIF-NEXT: feq.s zero, fa0, fa1
480 ; CHECKIZFINX-LABEL: fcmp_uge:
481 ; CHECKIZFINX: # %bb.0:
482 ; CHECKIZFINX-NEXT: csrr a2, fflags
483 ; CHECKIZFINX-NEXT: flt.s a3, a0, a1
484 ; CHECKIZFINX-NEXT: csrw fflags, a2
485 ; CHECKIZFINX-NEXT: xori a2, a3, 1
486 ; CHECKIZFINX-NEXT: feq.s zero, a0, a1
487 ; CHECKIZFINX-NEXT: mv a0, a2
488 ; CHECKIZFINX-NEXT: ret
490 ; RV32I-LABEL: fcmp_uge:
492 ; RV32I-NEXT: addi sp, sp, -16
493 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
494 ; RV32I-NEXT: call __ltsf2
495 ; RV32I-NEXT: slti a0, a0, 0
496 ; RV32I-NEXT: xori a0, a0, 1
497 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
498 ; RV32I-NEXT: addi sp, sp, 16
501 ; RV64I-LABEL: fcmp_uge:
503 ; RV64I-NEXT: addi sp, sp, -16
504 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
505 ; RV64I-NEXT: call __ltsf2
506 ; RV64I-NEXT: slti a0, a0, 0
507 ; RV64I-NEXT: xori a0, a0, 1
508 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
509 ; RV64I-NEXT: addi sp, sp, 16
511 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
512 %2 = zext i1 %1 to i32
516 define i32 @fcmp_ult(float %a, float %b) nounwind strictfp {
517 ; CHECKIF-LABEL: fcmp_ult:
519 ; CHECKIF-NEXT: frflags a0
520 ; CHECKIF-NEXT: fle.s a1, fa1, fa0
521 ; CHECKIF-NEXT: fsflags a0
522 ; CHECKIF-NEXT: xori a0, a1, 1
523 ; CHECKIF-NEXT: feq.s zero, fa1, fa0
526 ; CHECKIZFINX-LABEL: fcmp_ult:
527 ; CHECKIZFINX: # %bb.0:
528 ; CHECKIZFINX-NEXT: csrr a2, fflags
529 ; CHECKIZFINX-NEXT: fle.s a3, a1, a0
530 ; CHECKIZFINX-NEXT: csrw fflags, a2
531 ; CHECKIZFINX-NEXT: xori a2, a3, 1
532 ; CHECKIZFINX-NEXT: feq.s zero, a1, a0
533 ; CHECKIZFINX-NEXT: mv a0, a2
534 ; CHECKIZFINX-NEXT: ret
536 ; RV32I-LABEL: fcmp_ult:
538 ; RV32I-NEXT: addi sp, sp, -16
539 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
540 ; RV32I-NEXT: call __gesf2
541 ; RV32I-NEXT: slti a0, a0, 0
542 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
543 ; RV32I-NEXT: addi sp, sp, 16
546 ; RV64I-LABEL: fcmp_ult:
548 ; RV64I-NEXT: addi sp, sp, -16
549 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
550 ; RV64I-NEXT: call __gesf2
551 ; RV64I-NEXT: slti a0, a0, 0
552 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
553 ; RV64I-NEXT: addi sp, sp, 16
555 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
556 %2 = zext i1 %1 to i32
560 define i32 @fcmp_ule(float %a, float %b) nounwind strictfp {
561 ; CHECKIF-LABEL: fcmp_ule:
563 ; CHECKIF-NEXT: frflags a0
564 ; CHECKIF-NEXT: flt.s a1, fa1, fa0
565 ; CHECKIF-NEXT: fsflags a0
566 ; CHECKIF-NEXT: xori a0, a1, 1
567 ; CHECKIF-NEXT: feq.s zero, fa1, fa0
570 ; CHECKIZFINX-LABEL: fcmp_ule:
571 ; CHECKIZFINX: # %bb.0:
572 ; CHECKIZFINX-NEXT: csrr a2, fflags
573 ; CHECKIZFINX-NEXT: flt.s a3, a1, a0
574 ; CHECKIZFINX-NEXT: csrw fflags, a2
575 ; CHECKIZFINX-NEXT: xori a2, a3, 1
576 ; CHECKIZFINX-NEXT: feq.s zero, a1, a0
577 ; CHECKIZFINX-NEXT: mv a0, a2
578 ; CHECKIZFINX-NEXT: ret
580 ; RV32I-LABEL: fcmp_ule:
582 ; RV32I-NEXT: addi sp, sp, -16
583 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
584 ; RV32I-NEXT: call __gtsf2
585 ; RV32I-NEXT: slti a0, a0, 1
586 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
587 ; RV32I-NEXT: addi sp, sp, 16
590 ; RV64I-LABEL: fcmp_ule:
592 ; RV64I-NEXT: addi sp, sp, -16
593 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
594 ; RV64I-NEXT: call __gtsf2
595 ; RV64I-NEXT: slti a0, a0, 1
596 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
597 ; RV64I-NEXT: addi sp, sp, 16
599 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
600 %2 = zext i1 %1 to i32
604 define i32 @fcmp_une(float %a, float %b) nounwind strictfp {
605 ; CHECKIF-LABEL: fcmp_une:
607 ; CHECKIF-NEXT: feq.s a0, fa0, fa1
608 ; CHECKIF-NEXT: xori a0, a0, 1
611 ; CHECKIZFINX-LABEL: fcmp_une:
612 ; CHECKIZFINX: # %bb.0:
613 ; CHECKIZFINX-NEXT: feq.s a0, a0, a1
614 ; CHECKIZFINX-NEXT: xori a0, a0, 1
615 ; CHECKIZFINX-NEXT: ret
617 ; RV32I-LABEL: fcmp_une:
619 ; RV32I-NEXT: addi sp, sp, -16
620 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
621 ; RV32I-NEXT: call __nesf2
622 ; RV32I-NEXT: snez a0, a0
623 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
624 ; RV32I-NEXT: addi sp, sp, 16
627 ; RV64I-LABEL: fcmp_une:
629 ; RV64I-NEXT: addi sp, sp, -16
630 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
631 ; RV64I-NEXT: call __nesf2
632 ; RV64I-NEXT: snez a0, a0
633 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
634 ; RV64I-NEXT: addi sp, sp, 16
636 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") strictfp
637 %2 = zext i1 %1 to i32
641 define i32 @fcmp_uno(float %a, float %b) nounwind strictfp {
642 ; CHECKIF-LABEL: fcmp_uno:
644 ; CHECKIF-NEXT: feq.s a0, fa1, fa1
645 ; CHECKIF-NEXT: feq.s a1, fa0, fa0
646 ; CHECKIF-NEXT: and a0, a1, a0
647 ; CHECKIF-NEXT: xori a0, a0, 1
650 ; CHECKIZFINX-LABEL: fcmp_uno:
651 ; CHECKIZFINX: # %bb.0:
652 ; CHECKIZFINX-NEXT: feq.s a1, a1, a1
653 ; CHECKIZFINX-NEXT: feq.s a0, a0, a0
654 ; CHECKIZFINX-NEXT: and a0, a0, a1
655 ; CHECKIZFINX-NEXT: xori a0, a0, 1
656 ; CHECKIZFINX-NEXT: ret
658 ; RV32I-LABEL: fcmp_uno:
660 ; RV32I-NEXT: addi sp, sp, -16
661 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
662 ; RV32I-NEXT: call __unordsf2
663 ; RV32I-NEXT: snez a0, a0
664 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
665 ; RV32I-NEXT: addi sp, sp, 16
668 ; RV64I-LABEL: fcmp_uno:
670 ; RV64I-NEXT: addi sp, sp, -16
671 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
672 ; RV64I-NEXT: call __unordsf2
673 ; RV64I-NEXT: snez a0, a0
674 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
675 ; RV64I-NEXT: addi sp, sp, 16
677 %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
678 %2 = zext i1 %1 to i32
682 define i32 @fcmps_oeq(float %a, float %b) nounwind strictfp {
683 ; CHECKIF-LABEL: fcmps_oeq:
685 ; CHECKIF-NEXT: fle.s a0, fa1, fa0
686 ; CHECKIF-NEXT: fle.s a1, fa0, fa1
687 ; CHECKIF-NEXT: and a0, a1, a0
690 ; CHECKIZFINX-LABEL: fcmps_oeq:
691 ; CHECKIZFINX: # %bb.0:
692 ; CHECKIZFINX-NEXT: fle.s a2, a1, a0
693 ; CHECKIZFINX-NEXT: fle.s a0, a0, a1
694 ; CHECKIZFINX-NEXT: and a0, a0, a2
695 ; CHECKIZFINX-NEXT: ret
697 ; RV32I-LABEL: fcmps_oeq:
699 ; RV32I-NEXT: addi sp, sp, -16
700 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
701 ; RV32I-NEXT: call __eqsf2
702 ; RV32I-NEXT: seqz a0, a0
703 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
704 ; RV32I-NEXT: addi sp, sp, 16
707 ; RV64I-LABEL: fcmps_oeq:
709 ; RV64I-NEXT: addi sp, sp, -16
710 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
711 ; RV64I-NEXT: call __eqsf2
712 ; RV64I-NEXT: seqz a0, a0
713 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
714 ; RV64I-NEXT: addi sp, sp, 16
716 %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
717 %2 = zext i1 %1 to i32
; Signaling variant of the constrained floating-point compare intrinsic (per the
; LLVM LangRef); used by the fcmps_* tests. Note the expected codegen differs
; from the quiet fcmp_* tests above: here ordered predicates lower directly to
; flt.s/fle.s without the frflags/fsflags save-restore sequence.
720 declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)
722 define i32 @fcmps_ogt(float %a, float %b) nounwind strictfp {
723 ; CHECKIF-LABEL: fcmps_ogt:
725 ; CHECKIF-NEXT: flt.s a0, fa1, fa0
728 ; CHECKIZFINX-LABEL: fcmps_ogt:
729 ; CHECKIZFINX: # %bb.0:
730 ; CHECKIZFINX-NEXT: flt.s a0, a1, a0
731 ; CHECKIZFINX-NEXT: ret
733 ; RV32I-LABEL: fcmps_ogt:
735 ; RV32I-NEXT: addi sp, sp, -16
736 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
737 ; RV32I-NEXT: call __gtsf2
738 ; RV32I-NEXT: sgtz a0, a0
739 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
740 ; RV32I-NEXT: addi sp, sp, 16
743 ; RV64I-LABEL: fcmps_ogt:
745 ; RV64I-NEXT: addi sp, sp, -16
746 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
747 ; RV64I-NEXT: call __gtsf2
748 ; RV64I-NEXT: sgtz a0, a0
749 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
750 ; RV64I-NEXT: addi sp, sp, 16
752 %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
753 %2 = zext i1 %1 to i32
757 define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
758 ; CHECKIF-LABEL: fcmps_oge:
760 ; CHECKIF-NEXT: fle.s a0, fa1, fa0
763 ; CHECKIZFINX-LABEL: fcmps_oge:
764 ; CHECKIZFINX: # %bb.0:
765 ; CHECKIZFINX-NEXT: fle.s a0, a1, a0
766 ; CHECKIZFINX-NEXT: ret
768 ; RV32I-LABEL: fcmps_oge:
770 ; RV32I-NEXT: addi sp, sp, -16
771 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
772 ; RV32I-NEXT: call __gesf2
773 ; RV32I-NEXT: slti a0, a0, 0
774 ; RV32I-NEXT: xori a0, a0, 1
775 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
776 ; RV32I-NEXT: addi sp, sp, 16
779 ; RV64I-LABEL: fcmps_oge:
781 ; RV64I-NEXT: addi sp, sp, -16
782 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
783 ; RV64I-NEXT: call __gesf2
784 ; RV64I-NEXT: slti a0, a0, 0
785 ; RV64I-NEXT: xori a0, a0, 1
786 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
787 ; RV64I-NEXT: addi sp, sp, 16
789 %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
790 %2 = zext i1 %1 to i32
794 define i32 @fcmps_olt(float %a, float %b) nounwind strictfp {
795 ; CHECKIF-LABEL: fcmps_olt:
797 ; CHECKIF-NEXT: flt.s a0, fa0, fa1
800 ; CHECKIZFINX-LABEL: fcmps_olt:
801 ; CHECKIZFINX: # %bb.0:
802 ; CHECKIZFINX-NEXT: flt.s a0, a0, a1
803 ; CHECKIZFINX-NEXT: ret
805 ; RV32I-LABEL: fcmps_olt:
807 ; RV32I-NEXT: addi sp, sp, -16
808 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
809 ; RV32I-NEXT: call __ltsf2
810 ; RV32I-NEXT: slti a0, a0, 0
811 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
812 ; RV32I-NEXT: addi sp, sp, 16
815 ; RV64I-LABEL: fcmps_olt:
817 ; RV64I-NEXT: addi sp, sp, -16
818 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
819 ; RV64I-NEXT: call __ltsf2
820 ; RV64I-NEXT: slti a0, a0, 0
821 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
822 ; RV64I-NEXT: addi sp, sp, 16
824 %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
825 %2 = zext i1 %1 to i32
829 define i32 @fcmps_ole(float %a, float %b) nounwind strictfp {
830 ; CHECKIF-LABEL: fcmps_ole:
832 ; CHECKIF-NEXT: fle.s a0, fa0, fa1
835 ; CHECKIZFINX-LABEL: fcmps_ole:
836 ; CHECKIZFINX: # %bb.0:
837 ; CHECKIZFINX-NEXT: fle.s a0, a0, a1
838 ; CHECKIZFINX-NEXT: ret
840 ; RV32I-LABEL: fcmps_ole:
842 ; RV32I-NEXT: addi sp, sp, -16
843 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
844 ; RV32I-NEXT: call __lesf2
845 ; RV32I-NEXT: slti a0, a0, 1
846 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
847 ; RV32I-NEXT: addi sp, sp, 16
850 ; RV64I-LABEL: fcmps_ole:
852 ; RV64I-NEXT: addi sp, sp, -16
853 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
854 ; RV64I-NEXT: call __lesf2
855 ; RV64I-NEXT: slti a0, a0, 1
856 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
857 ; RV64I-NEXT: addi sp, sp, 16
859 %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
860 %2 = zext i1 %1 to i32
864 define i32 @fcmps_one(float %a, float %b) nounwind strictfp {
865 ; CHECKIF-LABEL: fcmps_one:
867 ; CHECKIF-NEXT: flt.s a0, fa0, fa1
868 ; CHECKIF-NEXT: flt.s a1, fa1, fa0
869 ; CHECKIF-NEXT: or a0, a1, a0
872 ; CHECKIZFINX-LABEL: fcmps_one:
873 ; CHECKIZFINX: # %bb.0:
874 ; CHECKIZFINX-NEXT: flt.s a2, a0, a1
875 ; CHECKIZFINX-NEXT: flt.s a0, a1, a0
876 ; CHECKIZFINX-NEXT: or a0, a0, a2
877 ; CHECKIZFINX-NEXT: ret
879 ; RV32I-LABEL: fcmps_one:
881 ; RV32I-NEXT: addi sp, sp, -16
882 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
883 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
884 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
885 ; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
886 ; RV32I-NEXT: mv s0, a1
887 ; RV32I-NEXT: mv s1, a0
888 ; RV32I-NEXT: call __eqsf2
889 ; RV32I-NEXT: snez s2, a0
890 ; RV32I-NEXT: mv a0, s1
891 ; RV32I-NEXT: mv a1, s0
892 ; RV32I-NEXT: call __unordsf2
893 ; RV32I-NEXT: seqz a0, a0
894 ; RV32I-NEXT: and a0, a0, s2
895 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
896 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
897 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
898 ; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
899 ; RV32I-NEXT: addi sp, sp, 16
902 ; RV64I-LABEL: fcmps_one:
904 ; RV64I-NEXT: addi sp, sp, -32
905 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
906 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
907 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
908 ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
909 ; RV64I-NEXT: mv s0, a1
910 ; RV64I-NEXT: mv s1, a0
911 ; RV64I-NEXT: call __eqsf2
912 ; RV64I-NEXT: snez s2, a0
913 ; RV64I-NEXT: mv a0, s1
914 ; RV64I-NEXT: mv a1, s0
915 ; RV64I-NEXT: call __unordsf2
916 ; RV64I-NEXT: seqz a0, a0
917 ; RV64I-NEXT: and a0, a0, s2
918 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
919 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
920 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
921 ; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
922 ; RV64I-NEXT: addi sp, sp, 32
924 %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") strictfp
925 %2 = zext i1 %1 to i32
929 define i32 @fcmps_ord(float %a, float %b) nounwind strictfp {
930 ; CHECKIF-LABEL: fcmps_ord:
932 ; CHECKIF-NEXT: fle.s a0, fa1, fa1
933 ; CHECKIF-NEXT: fle.s a1, fa0, fa0
934 ; CHECKIF-NEXT: and a0, a1, a0
937 ; CHECKIZFINX-LABEL: fcmps_ord:
938 ; CHECKIZFINX: # %bb.0:
939 ; CHECKIZFINX-NEXT: fle.s a1, a1, a1
940 ; CHECKIZFINX-NEXT: fle.s a0, a0, a0
941 ; CHECKIZFINX-NEXT: and a0, a0, a1
942 ; CHECKIZFINX-NEXT: ret
944 ; RV32I-LABEL: fcmps_ord:
946 ; RV32I-NEXT: addi sp, sp, -16
947 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
948 ; RV32I-NEXT: call __unordsf2
949 ; RV32I-NEXT: seqz a0, a0
950 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
951 ; RV32I-NEXT: addi sp, sp, 16
954 ; RV64I-LABEL: fcmps_ord:
956 ; RV64I-NEXT: addi sp, sp, -16
957 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
958 ; RV64I-NEXT: call __unordsf2
959 ; RV64I-NEXT: seqz a0, a0
960 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
961 ; RV64I-NEXT: addi sp, sp, 16
963 %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
964 %2 = zext i1 %1 to i32
968 define i32 @fcmps_ueq(float %a, float %b) nounwind strictfp {
969 ; CHECKIF-LABEL: fcmps_ueq:
971 ; CHECKIF-NEXT: flt.s a0, fa0, fa1
972 ; CHECKIF-NEXT: flt.s a1, fa1, fa0
973 ; CHECKIF-NEXT: or a0, a1, a0
974 ; CHECKIF-NEXT: xori a0, a0, 1
977 ; CHECKIZFINX-LABEL: fcmps_ueq:
978 ; CHECKIZFINX: # %bb.0:
979 ; CHECKIZFINX-NEXT: flt.s a2, a0, a1
980 ; CHECKIZFINX-NEXT: flt.s a0, a1, a0
981 ; CHECKIZFINX-NEXT: or a0, a0, a2
982 ; CHECKIZFINX-NEXT: xori a0, a0, 1
983 ; CHECKIZFINX-NEXT: ret
985 ; RV32I-LABEL: fcmps_ueq:
987 ; RV32I-NEXT: addi sp, sp, -16
988 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
989 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
990 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
991 ; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
992 ; RV32I-NEXT: mv s0, a1
993 ; RV32I-NEXT: mv s1, a0
994 ; RV32I-NEXT: call __eqsf2
995 ; RV32I-NEXT: seqz s2, a0
996 ; RV32I-NEXT: mv a0, s1
997 ; RV32I-NEXT: mv a1, s0
998 ; RV32I-NEXT: call __unordsf2
999 ; RV32I-NEXT: snez a0, a0
1000 ; RV32I-NEXT: or a0, a0, s2
1001 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1002 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1003 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
1004 ; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
1005 ; RV32I-NEXT: addi sp, sp, 16
1008 ; RV64I-LABEL: fcmps_ueq:
1010 ; RV64I-NEXT: addi sp, sp, -32
1011 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1012 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1013 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
1014 ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
1015 ; RV64I-NEXT: mv s0, a1
1016 ; RV64I-NEXT: mv s1, a0
1017 ; RV64I-NEXT: call __eqsf2
1018 ; RV64I-NEXT: seqz s2, a0
1019 ; RV64I-NEXT: mv a0, s1
1020 ; RV64I-NEXT: mv a1, s0
1021 ; RV64I-NEXT: call __unordsf2
1022 ; RV64I-NEXT: snez a0, a0
1023 ; RV64I-NEXT: or a0, a0, s2
1024 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1025 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1026 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
1027 ; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
1028 ; RV64I-NEXT: addi sp, sp, 32
1030 %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
1031 %2 = zext i1 %1 to i32
; Signaling (strictfp) fcmp with predicate "ugt" (unordered or greater-than)
; on f32.  Hard-float F/Zfinx targets compute !(a <= b) via fle.s + xori;
; soft-float RV32I/RV64I lower to a __lesf2 libcall and test for a positive
; result with sgtz.
define i32 @fcmps_ugt(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ugt:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa0, fa1
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_ugt:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fle.s a0, a0, a1
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ugt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __lesf2
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ugt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __lesf2
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}
; Signaling (strictfp) fcmp with predicate "uge" (unordered or >=) on f32.
; Hard-float F/Zfinx targets compute !(a < b) via flt.s + xori; soft-float
; RV32I/RV64I lower to a __ltsf2 libcall, test for a negative result with
; slti, then invert with xori.
define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_uge:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa0, fa1
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_uge:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    flt.s a0, a0, a1
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_uge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltsf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_uge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltsf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}
; Signaling (strictfp) fcmp with predicate "ult" (unordered or less-than)
; on f32.  Hard-float F/Zfinx targets compute !(b <= a) via fle.s with
; operands swapped + xori; soft-float RV32I/RV64I lower to a __gesf2
; libcall and test for a negative result with slti.
define i32 @fcmps_ult(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ult:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_ult:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fle.s a0, a1, a0
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ult:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gesf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ult:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gesf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}
; Signaling (strictfp) fcmp with predicate "ule" (unordered or <=) on f32.
; Hard-float F/Zfinx targets compute !(b < a) via flt.s with operands
; swapped + xori; soft-float RV32I/RV64I lower to a __gtsf2 libcall and
; test "result < 1" (i.e. <= 0) with slti.
define i32 @fcmps_ule(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ule:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa1, fa0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_ule:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    flt.s a0, a1, a0
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ule:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtsf2
; RV32I-NEXT:    slti a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ule:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtsf2
; RV64I-NEXT:    slti a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}
; Signaling (strictfp) fcmp with predicate "une" (unordered or not-equal)
; on f32.  Hard-float F/Zfinx targets compute !((b <= a) && (a <= b)) with
; two fle.s compares, an and, and a final xori (une == !oeq); soft-float
; RV32I/RV64I lower to a __nesf2 libcall and test nonzero with snez.
define i32 @fcmps_une(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_une:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa0
; CHECKIF-NEXT:    fle.s a1, fa0, fa1
; CHECKIF-NEXT:    and a0, a1, a0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_une:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fle.s a2, a1, a0
; CHECKIZFINX-NEXT:    fle.s a0, a0, a1
; CHECKIZFINX-NEXT:    and a0, a0, a2
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_une:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __nesf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_une:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __nesf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}
1226 define i32 @fcmps_uno(float %a, float %b) nounwind strictfp {
1227 ; CHECKIF-LABEL: fcmps_uno:
1229 ; CHECKIF-NEXT: fle.s a0, fa1, fa1
1230 ; CHECKIF-NEXT: fle.s a1, fa0, fa0
1231 ; CHECKIF-NEXT: and a0, a1, a0
1232 ; CHECKIF-NEXT: xori a0, a0, 1
1235 ; CHECKIZFINX-LABEL: fcmps_uno:
1236 ; CHECKIZFINX: # %bb.0:
1237 ; CHECKIZFINX-NEXT: fle.s a1, a1, a1
1238 ; CHECKIZFINX-NEXT: fle.s a0, a0, a0
1239 ; CHECKIZFINX-NEXT: and a0, a0, a1
1240 ; CHECKIZFINX-NEXT: xori a0, a0, 1
1241 ; CHECKIZFINX-NEXT: ret
1243 ; RV32I-LABEL: fcmps_uno:
1245 ; RV32I-NEXT: addi sp, sp, -16
1246 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1247 ; RV32I-NEXT: call __unordsf2
1248 ; RV32I-NEXT: snez a0, a0
1249 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1250 ; RV32I-NEXT: addi sp, sp, 16
1253 ; RV64I-LABEL: fcmps_uno:
1255 ; RV64I-NEXT: addi sp, sp, -16
1256 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1257 ; RV64I-NEXT: call __unordsf2
1258 ; RV64I-NEXT: snez a0, a0
1259 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1260 ; RV64I-NEXT: addi sp, sp, 16
1262 %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
1263 %2 = zext i1 %1 to i32