; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32d \
; RUN:   | FileCheck -check-prefix=CHECKIFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64d \
; RUN:   | FileCheck -check-prefix=CHECKIFD %s
; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32 \
; RUN:   | FileCheck -check-prefix=RV32IZFINXZDINX %s
; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64 \
; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s

define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_oeq:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: feq.d a0, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_oeq:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_oeq:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_oeq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_oeq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}
declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)

define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ogt:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a1
; CHECKIFD-NEXT: flt.d a0, fa1, fa0
; CHECKIFD-NEXT: fsflags a1
; CHECKIFD-NEXT: feq.d zero, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ogt:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: csrr a5, fflags
; RV32IZFINXZDINX-NEXT: flt.d a4, a2, a0
; RV32IZFINXZDINX-NEXT: csrw fflags, a5
; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ogt:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: csrr a3, fflags
; RV64IZFINXZDINX-NEXT: flt.d a2, a1, a0
; RV64IZFINXZDINX-NEXT: csrw fflags, a3
; RV64IZFINXZDINX-NEXT: feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ogt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ogt:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_oge:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a1
; CHECKIFD-NEXT: fle.d a0, fa1, fa0
; CHECKIFD-NEXT: fsflags a1
; CHECKIFD-NEXT: feq.d zero, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_oge:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: csrr a5, fflags
; RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0
; RV32IZFINXZDINX-NEXT: csrw fflags, a5
; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_oge:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: csrr a3, fflags
; RV64IZFINXZDINX-NEXT: fle.d a2, a1, a0
; RV64IZFINXZDINX-NEXT: csrw fflags, a3
; RV64IZFINXZDINX-NEXT: feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_oge:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_oge:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_olt:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a1
; CHECKIFD-NEXT: flt.d a0, fa0, fa1
; CHECKIFD-NEXT: fsflags a1
; CHECKIFD-NEXT: feq.d zero, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_olt:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: csrr a5, fflags
; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: csrw fflags, a5
; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_olt:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: csrr a3, fflags
; RV64IZFINXZDINX-NEXT: flt.d a2, a0, a1
; RV64IZFINXZDINX-NEXT: csrw fflags, a3
; RV64IZFINXZDINX-NEXT: feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_olt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __ltdf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_olt:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __ltdf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ole(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ole:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a1
; CHECKIFD-NEXT: fle.d a0, fa0, fa1
; CHECKIFD-NEXT: fsflags a1
; CHECKIFD-NEXT: feq.d zero, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ole:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: csrr a5, fflags
; RV32IZFINXZDINX-NEXT: fle.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: csrw fflags, a5
; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ole:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: csrr a3, fflags
; RV64IZFINXZDINX-NEXT: fle.d a2, a0, a1
; RV64IZFINXZDINX-NEXT: csrw fflags, a3
; RV64IZFINXZDINX-NEXT: feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ole:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __ledf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ole:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __ledf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: We only need one frflags before the two flts and one fsflags after the
; two flts.
define i32 @fcmp_one(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_one:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: flt.d a1, fa0, fa1
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: feq.d zero, fa0, fa1
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: flt.d a2, fa1, fa0
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: or a0, a2, a1
; CHECKIFD-NEXT: feq.d zero, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_one:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: csrr a4, fflags
; RV32IZFINXZDINX-NEXT: flt.d a5, a0, a2
; RV32IZFINXZDINX-NEXT: csrw fflags, a4
; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT: csrr a4, fflags
; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a0
; RV32IZFINXZDINX-NEXT: csrw fflags, a4
; RV32IZFINXZDINX-NEXT: or a4, a6, a5
; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_one:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: csrr a2, fflags
; RV64IZFINXZDINX-NEXT: flt.d a3, a0, a1
; RV64IZFINXZDINX-NEXT: csrw fflags, a2
; RV64IZFINXZDINX-NEXT: feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT: csrr a2, fflags
; RV64IZFINXZDINX-NEXT: flt.d a4, a1, a0
; RV64IZFINXZDINX-NEXT: csrw fflags, a2
; RV64IZFINXZDINX-NEXT: or a2, a4, a3
; RV64IZFINXZDINX-NEXT: feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_one:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a3
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: snez s4, a0
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: and a0, a0, s4
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_one:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: snez s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ord(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ord:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: feq.d a0, fa1, fa1
; CHECKIFD-NEXT: feq.d a1, fa0, fa0
; CHECKIFD-NEXT: and a0, a1, a0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ord:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ord:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: feq.d a1, a1, a1
; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT: and a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ord:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ord:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: We only need one frflags before the two flts and one fsflags after the
; two flts.
define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ueq:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: flt.d a1, fa0, fa1
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: feq.d zero, fa0, fa1
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: flt.d a2, fa1, fa0
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: or a1, a2, a1
; CHECKIFD-NEXT: xori a0, a1, 1
; CHECKIFD-NEXT: feq.d zero, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ueq:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: csrr a4, fflags
; RV32IZFINXZDINX-NEXT: flt.d a5, a0, a2
; RV32IZFINXZDINX-NEXT: csrw fflags, a4
; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT: csrr a4, fflags
; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a0
; RV32IZFINXZDINX-NEXT: csrw fflags, a4
; RV32IZFINXZDINX-NEXT: or a4, a6, a5
; RV32IZFINXZDINX-NEXT: xori a4, a4, 1
; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ueq:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: csrr a2, fflags
; RV64IZFINXZDINX-NEXT: flt.d a3, a0, a1
; RV64IZFINXZDINX-NEXT: csrw fflags, a2
; RV64IZFINXZDINX-NEXT: feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT: csrr a2, fflags
; RV64IZFINXZDINX-NEXT: flt.d a4, a1, a0
; RV64IZFINXZDINX-NEXT: csrw fflags, a2
; RV64IZFINXZDINX-NEXT: or a3, a4, a3
; RV64IZFINXZDINX-NEXT: xori a2, a3, 1
; RV64IZFINXZDINX-NEXT: feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ueq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a3
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz s4, a0
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: or a0, a0, s4
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ueq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: or a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ugt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ugt:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: fle.d a1, fa0, fa1
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: xori a0, a1, 1
; CHECKIFD-NEXT: feq.d zero, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ugt:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: csrr a4, fflags
; RV32IZFINXZDINX-NEXT: fle.d a5, a0, a2
; RV32IZFINXZDINX-NEXT: csrw fflags, a4
; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ugt:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: csrr a2, fflags
; RV64IZFINXZDINX-NEXT: fle.d a3, a0, a1
; RV64IZFINXZDINX-NEXT: csrw fflags, a2
; RV64IZFINXZDINX-NEXT: xori a2, a3, 1
; RV64IZFINXZDINX-NEXT: feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ugt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __ledf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ugt:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __ledf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_uge:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: flt.d a1, fa0, fa1
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: xori a0, a1, 1
; CHECKIFD-NEXT: feq.d zero, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_uge:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: csrr a4, fflags
; RV32IZFINXZDINX-NEXT: flt.d a5, a0, a2
; RV32IZFINXZDINX-NEXT: csrw fflags, a4
; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
; RV32IZFINXZDINX-NEXT: feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_uge:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: csrr a2, fflags
; RV64IZFINXZDINX-NEXT: flt.d a3, a0, a1
; RV64IZFINXZDINX-NEXT: csrw fflags, a2
; RV64IZFINXZDINX-NEXT: xori a2, a3, 1
; RV64IZFINXZDINX-NEXT: feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_uge:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __ltdf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_uge:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __ltdf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ult:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: fle.d a1, fa1, fa0
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: xori a0, a1, 1
; CHECKIFD-NEXT: feq.d zero, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ult:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: csrr a4, fflags
; RV32IZFINXZDINX-NEXT: fle.d a5, a2, a0
; RV32IZFINXZDINX-NEXT: csrw fflags, a4
; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ult:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: csrr a2, fflags
; RV64IZFINXZDINX-NEXT: fle.d a3, a1, a0
; RV64IZFINXZDINX-NEXT: csrw fflags, a2
; RV64IZFINXZDINX-NEXT: xori a2, a3, 1
; RV64IZFINXZDINX-NEXT: feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ult:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ult:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ule(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ule:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: frflags a0
; CHECKIFD-NEXT: flt.d a1, fa1, fa0
; CHECKIFD-NEXT: fsflags a0
; CHECKIFD-NEXT: xori a0, a1, 1
; CHECKIFD-NEXT: feq.d zero, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ule:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: csrr a4, fflags
; RV32IZFINXZDINX-NEXT: flt.d a5, a2, a0
; RV32IZFINXZDINX-NEXT: csrw fflags, a4
; RV32IZFINXZDINX-NEXT: xori a4, a5, 1
; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT: mv a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ule:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: csrr a2, fflags
; RV64IZFINXZDINX-NEXT: flt.d a3, a1, a0
; RV64IZFINXZDINX-NEXT: csrw fflags, a2
; RV64IZFINXZDINX-NEXT: xori a2, a3, 1
; RV64IZFINXZDINX-NEXT: feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_ule:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_ule:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_une(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_une:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: feq.d a0, fa0, fa1
; CHECKIFD-NEXT: xori a0, a0, 1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_une:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_une:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: xori a0, a0, 1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_une:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __nedf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_une:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __nedf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_uno(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_uno:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: feq.d a0, fa1, fa1
; CHECKIFD-NEXT: feq.d a1, fa0, fa0
; CHECKIFD-NEXT: and a0, a1, a0
; CHECKIFD-NEXT: xori a0, a0, 1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmp_uno:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmp_uno:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: feq.d a1, a1, a1
; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT: and a0, a0, a1
; RV64IZFINXZDINX-NEXT: xori a0, a0, 1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmp_uno:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmp_uno:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_oeq:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fle.d a0, fa1, fa0
; CHECKIFD-NEXT: fle.d a1, fa0, fa1
; CHECKIFD-NEXT: and a0, a1, a0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_oeq:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: and a0, a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_oeq:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fle.d a2, a1, a0
; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: and a0, a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_oeq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_oeq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}
declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)

define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ogt:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: flt.d a0, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ogt:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ogt:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: flt.d a0, a1, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_ogt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_ogt:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_oge:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fle.d a0, fa1, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_oge:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_oge:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fle.d a0, a1, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_oge:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_oge:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_olt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_olt:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: flt.d a0, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_olt:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_olt:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: flt.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_olt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __ltdf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_olt:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __ltdf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ole(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ole:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fle.d a0, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ole:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ole:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_ole:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __ledf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_ole:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __ledf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_one(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_one:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: flt.d a0, fa0, fa1
; CHECKIFD-NEXT: flt.d a1, fa1, fa0
; CHECKIFD-NEXT: or a0, a1, a0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_one:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: or a0, a0, a4
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_one:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: flt.d a2, a0, a1
; RV64IZFINXZDINX-NEXT: flt.d a0, a1, a0
; RV64IZFINXZDINX-NEXT: or a0, a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_one:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a3
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: snez s4, a0
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: and a0, a0, s4
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_one:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: snez s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ord(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ord:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fle.d a0, fa1, fa1
; CHECKIFD-NEXT: fle.d a1, fa0, fa0
; CHECKIFD-NEXT: and a0, a1, a0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ord:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ord:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fle.d a1, a1, a1
; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a0
; RV64IZFINXZDINX-NEXT: and a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_ord:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_ord:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ueq(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ueq:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: flt.d a0, fa0, fa1
; CHECKIFD-NEXT: flt.d a1, fa1, fa0
; CHECKIFD-NEXT: or a0, a1, a0
; CHECKIFD-NEXT: xori a0, a0, 1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ueq:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: or a0, a0, a4
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ueq:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: flt.d a2, a0, a1
; RV64IZFINXZDINX-NEXT: flt.d a0, a1, a0
; RV64IZFINXZDINX-NEXT: or a0, a0, a2
; RV64IZFINXZDINX-NEXT: xori a0, a0, 1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_ueq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a3
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz s4, a0
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: or a0, a0, s4
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_ueq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: or a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ugt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ugt:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fle.d a0, fa0, fa1
; CHECKIFD-NEXT: xori a0, a0, 1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ugt:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ugt:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: xori a0, a0, 1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_ugt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __ledf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_ugt:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __ledf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_uge:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: flt.d a0, fa0, fa1
; CHECKIFD-NEXT: xori a0, a0, 1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_uge:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_uge:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: flt.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: xori a0, a0, 1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_uge:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __ltdf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_uge:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __ltdf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ult(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ult:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fle.d a0, fa1, fa0
; CHECKIFD-NEXT: xori a0, a0, 1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ult:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ult:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fle.d a0, a1, a0
; RV64IZFINXZDINX-NEXT: xori a0, a0, 1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_ult:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_ult:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ule(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ule:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: flt.d a0, fa1, fa0
; CHECKIFD-NEXT: xori a0, a0, 1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ule:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ule:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: flt.d a0, a1, a0
; RV64IZFINXZDINX-NEXT: xori a0, a0, 1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_ule:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_ule:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_une(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_une:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fle.d a0, fa1, fa0
; CHECKIFD-NEXT: fle.d a1, fa0, fa1
; CHECKIFD-NEXT: and a0, a1, a0
; CHECKIFD-NEXT: xori a0, a0, 1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_une:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: and a0, a0, a4
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_une:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fle.d a2, a1, a0
; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: and a0, a0, a2
; RV64IZFINXZDINX-NEXT: xori a0, a0, 1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_une:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __nedf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_une:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __nedf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_uno(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_uno:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fle.d a0, fa1, fa1
; CHECKIFD-NEXT: fle.d a1, fa0, fa0
; CHECKIFD-NEXT: and a0, a1, a0
; CHECKIFD-NEXT: xori a0, a0, 1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fcmps_uno:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, a2
; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a0
; RV32IZFINXZDINX-NEXT: and a0, a0, a2
; RV32IZFINXZDINX-NEXT: xori a0, a0, 1
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fcmps_uno:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fle.d a1, a1, a1
; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a0
; RV64IZFINXZDINX-NEXT: and a0, a0, a1
; RV64IZFINXZDINX-NEXT: xori a0, a0, 1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcmps_uno:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcmps_uno:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}