; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64IFD %s
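
; Check codegen of (select (fcmp ...)) with each fcmp condition code on
; doubles, for both RV32 and RV64 with the D extension.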
define double @select_fcmp_false(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_false:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: mv a1, a3
; RV32IFD-NEXT: mv a0, a2
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_false:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: ret
  %1 = fcmp false double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_oeq(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_oeq:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: feq.d a0, ft1, ft0
; RV32IFD-NEXT: bnez a0, .LBB1_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft1, ft0
; RV32IFD-NEXT: .LBB1_2:
; RV32IFD-NEXT: fsd ft1, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_oeq:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: feq.d a0, ft0, ft1
; RV64IFD-NEXT: bnez a0, .LBB1_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB1_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp oeq double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_ogt(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_ogt:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: flt.d a0, ft1, ft0
; RV32IFD-NEXT: bnez a0, .LBB2_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft0, ft1
; RV32IFD-NEXT: .LBB2_2:
; RV32IFD-NEXT: fsd ft0, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_ogt:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: flt.d a0, ft1, ft0
; RV64IFD-NEXT: bnez a0, .LBB2_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB2_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp ogt double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_oge(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_oge:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: fle.d a0, ft1, ft0
; RV32IFD-NEXT: bnez a0, .LBB3_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft0, ft1
; RV32IFD-NEXT: .LBB3_2:
; RV32IFD-NEXT: fsd ft0, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_oge:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: fle.d a0, ft1, ft0
; RV64IFD-NEXT: bnez a0, .LBB3_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB3_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp oge double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_olt(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_olt:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: flt.d a0, ft1, ft0
; RV32IFD-NEXT: bnez a0, .LBB4_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft1, ft0
; RV32IFD-NEXT: .LBB4_2:
; RV32IFD-NEXT: fsd ft1, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_olt:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: flt.d a0, ft0, ft1
; RV64IFD-NEXT: bnez a0, .LBB4_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB4_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp olt double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_ole(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_ole:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: fle.d a0, ft1, ft0
; RV32IFD-NEXT: bnez a0, .LBB5_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft1, ft0
; RV32IFD-NEXT: .LBB5_2:
; RV32IFD-NEXT: fsd ft1, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_ole:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fle.d a0, ft0, ft1
; RV64IFD-NEXT: bnez a0, .LBB5_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB5_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp ole double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_one(double %a, double %b) nounwind {
; TODO: the feq.d+and+not sequence could be optimized
; RV32IFD-LABEL: select_fcmp_one:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: feq.d a0, ft1, ft1
; RV32IFD-NEXT: feq.d a1, ft0, ft0
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: feq.d a1, ft0, ft1
; RV32IFD-NEXT: not a1, a1
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: bnez a0, .LBB6_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft0, ft1
; RV32IFD-NEXT: .LBB6_2:
; RV32IFD-NEXT: fsd ft0, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_one:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: feq.d a0, ft1, ft1
; RV64IFD-NEXT: feq.d a1, ft0, ft0
; RV64IFD-NEXT: and a0, a1, a0
; RV64IFD-NEXT: feq.d a1, ft0, ft1
; RV64IFD-NEXT: not a1, a1
; RV64IFD-NEXT: and a0, a1, a0
; RV64IFD-NEXT: bnez a0, .LBB6_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB6_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp one double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_ord(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_ord:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: feq.d a0, ft1, ft1
; RV32IFD-NEXT: feq.d a1, ft0, ft0
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: bnez a0, .LBB7_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft0, ft1
; RV32IFD-NEXT: .LBB7_2:
; RV32IFD-NEXT: fsd ft0, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_ord:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: feq.d a0, ft1, ft1
; RV64IFD-NEXT: feq.d a1, ft0, ft0
; RV64IFD-NEXT: and a0, a1, a0
; RV64IFD-NEXT: bnez a0, .LBB7_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB7_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp ord double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_ueq(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_ueq:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: feq.d a0, ft1, ft0
; RV32IFD-NEXT: feq.d a1, ft0, ft0
; RV32IFD-NEXT: feq.d a2, ft1, ft1
; RV32IFD-NEXT: and a1, a2, a1
; RV32IFD-NEXT: seqz a1, a1
; RV32IFD-NEXT: or a0, a0, a1
; RV32IFD-NEXT: bnez a0, .LBB8_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft1, ft0
; RV32IFD-NEXT: .LBB8_2:
; RV32IFD-NEXT: fsd ft1, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_ueq:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: feq.d a0, ft0, ft1
; RV64IFD-NEXT: feq.d a1, ft1, ft1
; RV64IFD-NEXT: feq.d a2, ft0, ft0
; RV64IFD-NEXT: and a1, a2, a1
; RV64IFD-NEXT: seqz a1, a1
; RV64IFD-NEXT: or a0, a0, a1
; RV64IFD-NEXT: bnez a0, .LBB8_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB8_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp ueq double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_ugt(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_ugt:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: fle.d a0, ft1, ft0
; RV32IFD-NEXT: xori a0, a0, 1
; RV32IFD-NEXT: bnez a0, .LBB9_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft1, ft0
; RV32IFD-NEXT: .LBB9_2:
; RV32IFD-NEXT: fsd ft1, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_ugt:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fle.d a0, ft0, ft1
; RV64IFD-NEXT: xori a0, a0, 1
; RV64IFD-NEXT: bnez a0, .LBB9_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB9_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp ugt double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_uge(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_uge:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: flt.d a0, ft1, ft0
; RV32IFD-NEXT: xori a0, a0, 1
; RV32IFD-NEXT: bnez a0, .LBB10_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft1, ft0
; RV32IFD-NEXT: .LBB10_2:
; RV32IFD-NEXT: fsd ft1, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_uge:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: flt.d a0, ft0, ft1
; RV64IFD-NEXT: xori a0, a0, 1
; RV64IFD-NEXT: bnez a0, .LBB10_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB10_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp uge double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_ult(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_ult:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: fle.d a0, ft1, ft0
; RV32IFD-NEXT: xori a0, a0, 1
; RV32IFD-NEXT: bnez a0, .LBB11_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft0, ft1
; RV32IFD-NEXT: .LBB11_2:
; RV32IFD-NEXT: fsd ft0, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_ult:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: fle.d a0, ft1, ft0
; RV64IFD-NEXT: xori a0, a0, 1
; RV64IFD-NEXT: bnez a0, .LBB11_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB11_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp ult double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_ule(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_ule:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: flt.d a0, ft1, ft0
; RV32IFD-NEXT: xori a0, a0, 1
; RV32IFD-NEXT: bnez a0, .LBB12_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft0, ft1
; RV32IFD-NEXT: .LBB12_2:
; RV32IFD-NEXT: fsd ft0, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_ule:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: flt.d a0, ft1, ft0
; RV64IFD-NEXT: xori a0, a0, 1
; RV64IFD-NEXT: bnez a0, .LBB12_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB12_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp ule double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_une(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_une:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: feq.d a0, ft1, ft0
; RV32IFD-NEXT: xori a0, a0, 1
; RV32IFD-NEXT: bnez a0, .LBB13_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft1, ft0
; RV32IFD-NEXT: .LBB13_2:
; RV32IFD-NEXT: fsd ft1, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_une:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: feq.d a0, ft0, ft1
; RV64IFD-NEXT: xori a0, a0, 1
; RV64IFD-NEXT: bnez a0, .LBB13_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB13_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp une double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_uno(double %a, double %b) nounwind {
; TODO: sltiu+bne could be optimized
; RV32IFD-LABEL: select_fcmp_uno:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: feq.d a0, ft1, ft1
; RV32IFD-NEXT: feq.d a1, ft0, ft0
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: seqz a0, a0
; RV32IFD-NEXT: bnez a0, .LBB14_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft0, ft1
; RV32IFD-NEXT: .LBB14_2:
; RV32IFD-NEXT: fsd ft0, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_uno:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: feq.d a0, ft1, ft1
; RV64IFD-NEXT: feq.d a1, ft0, ft0
; RV64IFD-NEXT: and a0, a1, a0
; RV64IFD-NEXT: seqz a0, a0
; RV64IFD-NEXT: bnez a0, .LBB14_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: .LBB14_2:
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
  %1 = fcmp uno double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

define double @select_fcmp_true(double %a, double %b) nounwind {
; RV32IFD-LABEL: select_fcmp_true:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: select_fcmp_true:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: ret
  %1 = fcmp true double %a, %b
  %2 = select i1 %1, double %a, double %b
  ret double %2
}

; Ensure that ISel succeeds for a select+fcmp that has an i32 result type.
define i32 @i32_select_fcmp_oeq(double %a, double %b, i32 %c, i32 %d) nounwind {
; RV32IFD-LABEL: i32_select_fcmp_oeq:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a2, 8(sp)
; RV32IFD-NEXT: sw a3, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: feq.d a1, ft1, ft0
; RV32IFD-NEXT: mv a0, a4
; RV32IFD-NEXT: bnez a1, .LBB16_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: mv a0, a5
; RV32IFD-NEXT: .LBB16_2:
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: i32_select_fcmp_oeq:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft0, a1
; RV64IFD-NEXT: fmv.d.x ft1, a0
; RV64IFD-NEXT: feq.d a1, ft1, ft0
; RV64IFD-NEXT: mv a0, a2
; RV64IFD-NEXT: bnez a1, .LBB16_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: mv a0, a3
; RV64IFD-NEXT: .LBB16_2:
; RV64IFD-NEXT: ret
  %1 = fcmp oeq double %a, %b
  %2 = select i1 %1, i32 %c, i32 %d
  ret i32 %2
}