; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IFD %s

; These tests are each targeted at a particular RISC-V FPU instruction. Most
; other files in this folder exercise LLVM IR instructions that don't directly
; match a RISC-V instruction.
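
; The RV32IFD and RV64IFD bodies below differ in one recurring way: on riscv32
; a double argument arrives split across a pair of GPRs and there is no 64-bit
; GPR<->FPR move, so each operand is bounced through the stack before the FPU
; can use it, while riscv64 transfers it with a single fmv.d.x/fmv.x.d. A
; representative RV32 sequence (illustrative commentary only, not a CHECK
; line) is:
;   sw a0, 8(sp)     # spill low half of the incoming double
;   sw a1, 12(sp)    # spill high half
;   fld ft1, 8(sp)   # reload the pair as one f64 register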

define double @fadd_d(double %a, double %b) nounwind {
; RV32IFD-LABEL: fadd_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fadd_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = fadd double %a, %b
  ret double %1
}

define double @fsub_d(double %a, double %b) nounwind {
; RV32IFD-LABEL: fsub_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fsub.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fsub_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fsub.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = fsub double %a, %b
  ret double %1
}

define double @fmul_d(double %a, double %b) nounwind {
; RV32IFD-LABEL: fmul_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fmul.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fmul_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fmul.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = fmul double %a, %b
  ret double %1
}

define double @fdiv_d(double %a, double %b) nounwind {
; RV32IFD-LABEL: fdiv_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fdiv.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fdiv_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fdiv.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = fdiv double %a, %b
  ret double %1
}

declare double @llvm.sqrt.f64(double)

define double @fsqrt_d(double %a) nounwind {
; RV32IFD-LABEL: fsqrt_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    fsqrt.d ft0, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fsqrt_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a0
; RV64IFD-NEXT:    fsqrt.d ft0, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.sqrt.f64(double %a)
  ret double %1
}

declare double @llvm.copysign.f64(double, double)

define double @fsgnj_d(double %a, double %b) nounwind {
; RV32IFD-LABEL: fsgnj_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fsgnj.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fsgnj_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.copysign.f64(double %a, double %b)
  ret double %1
}

; This function performs extra work to ensure that
; DAGCombiner::visitBITCAST doesn't replace the fneg with an xor.
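; If the negated value were bitcast straight back to the integer domain, that
; combine would leave nothing for fneg.d to match: the sign bit would be
; flipped with integer code along the lines of the RV64 fsgnjn_d output
; further down (illustrative commentary only, not a CHECK line):
;   addi a1, zero, -1
;   slli a1, a1, 63
;   xor a0, a0, a1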
define i32 @fneg_d(double %a, double %b) nounwind {
; RV32IFD-LABEL: fneg_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    fadd.d ft0, ft0, ft0
; RV32IFD-NEXT:    fneg.d ft1, ft0
; RV32IFD-NEXT:    feq.d a0, ft0, ft1
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fneg_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a0
; RV64IFD-NEXT:    fadd.d ft0, ft0, ft0
; RV64IFD-NEXT:    fneg.d ft1, ft0
; RV64IFD-NEXT:    feq.d a0, ft0, ft1
; RV64IFD-NEXT:    ret
  %1 = fadd double %a, %a
  %2 = fsub double -0.0, %1
  %3 = fcmp oeq double %1, %2
  %4 = zext i1 %3 to i32
  ret i32 %4
}

define double @fsgnjn_d(double %a, double %b) nounwind {
; TODO: fsgnjn.d isn't selected on RV64 because DAGCombiner::visitBITCAST will
; convert (bitconvert (fneg x)) to an xor.
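; (The RV64IFD checks below show exactly that: the sign bit of %b is flipped
; with addi/slli/xor and a plain fsgnj.d is selected instead of fsgnjn.d.)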
;
; RV32IFD-LABEL: fsgnjn_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fsgnjn.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fsgnjn_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi a2, zero, -1
; RV64IFD-NEXT:    slli a2, a2, 63
; RV64IFD-NEXT:    xor a1, a1, a2
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = fsub double -0.0, %b
  %2 = call double @llvm.copysign.f64(double %a, double %1)
  ret double %2
}

declare double @llvm.fabs.f64(double)

; This function performs extra work to ensure that
; DAGCombiner::visitBITCAST doesn't replace the fabs with an and.
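; If it did, the sign bit would instead be cleared on the integer side, e.g.
; on RV64 with a mask built from -1 shifted right by one (illustrative
; commentary only, not a CHECK line), and no fabs.d would be selected:
;   addi a1, zero, -1
;   srli a1, a1, 1
;   and a0, a0, a1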
define double @fabs_d(double %a, double %b) nounwind {
; RV32IFD-LABEL: fabs_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
; RV32IFD-NEXT:    fabs.d ft1, ft0
; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fabs_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
; RV64IFD-NEXT:    fabs.d ft1, ft0
; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = fadd double %a, %b
  %2 = call double @llvm.fabs.f64(double %1)
  %3 = fadd double %2, %1
  ret double %3
}

declare double @llvm.minnum.f64(double, double)

define double @fmin_d(double %a, double %b) nounwind {
; RV32IFD-LABEL: fmin_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fmin.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fmin_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.minnum.f64(double %a, double %b)
  ret double %1
}

declare double @llvm.maxnum.f64(double, double)

define double @fmax_d(double %a, double %b) nounwind {
; RV32IFD-LABEL: fmax_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fmax.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fmax_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fmax.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.maxnum.f64(double %a, double %b)
  ret double %1
}

define i32 @feq_d(double %a, double %b) nounwind {
; RV32IFD-LABEL: feq_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    feq.d a0, ft1, ft0
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: feq_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    feq.d a0, ft1, ft0
; RV64IFD-NEXT:    ret
  %1 = fcmp oeq double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @flt_d(double %a, double %b) nounwind {
; RV32IFD-LABEL: flt_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    flt.d a0, ft1, ft0
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: flt_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    flt.d a0, ft1, ft0
; RV64IFD-NEXT:    ret
  %1 = fcmp olt double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fle_d(double %a, double %b) nounwind {
; RV32IFD-LABEL: fle_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fle.d a0, ft1, ft0
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fle_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fle.d a0, ft1, ft0
; RV64IFD-NEXT:    ret
  %1 = fcmp ole double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

declare double @llvm.fma.f64(double, double, double)

define double @fmadd_d(double %a, double %b, double %c) nounwind {
; RV32IFD-LABEL: fmadd_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a4, 8(sp)
; RV32IFD-NEXT:    sw a5, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft2, 8(sp)
; RV32IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fmadd_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a2
; RV64IFD-NEXT:    fmv.d.x ft1, a1
; RV64IFD-NEXT:    fmv.d.x ft2, a0
; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
  ret double %1
}
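
; In fmsub_d, fnmadd_d and fnmsub_d below, the operands that get negated are
; first passed through a dummy 'fadd 0.0' so that the fneg reaches instruction
; selection as a floating-point node; negating the raw incoming argument would
; be folded by DAGCombiner into an integer xor of the sign bit (as in fsgnjn_d
; above) and the fused negate forms would not be selected.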

define double @fmsub_d(double %a, double %b, double %c) nounwind {
; RV32IFD-LABEL: fmsub_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    sw a4, 8(sp)
; RV32IFD-NEXT:    sw a5, 12(sp)
; RV32IFD-NEXT:    fld ft2, 8(sp)
; RV32IFD-NEXT:    lui a0, %hi(.LCPI15_0)
; RV32IFD-NEXT:    addi a0, a0, %lo(.LCPI15_0)
; RV32IFD-NEXT:    fld ft3, 0(a0)
; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
; RV32IFD-NEXT:    fmsub.d ft0, ft1, ft0, ft2
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fmsub_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    lui a3, %hi(.LCPI15_0)
; RV64IFD-NEXT:    addi a3, a3, %lo(.LCPI15_0)
; RV64IFD-NEXT:    fld ft0, 0(a3)
; RV64IFD-NEXT:    fmv.d.x ft1, a1
; RV64IFD-NEXT:    fmv.d.x ft2, a0
; RV64IFD-NEXT:    fmv.d.x ft3, a2
; RV64IFD-NEXT:    fadd.d ft0, ft3, ft0
; RV64IFD-NEXT:    fmsub.d ft0, ft2, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %c_ = fadd double 0.0, %c ; avoid negation using xor
  %negc = fsub double -0.0, %c_
  %1 = call double @llvm.fma.f64(double %a, double %b, double %negc)
  ret double %1
}

define double @fnmadd_d(double %a, double %b, double %c) nounwind {
; RV32IFD-LABEL: fnmadd_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a4, 8(sp)
; RV32IFD-NEXT:    sw a5, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft2, 8(sp)
; RV32IFD-NEXT:    lui a0, %hi(.LCPI16_0)
; RV32IFD-NEXT:    addi a0, a0, %lo(.LCPI16_0)
; RV32IFD-NEXT:    fld ft3, 0(a0)
; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
; RV32IFD-NEXT:    fadd.d ft1, ft1, ft3
; RV32IFD-NEXT:    fnmadd.d ft0, ft2, ft0, ft1
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fnmadd_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    lui a3, %hi(.LCPI16_0)
; RV64IFD-NEXT:    addi a3, a3, %lo(.LCPI16_0)
; RV64IFD-NEXT:    fld ft0, 0(a3)
; RV64IFD-NEXT:    fmv.d.x ft1, a1
; RV64IFD-NEXT:    fmv.d.x ft2, a2
; RV64IFD-NEXT:    fmv.d.x ft3, a0
; RV64IFD-NEXT:    fadd.d ft3, ft3, ft0
; RV64IFD-NEXT:    fadd.d ft0, ft2, ft0
; RV64IFD-NEXT:    fnmadd.d ft0, ft3, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %a_ = fadd double 0.0, %a
  %c_ = fadd double 0.0, %c
  %nega = fsub double -0.0, %a_
  %negc = fsub double -0.0, %c_
  %1 = call double @llvm.fma.f64(double %nega, double %b, double %negc)
  ret double %1
}

define double @fnmsub_d(double %a, double %b, double %c) nounwind {
; RV32IFD-LABEL: fnmsub_d:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a4, 8(sp)
; RV32IFD-NEXT:    sw a5, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft2, 8(sp)
; RV32IFD-NEXT:    lui a0, %hi(.LCPI17_0)
; RV32IFD-NEXT:    addi a0, a0, %lo(.LCPI17_0)
; RV32IFD-NEXT:    fld ft3, 0(a0)
; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
; RV32IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fnmsub_d:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    lui a3, %hi(.LCPI17_0)
; RV64IFD-NEXT:    addi a3, a3, %lo(.LCPI17_0)
; RV64IFD-NEXT:    fld ft0, 0(a3)
; RV64IFD-NEXT:    fmv.d.x ft1, a2
; RV64IFD-NEXT:    fmv.d.x ft2, a1
; RV64IFD-NEXT:    fmv.d.x ft3, a0
; RV64IFD-NEXT:    fadd.d ft0, ft3, ft0
; RV64IFD-NEXT:    fnmsub.d ft0, ft0, ft2, ft1
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %a_ = fadd double 0.0, %a
  %nega = fsub double -0.0, %a_
  %1 = call double @llvm.fma.f64(double %nega, double %b, double %c)
  ret double %1
}