; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation -target-abi=ilp32d \
; RUN: | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation -target-abi=lp64d \
; RUN: | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation -target-abi=ilp32 \
; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s
; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation -target-abi=lp64 \
; RUN: | FileCheck -check-prefix=RV64IZFINXZDINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s

define double @fadd_d(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fadd_d:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fadd.d fa0, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fadd_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fadd_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fadd_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __adddf3@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fadd_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __adddf3@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)

define double @fsub_d(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fsub_d:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fsub.d fa0, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fsub_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsub.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsub_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fsub.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fsub_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __subdf3@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fsub_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __subdf3@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)

define double @fmul_d(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fmul_d:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fmul.d fa0, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fmul_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmul.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmul_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fmul.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fmul_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __muldf3@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fmul_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __muldf3@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)

define double @fdiv_d(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fdiv_d:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fdiv.d fa0, fa0, fa1
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fdiv_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fdiv.d a0, a0, a2
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fdiv_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fdiv.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fdiv_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __divdf3@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fdiv_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __divdf3@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)

define double @fsqrt_d(double %a) nounwind strictfp {
; CHECKIFD-LABEL: fsqrt_d:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fsqrt.d fa0, fa0
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fsqrt_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fsqrt_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fsqrt.d a0, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fsqrt_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call sqrt@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fsqrt_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call sqrt@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)

define double @fmin_d(double %a, double %b) nounwind strictfp {
; RV32IFD-LABEL: fmin_d:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call fmin@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fmin_d:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call fmin@plt
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fmin_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call fmin@plt
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmin_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: call fmin@plt
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fmin_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call fmin@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fmin_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call fmin@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.minnum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata) strictfp

define double @fmax_d(double %a, double %b) nounwind strictfp {
; RV32IFD-LABEL: fmax_d:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call fmax@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fmax_d:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call fmax@plt
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fmax_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call fmax@plt
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmax_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: call fmax@plt
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fmax_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call fmax@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fmax_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call fmax@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.maxnum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata) strictfp

define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp {
; CHECKIFD-LABEL: fmadd_d:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fmadd.d fa0, fa0, fa1, fa2
; CHECKIFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fmadd_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmadd_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fmadd.d a0, a0, a1, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fmadd_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call fma@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fmadd_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call fma@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %b, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}
declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) strictfp

define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
; RV32IFD-LABEL: fmsub_d:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fadd.d fa5, fa2, fa5
; RV32IFD-NEXT: fmsub.d fa0, fa0, fa1, fa5
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fmsub_d:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x fa5, zero
; RV64IFD-NEXT: fadd.d fa5, fa2, fa5
; RV64IFD-NEXT: fmsub.d fa0, fa0, fa1, fa5
; RV64IFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fmsub_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fmsub.d a0, a0, a2, a4
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fmsub_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fadd.d a2, a2, zero
; RV64IZFINXZDINX-NEXT: fmsub.d a0, a0, a1, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fmsub_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a3
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, a4
; RV32I-NEXT: mv a1, a5
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __adddf3@plt
; RV32I-NEXT: mv a4, a0
; RV32I-NEXT: lui a5, 524288
; RV32I-NEXT: xor a5, a1, a5
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
; RV32I-NEXT: call fma@plt
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fmsub_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __adddf3@plt
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a2, a0, a1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call fma@plt
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
  %c_ = fadd double 0.0, %c ; avoid negation using xor
  %negc = fneg double %c_
  %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %b, double %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
; RV32IFD-LABEL: fnmadd_d:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fadd.d fa4, fa0, fa5
; RV32IFD-NEXT: fadd.d fa5, fa2, fa5
; RV32IFD-NEXT: fnmadd.d fa0, fa4, fa1, fa5
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fnmadd_d:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x fa5, zero
; RV64IFD-NEXT: fadd.d fa4, fa0, fa5
; RV64IFD-NEXT: fadd.d fa5, fa2, fa5
; RV64IFD-NEXT: fnmadd.d fa0, fa4, fa1, fa5
; RV64IFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fnmadd_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a0, a2, a4
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, zero
; RV64IZFINXZDINX-NEXT: fadd.d a2, a2, zero
; RV64IZFINXZDINX-NEXT: fnmadd.d a0, a0, a1, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fnmadd_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a5
; RV32I-NEXT: mv s1, a4
; RV32I-NEXT: mv s2, a3
; RV32I-NEXT: mv s3, a2
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __adddf3@plt
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __adddf3@plt
; RV32I-NEXT: mv a4, a0
; RV32I-NEXT: lui a5, 524288
; RV32I-NEXT: xor a2, s5, a5
; RV32I-NEXT: xor a5, a1, a5
; RV32I-NEXT: mv a0, s4
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: mv a2, s3
; RV32I-NEXT: mv a3, s2
; RV32I-NEXT: call fma@plt
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fnmadd_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __adddf3@plt
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __adddf3@plt
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a2, a1, 63
; RV64I-NEXT: xor a1, s2, a2
; RV64I-NEXT: xor a2, a0, a2
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: call fma@plt
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
  %a_ = fadd double 0.0, %a
  %c_ = fadd double 0.0, %c
  %nega = fneg double %a_
  %negc = fneg double %c_
  %1 = call double @llvm.experimental.constrained.fma.f64(double %nega, double %b, double %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
; RV32IFD-LABEL: fnmadd_d_2:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fadd.d fa4, fa1, fa5
; RV32IFD-NEXT: fadd.d fa5, fa2, fa5
; RV32IFD-NEXT: fnmadd.d fa0, fa4, fa0, fa5
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fnmadd_d_2:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x fa5, zero
; RV64IFD-NEXT: fadd.d fa4, fa1, fa5
; RV64IFD-NEXT: fadd.d fa5, fa2, fa5
; RV64IFD-NEXT: fnmadd.d fa0, fa4, fa0, fa5
; RV64IFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fnmadd_d_2:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6
; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a2, a0, a4
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmadd_d_2:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fadd.d a1, a1, zero
; RV64IZFINXZDINX-NEXT: fadd.d a2, a2, zero
; RV64IZFINXZDINX-NEXT: fnmadd.d a0, a1, a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fnmadd_d_2:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a5
; RV32I-NEXT: mv s1, a4
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __adddf3@plt
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __adddf3@plt
; RV32I-NEXT: mv a4, a0
; RV32I-NEXT: lui a5, 524288
; RV32I-NEXT: xor a3, s5, a5
; RV32I-NEXT: xor a5, a1, a5
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s4
; RV32I-NEXT: call fma@plt
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fnmadd_d_2:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __adddf3@plt
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __adddf3@plt
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a2, a1, 63
; RV64I-NEXT: xor a1, s2, a2
; RV64I-NEXT: xor a2, a0, a2
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call fma@plt
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
  %b_ = fadd double 0.0, %b
  %c_ = fadd double 0.0, %c
  %negb = fneg double %b_
  %negc = fneg double %c_
  %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %negb, double %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
; RV32IFD-LABEL: fnmsub_d:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fadd.d fa5, fa0, fa5
; RV32IFD-NEXT: fnmsub.d fa0, fa5, fa1, fa2
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fnmsub_d:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x fa5, zero
; RV64IFD-NEXT: fadd.d fa5, fa0, fa5
; RV64IFD-NEXT: fnmsub.d fa0, fa5, fa1, fa2
; RV64IFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fnmsub_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6
; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a0, a2, a4
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, zero
; RV64IZFINXZDINX-NEXT: fnmsub.d a0, a0, a1, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fnmsub_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a5
; RV32I-NEXT: mv s1, a4
; RV32I-NEXT: mv s2, a3
; RV32I-NEXT: mv s3, a2
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __adddf3@plt
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: xor a1, a1, a2
; RV32I-NEXT: mv a2, s3
; RV32I-NEXT: mv a3, s2
; RV32I-NEXT: mv a4, s1
; RV32I-NEXT: mv a5, s0
; RV32I-NEXT: call fma@plt
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fnmsub_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __adddf3@plt
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: mv a2, s0
; RV64I-NEXT: call fma@plt
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
  %a_ = fadd double 0.0, %a
  %nega = fneg double %a_
  %1 = call double @llvm.experimental.constrained.fma.f64(double %nega, double %b, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret double %1
}

define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
; RV32IFD-LABEL: fnmsub_d_2:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fadd.d fa5, fa1, fa5
; RV32IFD-NEXT: fnmsub.d fa0, fa5, fa0, fa2
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fnmsub_d_2:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x fa5, zero
; RV64IFD-NEXT: fadd.d fa5, fa1, fa5
; RV64IFD-NEXT: fnmsub.d fa0, fa5, fa0, fa2
; RV64IFD-NEXT: ret
;
; RV32IZFINXZDINX-LABEL: fnmsub_d_2:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a4, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a4, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a5, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: sw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero
; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6
; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a2, a0, a4
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: fnmsub_d_2:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fadd.d a1, a1, zero
; RV64IZFINXZDINX-NEXT: fnmsub.d a0, a1, a0, a2
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fnmsub_d_2:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a5
; RV32I-NEXT: mv s1, a4
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __adddf3@plt
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: lui a3, 524288
; RV32I-NEXT: xor a3, a1, a3
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a4, s1
; RV32I-NEXT: mv a5, s0
; RV32I-NEXT: call fma@plt
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fnmsub_d_2:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __adddf3@plt
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a1, a0, a1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a2, s0
; RV64I-NEXT: call fma@plt
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
  %b_ = fadd double 0.0, %b
  %negb = fneg double %b_
  %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %negb, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp