; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32f \
; RUN:   | FileCheck -check-prefixes=CHECKIF,RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64f \
; RUN:   | FileCheck -check-prefixes=CHECKIF,RV64IF %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32 \
; RUN:   | FileCheck -check-prefixes=CHECKIZFINX,RV32IZFINX %s
; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64 \
; RUN:   | FileCheck -check-prefixes=CHECKIZFINX,RV64IZFINX %s
define float @fadd_s(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fadd_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fadd.s fa0, fa0, fa1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fadd_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __addsf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fadd_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __addsf3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; CHECKIZFINX-LABEL: fadd_s:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fadd.s a0, a0, a1
; CHECKIZFINX-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
define float @fsub_s(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fsub_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fsub.s fa0, fa0, fa1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fsub_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __subsf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fsub_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __subsf3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; CHECKIZFINX-LABEL: fsub_s:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fsub.s a0, a0, a1
; CHECKIZFINX-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.fsub.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata)
define float @fmul_s(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fmul_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fmul.s fa0, fa0, fa1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fmul_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __mulsf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmul_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __mulsf3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; CHECKIZFINX-LABEL: fmul_s:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fmul.s a0, a0, a1
; CHECKIZFINX-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata)
define float @fdiv_s(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fdiv_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fdiv.s fa0, fa0, fa1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fdiv_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __divsf3
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fdiv_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __divsf3
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; CHECKIZFINX-LABEL: fdiv_s:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fdiv.s a0, a0, a1
; CHECKIZFINX-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.fdiv.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)
define float @fsqrt_s(float %a) nounwind strictfp {
; CHECKIF-LABEL: fsqrt_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fsqrt.s fa0, fa0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fsqrt_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call sqrtf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fsqrt_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call sqrtf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; CHECKIZFINX-LABEL: fsqrt_s:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fsqrt.s a0, a0
; CHECKIZFINX-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sqrt.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
define float @fmin_s(float %a, float %b) nounwind strictfp {
; RV32IF-LABEL: fmin_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call fminf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fmin_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call fminf
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fmin_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fminf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmin_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fminf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV32IZFINX-LABEL: fmin_s:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call fminf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fmin_s:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call fminf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.minnum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp
  ret float %1
}

declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata) strictfp
define float @fmax_s(float %a, float %b) nounwind strictfp {
; RV32IF-LABEL: fmax_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call fmaxf
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fmax_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call fmaxf
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fmax_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fmaxf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmax_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fmaxf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV32IZFINX-LABEL: fmax_s:
; RV32IZFINX:       # %bb.0:
; RV32IZFINX-NEXT:    addi sp, sp, -16
; RV32IZFINX-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT:    call fmaxf
; RV32IZFINX-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT:    addi sp, sp, 16
; RV32IZFINX-NEXT:    ret
;
; RV64IZFINX-LABEL: fmax_s:
; RV64IZFINX:       # %bb.0:
; RV64IZFINX-NEXT:    addi sp, sp, -16
; RV64IZFINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT:    call fmaxf
; RV64IZFINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT:    addi sp, sp, 16
; RV64IZFINX-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.maxnum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp
  ret float %1
}

declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata) strictfp
define float @fmadd_s(float %a, float %b, float %c) nounwind strictfp {
; CHECKIF-LABEL: fmadd_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fmadd.s fa0, fa0, fa1, fa2
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fmadd_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call fmaf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmadd_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call fmaf
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; CHECKIZFINX-LABEL: fmadd_s:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fmadd.s a0, a0, a1, a2
; CHECKIZFINX-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.fma.f32(float %a, float %b, float %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata) strictfp
define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp {
; CHECKIF-LABEL: fmsub_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fmv.w.x fa5, zero
; CHECKIF-NEXT:    fadd.s fa5, fa2, fa5
; CHECKIF-NEXT:    fmsub.s fa0, fa0, fa1, fa5
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fmsub_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __addsf3
; RV32I-NEXT:    lui a2, 524288
; RV32I-NEXT:    xor a2, a0, a2
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call fmaf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmsub_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, a2
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __addsf3
; RV64I-NEXT:    lui a2, 524288
; RV64I-NEXT:    xor a2, a0, a2
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call fmaf
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
;
; CHECKIZFINX-LABEL: fmsub_s:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fadd.s a2, a2, zero
; CHECKIZFINX-NEXT:    fmsub.s a0, a0, a1, a2
; CHECKIZFINX-NEXT:    ret
  %c_ = fadd float 0.0, %c ; avoid negation using xor
  %negc = fneg float %c_
  %1 = call float @llvm.experimental.constrained.fma.f32(float %a, float %b, float %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
define float @fnmadd_s(float %a, float %b, float %c) nounwind strictfp {
; CHECKIF-LABEL: fnmadd_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fmv.w.x fa5, zero
; CHECKIF-NEXT:    fadd.s fa4, fa0, fa5
; CHECKIF-NEXT:    fadd.s fa5, fa2, fa5
; CHECKIF-NEXT:    fnmadd.s fa0, fa4, fa1, fa5
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fnmadd_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a2
; RV32I-NEXT:    mv s1, a1
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __addsf3
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __addsf3
; RV32I-NEXT:    lui a2, 524288
; RV32I-NEXT:    xor a1, s2, a2
; RV32I-NEXT:    xor a2, a0, a2
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    mv a1, s1
; RV32I-NEXT:    call fmaf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fnmadd_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a2
; RV64I-NEXT:    mv s1, a1
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __addsf3
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __addsf3
; RV64I-NEXT:    lui a2, 524288
; RV64I-NEXT:    xor a1, s2, a2
; RV64I-NEXT:    xor a2, a0, a2
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    mv a1, s1
; RV64I-NEXT:    call fmaf
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
;
; CHECKIZFINX-LABEL: fnmadd_s:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fadd.s a0, a0, zero
; CHECKIZFINX-NEXT:    fadd.s a2, a2, zero
; CHECKIZFINX-NEXT:    fnmadd.s a0, a0, a1, a2
; CHECKIZFINX-NEXT:    ret
  %a_ = fadd float 0.0, %a
  %c_ = fadd float 0.0, %c
  %nega = fneg float %a_
  %negc = fneg float %c_
  %1 = call float @llvm.experimental.constrained.fma.f32(float %nega, float %b, float %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
define float @fnmadd_s_2(float %a, float %b, float %c) nounwind strictfp {
; CHECKIF-LABEL: fnmadd_s_2:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fmv.w.x fa5, zero
; CHECKIF-NEXT:    fadd.s fa4, fa1, fa5
; CHECKIF-NEXT:    fadd.s fa5, fa2, fa5
; CHECKIF-NEXT:    fnmadd.s fa0, fa4, fa0, fa5
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fnmadd_s_2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a2
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __addsf3
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __addsf3
; RV32I-NEXT:    lui a2, 524288
; RV32I-NEXT:    xor a1, s2, a2
; RV32I-NEXT:    xor a2, a0, a2
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call fmaf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fnmadd_s_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a2
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __addsf3
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __addsf3
; RV64I-NEXT:    lui a2, 524288
; RV64I-NEXT:    xor a1, s2, a2
; RV64I-NEXT:    xor a2, a0, a2
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call fmaf
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
;
; CHECKIZFINX-LABEL: fnmadd_s_2:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fadd.s a1, a1, zero
; CHECKIZFINX-NEXT:    fadd.s a2, a2, zero
; CHECKIZFINX-NEXT:    fnmadd.s a0, a1, a0, a2
; CHECKIZFINX-NEXT:    ret
  %b_ = fadd float 0.0, %b
  %c_ = fadd float 0.0, %c
  %negb = fneg float %b_
  %negc = fneg float %c_
  %1 = call float @llvm.experimental.constrained.fma.f32(float %a, float %negb, float %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
define float @fnmsub_s(float %a, float %b, float %c) nounwind strictfp {
; CHECKIF-LABEL: fnmsub_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fmv.w.x fa5, zero
; CHECKIF-NEXT:    fadd.s fa5, fa0, fa5
; CHECKIF-NEXT:    fnmsub.s fa0, fa5, fa1, fa2
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fnmsub_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a2
; RV32I-NEXT:    mv s1, a1
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __addsf3
; RV32I-NEXT:    lui a1, 524288
; RV32I-NEXT:    xor a0, a0, a1
; RV32I-NEXT:    mv a1, s1
; RV32I-NEXT:    mv a2, s0
; RV32I-NEXT:    call fmaf
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fnmsub_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a2
; RV64I-NEXT:    mv s1, a1
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __addsf3
; RV64I-NEXT:    lui a1, 524288
; RV64I-NEXT:    xor a0, a0, a1
; RV64I-NEXT:    mv a1, s1
; RV64I-NEXT:    mv a2, s0
; RV64I-NEXT:    call fmaf
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
;
; CHECKIZFINX-LABEL: fnmsub_s:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fadd.s a0, a0, zero
; CHECKIZFINX-NEXT:    fnmsub.s a0, a0, a1, a2
; CHECKIZFINX-NEXT:    ret
  %a_ = fadd float 0.0, %a
  %nega = fneg float %a_
  %1 = call float @llvm.experimental.constrained.fma.f32(float %nega, float %b, float %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
617 define float @fnmsub_s_2(float %a, float %b, float %c) nounwind strictfp {
618 ; CHECKIF-LABEL: fnmsub_s_2:
620 ; CHECKIF-NEXT: fmv.w.x fa5, zero
621 ; CHECKIF-NEXT: fadd.s fa5, fa1, fa5
622 ; CHECKIF-NEXT: fnmsub.s fa0, fa5, fa0, fa2
625 ; RV32I-LABEL: fnmsub_s_2:
627 ; RV32I-NEXT: addi sp, sp, -16
628 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
629 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
630 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
631 ; RV32I-NEXT: mv s0, a2
632 ; RV32I-NEXT: mv s1, a0
633 ; RV32I-NEXT: mv a0, a1
634 ; RV32I-NEXT: li a1, 0
635 ; RV32I-NEXT: call __addsf3
636 ; RV32I-NEXT: lui a1, 524288
637 ; RV32I-NEXT: xor a1, a0, a1
638 ; RV32I-NEXT: mv a0, s1
639 ; RV32I-NEXT: mv a2, s0
640 ; RV32I-NEXT: call fmaf
641 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
642 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
643 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
644 ; RV32I-NEXT: addi sp, sp, 16
647 ; RV64I-LABEL: fnmsub_s_2:
649 ; RV64I-NEXT: addi sp, sp, -32
650 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
651 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
652 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
653 ; RV64I-NEXT: mv s0, a2
654 ; RV64I-NEXT: mv s1, a0
655 ; RV64I-NEXT: mv a0, a1
656 ; RV64I-NEXT: li a1, 0
657 ; RV64I-NEXT: call __addsf3
658 ; RV64I-NEXT: lui a1, 524288
659 ; RV64I-NEXT: xor a1, a0, a1
660 ; RV64I-NEXT: mv a0, s1
661 ; RV64I-NEXT: mv a2, s0
662 ; RV64I-NEXT: call fmaf
663 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
664 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
665 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
666 ; RV64I-NEXT: addi sp, sp, 32
669 ; CHECKIZFINX-LABEL: fnmsub_s_2:
670 ; CHECKIZFINX: # %bb.0:
671 ; CHECKIZFINX-NEXT: fadd.s a1, a1, zero
672 ; CHECKIZFINX-NEXT: fnmsub.s a0, a1, a0, a2
673 ; CHECKIZFINX-NEXT: ret
674 %b_ = fadd float 0.0, %b
675 %negb = fneg float %b_
676 %1 = call float @llvm.experimental.constrained.fma.f32(float %a, float %negb, float %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp