; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+zfh -verify-machineinstrs \
; RUN:   -target-abi ilp32f -disable-strictnode-mutation < %s \
; RUN:   | FileCheck -check-prefixes=CHECKIZFH,RV32IZFH %s
; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \
; RUN:   -target-abi lp64f -disable-strictnode-mutation < %s \
; RUN:   | FileCheck -check-prefixes=CHECKIZFH,RV64IZFH %s
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh -verify-machineinstrs \
; RUN:   -target-abi ilp32d -disable-strictnode-mutation < %s \
; RUN:   | FileCheck -check-prefix=RV32IDZFH %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh -verify-machineinstrs \
; RUN:   -target-abi lp64d -disable-strictnode-mutation < %s \
; RUN:   | FileCheck -check-prefix=RV64IDZFH %s

; NOTE: The rounding mode metadata does not affect which instruction is
; selected. Dynamic rounding mode is always used for operations that
; support rounding mode.
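; For example, the !"round.dynamic" metadata on the constrained sitofp/uitofp
; and fptrunc calls below lowers to fcvt instructions with no explicit
; rounding-mode operand (i.e. the dynamic frm mode), while the rtz operand on
; the fptosi/fptoui conversions comes from the truncating semantics of the
; conversion itself rather than from the metadata.
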
define i16 @fcvt_si_h(half %a) nounwind strictfp {
; RV32IZFH-LABEL: fcvt_si_h:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: fcvt.w.h a0, fa0, rtz
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_si_h:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rtz
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_si_h:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: fcvt.w.h a0, fa0, rtz
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_si_h:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.l.h a0, fa0, rtz
; RV64IDZFH-NEXT: ret
  %1 = call i16 @llvm.experimental.constrained.fptosi.i16.f16(half %a, metadata !"fpexcept.strict") strictfp
  ret i16 %1
}
declare i16 @llvm.experimental.constrained.fptosi.i16.f16(half, metadata)

define i16 @fcvt_ui_h(half %a) nounwind strictfp {
; RV32IZFH-LABEL: fcvt_ui_h:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: fcvt.wu.h a0, fa0, rtz
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_ui_h:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rtz
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_ui_h:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_ui_h:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.lu.h a0, fa0, rtz
; RV64IDZFH-NEXT: ret
  %1 = call i16 @llvm.experimental.constrained.fptoui.i16.f16(half %a, metadata !"fpexcept.strict") strictfp
  ret i16 %1
}
declare i16 @llvm.experimental.constrained.fptoui.i16.f16(half, metadata)

define i32 @fcvt_w_h(half %a) nounwind strictfp {
; CHECKIZFH-LABEL: fcvt_w_h:
; CHECKIZFH: # %bb.0:
; CHECKIZFH-NEXT: fcvt.w.h a0, fa0, rtz
; CHECKIZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_w_h:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: fcvt.w.h a0, fa0, rtz
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_w_h:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.w.h a0, fa0, rtz
; RV64IDZFH-NEXT: ret
  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
  ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata)

define i32 @fcvt_wu_h(half %a) nounwind strictfp {
; CHECKIZFH-LABEL: fcvt_wu_h:
; CHECKIZFH: # %bb.0:
; CHECKIZFH-NEXT: fcvt.wu.h a0, fa0, rtz
; CHECKIZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_wu_h:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_wu_h:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz
; RV64IDZFH-NEXT: ret
  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
  ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata)

; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
; FIXME: We should not have an fcvt.wu.h and an fcvt.lu.h.
define i32 @fcvt_wu_h_multiple_use(half %x, i32* %y) strictfp {
; CHECKIZFH-LABEL: fcvt_wu_h_multiple_use:
; CHECKIZFH: # %bb.0:
; CHECKIZFH-NEXT: fcvt.wu.h a1, fa0, rtz
; CHECKIZFH-NEXT: li a0, 1
; CHECKIZFH-NEXT: beqz a1, .LBB4_2
; CHECKIZFH-NEXT: # %bb.1:
; CHECKIZFH-NEXT: mv a0, a1
; CHECKIZFH-NEXT: .LBB4_2:
; CHECKIZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_wu_h_multiple_use:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: fcvt.wu.h a1, fa0, rtz
; RV32IDZFH-NEXT: li a0, 1
; RV32IDZFH-NEXT: beqz a1, .LBB4_2
; RV32IDZFH-NEXT: # %bb.1:
; RV32IDZFH-NEXT: mv a0, a1
; RV32IDZFH-NEXT: .LBB4_2:
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_wu_h_multiple_use:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.wu.h a1, fa0, rtz
; RV64IDZFH-NEXT: li a0, 1
; RV64IDZFH-NEXT: beqz a1, .LBB4_2
; RV64IDZFH-NEXT: # %bb.1:
; RV64IDZFH-NEXT: mv a0, a1
; RV64IDZFH-NEXT: .LBB4_2:
; RV64IDZFH-NEXT: ret
  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict") strictfp
  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a
  ret i32 %c
}

define i64 @fcvt_l_h(half %a) nounwind strictfp {
; RV32IZFH-LABEL: fcvt_l_h:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call __fixhfdi@plt
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_l_h:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rtz
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_l_h:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi sp, sp, -16
; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IDZFH-NEXT: call __fixhfdi@plt
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: addi sp, sp, 16
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_l_h:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.l.h a0, fa0, rtz
; RV64IDZFH-NEXT: ret
  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f16(half %a, metadata !"fpexcept.strict") strictfp
  ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptosi.i64.f16(half, metadata)

define i64 @fcvt_lu_h(half %a) nounwind strictfp {
; RV32IZFH-LABEL: fcvt_lu_h:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call __fixunshfdi@plt
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_lu_h:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rtz
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_lu_h:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi sp, sp, -16
; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IDZFH-NEXT: call __fixunshfdi@plt
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: addi sp, sp, 16
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_lu_h:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.lu.h a0, fa0, rtz
; RV64IDZFH-NEXT: ret
  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f16(half %a, metadata !"fpexcept.strict") strictfp
  ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptoui.i64.f16(half, metadata)

define half @fcvt_h_si(i16 %a) nounwind strictfp {
; RV32IZFH-LABEL: fcvt_h_si:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: slli a0, a0, 16
; RV32IZFH-NEXT: srai a0, a0, 16
; RV32IZFH-NEXT: fcvt.h.w fa0, a0
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_h_si:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: slli a0, a0, 48
; RV64IZFH-NEXT: srai a0, a0, 48
; RV64IZFH-NEXT: fcvt.h.w fa0, a0
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_si:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: slli a0, a0, 16
; RV32IDZFH-NEXT: srai a0, a0, 16
; RV32IDZFH-NEXT: fcvt.h.w fa0, a0
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_si:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: slli a0, a0, 48
; RV64IDZFH-NEXT: srai a0, a0, 48
; RV64IDZFH-NEXT: fcvt.h.w fa0, a0
; RV64IDZFH-NEXT: ret
  %1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret half %1
}
declare half @llvm.experimental.constrained.sitofp.f16.i16(i16, metadata, metadata)

define half @fcvt_h_si_signext(i16 signext %a) nounwind strictfp {
; CHECKIZFH-LABEL: fcvt_h_si_signext:
; CHECKIZFH: # %bb.0:
; CHECKIZFH-NEXT: fcvt.h.w fa0, a0
; CHECKIZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_si_signext:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: fcvt.h.w fa0, a0
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_si_signext:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.h.w fa0, a0
; RV64IDZFH-NEXT: ret
  %1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret half %1
}

define half @fcvt_h_ui(i16 %a) nounwind strictfp {
; RV32IZFH-LABEL: fcvt_h_ui:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: slli a0, a0, 16
; RV32IZFH-NEXT: srli a0, a0, 16
; RV32IZFH-NEXT: fcvt.h.wu fa0, a0
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_h_ui:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: slli a0, a0, 48
; RV64IZFH-NEXT: srli a0, a0, 48
; RV64IZFH-NEXT: fcvt.h.wu fa0, a0
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_ui:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: slli a0, a0, 16
; RV32IDZFH-NEXT: srli a0, a0, 16
; RV32IDZFH-NEXT: fcvt.h.wu fa0, a0
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_ui:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: slli a0, a0, 48
; RV64IDZFH-NEXT: srli a0, a0, 48
; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0
; RV64IDZFH-NEXT: ret
  %1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret half %1
}
declare half @llvm.experimental.constrained.uitofp.f16.i16(i16, metadata, metadata)

define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind strictfp {
; CHECKIZFH-LABEL: fcvt_h_ui_zeroext:
; CHECKIZFH: # %bb.0:
; CHECKIZFH-NEXT: fcvt.h.wu fa0, a0
; CHECKIZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_ui_zeroext:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: fcvt.h.wu fa0, a0
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_ui_zeroext:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0
; RV64IDZFH-NEXT: ret
  %1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret half %1
}

define half @fcvt_h_w(i32 %a) nounwind strictfp {
; CHECKIZFH-LABEL: fcvt_h_w:
; CHECKIZFH: # %bb.0:
; CHECKIZFH-NEXT: fcvt.h.w fa0, a0
; CHECKIZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_w:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: fcvt.h.w fa0, a0
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_w:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.h.w fa0, a0
; RV64IDZFH-NEXT: ret
  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret half %1
}
declare half @llvm.experimental.constrained.sitofp.f16.i32(i32, metadata, metadata)

define half @fcvt_h_w_load(i32* %p) nounwind strictfp {
; CHECKIZFH-LABEL: fcvt_h_w_load:
; CHECKIZFH: # %bb.0:
; CHECKIZFH-NEXT: lw a0, 0(a0)
; CHECKIZFH-NEXT: fcvt.h.w fa0, a0
; CHECKIZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_w_load:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: lw a0, 0(a0)
; RV32IDZFH-NEXT: fcvt.h.w fa0, a0
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_w_load:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: lw a0, 0(a0)
; RV64IDZFH-NEXT: fcvt.h.w fa0, a0
; RV64IDZFH-NEXT: ret
  %a = load i32, i32* %p
  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret half %1
}

define half @fcvt_h_wu(i32 %a) nounwind strictfp {
; CHECKIZFH-LABEL: fcvt_h_wu:
; CHECKIZFH: # %bb.0:
; CHECKIZFH-NEXT: fcvt.h.wu fa0, a0
; CHECKIZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_wu:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: fcvt.h.wu fa0, a0
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_wu:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0
; RV64IDZFH-NEXT: ret
  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret half %1
}
declare half @llvm.experimental.constrained.uitofp.f16.i32(i32, metadata, metadata)

define half @fcvt_h_wu_load(i32* %p) nounwind strictfp {
; RV32IZFH-LABEL: fcvt_h_wu_load:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: lw a0, 0(a0)
; RV32IZFH-NEXT: fcvt.h.wu fa0, a0
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_h_wu_load:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: lwu a0, 0(a0)
; RV64IZFH-NEXT: fcvt.h.wu fa0, a0
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_wu_load:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: lw a0, 0(a0)
; RV32IDZFH-NEXT: fcvt.h.wu fa0, a0
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_wu_load:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: lwu a0, 0(a0)
; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0
; RV64IDZFH-NEXT: ret
  %a = load i32, i32* %p
  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret half %1
}

define half @fcvt_h_l(i64 %a) nounwind strictfp {
; RV32IZFH-LABEL: fcvt_h_l:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call __floatdihf@plt
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_h_l:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: fcvt.h.l fa0, a0
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_l:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi sp, sp, -16
; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IDZFH-NEXT: call __floatdihf@plt
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: addi sp, sp, 16
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_l:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.h.l fa0, a0
; RV64IDZFH-NEXT: ret
  %1 = call half @llvm.experimental.constrained.sitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret half %1
}
declare half @llvm.experimental.constrained.sitofp.f16.i64(i64, metadata, metadata)

define half @fcvt_h_lu(i64 %a) nounwind strictfp {
; RV32IZFH-LABEL: fcvt_h_lu:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call __floatundihf@plt
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_h_lu:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: fcvt.h.lu fa0, a0
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_lu:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi sp, sp, -16
; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IDZFH-NEXT: call __floatundihf@plt
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: addi sp, sp, 16
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_lu:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.h.lu fa0, a0
; RV64IDZFH-NEXT: ret
  %1 = call half @llvm.experimental.constrained.uitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret half %1
}
declare half @llvm.experimental.constrained.uitofp.f16.i64(i64, metadata, metadata)

define half @fcvt_h_s(float %a) nounwind strictfp {
; CHECKIZFH-LABEL: fcvt_h_s:
; CHECKIZFH: # %bb.0:
; CHECKIZFH-NEXT: fcvt.h.s fa0, fa0
; CHECKIZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_s:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_s:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IDZFH-NEXT: ret
  %1 = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret half %1
}
declare half @llvm.experimental.constrained.fptrunc.f16.f32(float, metadata, metadata)

define float @fcvt_s_h(half %a) nounwind strictfp {
; CHECKIZFH-LABEL: fcvt_s_h:
; CHECKIZFH: # %bb.0:
; CHECKIZFH-NEXT: fcvt.s.h fa0, fa0
; CHECKIZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_s_h:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: fcvt.s.h fa0, fa0
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_s_h:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.s.h fa0, fa0
; RV64IDZFH-NEXT: ret
  %1 = call float @llvm.experimental.constrained.fpext.f32.f16(half %a, metadata !"fpexcept.strict")
  ret float %1
}
declare float @llvm.experimental.constrained.fpext.f32.f16(half, metadata)

define half @fcvt_h_d(double %a) nounwind strictfp {
; RV32IZFH-LABEL: fcvt_h_d:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call __truncdfhf2@plt
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_h_d:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call __truncdfhf2@plt
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_d:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: fcvt.h.d fa0, fa0
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_d:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.h.d fa0, fa0
; RV64IDZFH-NEXT: ret
  %1 = call half @llvm.experimental.constrained.fptrunc.f16.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret half %1
}
declare half @llvm.experimental.constrained.fptrunc.f16.f64(double, metadata, metadata)

define double @fcvt_d_h(half %a) nounwind strictfp {
; RV32IZFH-LABEL: fcvt_d_h:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
; RV32IZFH-NEXT: call __extendsfdf2@plt
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_d_h:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
; RV64IZFH-NEXT: call __extendsfdf2@plt
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_d_h:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: fcvt.d.h fa0, fa0
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_d_h:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: fcvt.d.h fa0, fa0
; RV64IDZFH-NEXT: ret
  %1 = call double @llvm.experimental.constrained.fpext.f64.f16(half %a, metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.fpext.f64.f16(half, metadata)

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, half* %1) strictfp {
; RV32IZFH-LABEL: fcvt_h_w_demanded_bits:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi a0, a0, 1
; RV32IZFH-NEXT: fcvt.h.w ft0, a0
; RV32IZFH-NEXT: fsh ft0, 0(a1)
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_h_w_demanded_bits:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: addiw a0, a0, 1
; RV64IZFH-NEXT: fcvt.h.w ft0, a0
; RV64IZFH-NEXT: fsh ft0, 0(a1)
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_w_demanded_bits:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi a0, a0, 1
; RV32IDZFH-NEXT: fcvt.h.w ft0, a0
; RV32IDZFH-NEXT: fsh ft0, 0(a1)
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_w_demanded_bits:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: addiw a0, a0, 1
; RV64IDZFH-NEXT: fcvt.h.w ft0, a0
; RV64IDZFH-NEXT: fsh ft0, 0(a1)
; RV64IDZFH-NEXT: ret
  %3 = add i32 %0, 1
  %4 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  store half %4, half* %1, align 2
  ret i32 %3
}

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, half* %1) strictfp {
; RV32IZFH-LABEL: fcvt_h_wu_demanded_bits:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi a0, a0, 1
; RV32IZFH-NEXT: fcvt.h.wu ft0, a0
; RV32IZFH-NEXT: fsh ft0, 0(a1)
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_h_wu_demanded_bits:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: addiw a0, a0, 1
; RV64IZFH-NEXT: fcvt.h.wu ft0, a0
; RV64IZFH-NEXT: fsh ft0, 0(a1)
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_h_wu_demanded_bits:
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi a0, a0, 1
; RV32IDZFH-NEXT: fcvt.h.wu ft0, a0
; RV32IDZFH-NEXT: fsh ft0, 0(a1)
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_wu_demanded_bits:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: addiw a0, a0, 1
; RV64IDZFH-NEXT: fcvt.h.wu ft0, a0
; RV64IDZFH-NEXT: fsh ft0, 0(a1)
; RV64IDZFH-NEXT: ret
  %3 = add i32 %0, 1
  %4 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  store half %4, half* %1, align 2
  ret i32 %3
}