; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation -target-abi=ilp32d \
; RUN: | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation -target-abi=lp64d \
; RUN: | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation -target-abi=ilp32 \
; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s
; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation -target-abi=lp64 \
; RUN: | FileCheck -check-prefix=RV64IZFINXZDINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s
; NOTE: The rounding mode metadata does not affect which instruction is
; selected. Dynamic rounding mode is always used for operations that
; support rounding mode.
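;
; For example (an illustrative sketch, not one of the checked functions below),
; a constrained conversion that requests a specific rounding mode in its
; metadata, such as
;   %1 = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %a, metadata !"round.towardzero", metadata !"fpexcept.strict")
; would still be expected to select a plain fcvt.s.d with the dynamic rounding
; mode rather than an rtz-suffixed form.
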
define float @fcvt_s_d(double %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_s_d:
; CHECKIFD-NEXT: fcvt.s.d fa0, fa0
; RV32IZFINXZDINX-LABEL: fcvt_s_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.s.d a0, a0
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_s_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.s.d a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_s_d:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __truncdfsf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_s_d:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __truncdfsf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %1
}
declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)

define double @fcvt_d_s(float %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_s:
; CHECKIFD-NEXT: fcvt.d.s fa0, fa0
; RV32IZFINXZDINX-LABEL: fcvt_d_s:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.s a0, a0
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_d_s:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.s a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_d_s:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __extendsfdf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_s:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __extendsfdf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.fpext.f64.f32(float %a, metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)

define i32 @fcvt_w_d(double %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_w_d:
; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV32IZFINXZDINX-LABEL: fcvt_w_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_w_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_w_d:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_w_d:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixdfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict")
  ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)

; For RV64D, fcvt.lu.d is semantically equivalent to fcvt.wu.d in this case
; because fptoui will produce poison if the result doesn't fit into an i32.
define i32 @fcvt_wu_d(double %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_wu_d:
; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz
; RV32IZFINXZDINX-LABEL: fcvt_wu_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_wu_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_wu_d:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_wu_d:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict")
  ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)

; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_wu_d_multiple_use:
; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz
; CHECKIFD-NEXT: seqz a1, a0
; CHECKIFD-NEXT: add a0, a0, a1
; RV32IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
; RV32IZFINXZDINX-NEXT: seqz a1, a0
; RV32IZFINXZDINX-NEXT: add a0, a0, a1
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz
; RV64IZFINXZDINX-NEXT: seqz a1, a0
; RV64IZFINXZDINX-NEXT: add a0, a0, a1
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_wu_d_multiple_use:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: seqz a1, a0
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_wu_d_multiple_use:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfsi
; RV64I-NEXT: seqz a1, a0
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x, metadata !"fpexcept.strict")
  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a
  ret i32 %c
}

define double @fcvt_d_w(i32 %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_w:
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; RV32IZFINXZDINX-LABEL: fcvt_d_w:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_d_w:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_d_w:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_w:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)

define double @fcvt_d_w_load(ptr %p) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_w_load:
; CHECKIFD-NEXT: lw a0, 0(a0)
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; RV32IZFINXZDINX-LABEL: fcvt_d_w_load:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_d_w_load:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_d_w_load:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_w_load:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %a = load i32, ptr %p
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}

define double @fcvt_d_wu(i32 %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_wu:
; CHECKIFD-NEXT: fcvt.d.wu fa0, a0
; RV32IZFINXZDINX-LABEL: fcvt_d_wu:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_d_wu:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_d_wu:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_wu:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)

define double @fcvt_d_wu_load(ptr %p) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_wu_load:
; RV32IFD-NEXT: lw a0, 0(a0)
; RV32IFD-NEXT: fcvt.d.wu fa0, a0
; RV64IFD-LABEL: fcvt_d_wu_load:
; RV64IFD-NEXT: lwu a0, 0(a0)
; RV64IFD-NEXT: fcvt.d.wu fa0, a0
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_load:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: lw a0, 0(a0)
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_load:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: lwu a0, 0(a0)
; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_d_wu_load:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_wu_load:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %a = load i32, ptr %p
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}

define i64 @fcvt_l_d(double %a) nounwind strictfp {
; RV32IFD-LABEL: fcvt_l_d:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV64IFD-LABEL: fcvt_l_d:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
; RV32IZFINXZDINX-LABEL: fcvt_l_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_l_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rtz
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_l_d:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixdfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_l_d:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %a, metadata !"fpexcept.strict")
  ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)

define i64 @fcvt_lu_d(double %a) nounwind strictfp {
; RV32IFD-LABEL: fcvt_lu_d:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV64IFD-LABEL: fcvt_lu_d:
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz
; RV32IZFINXZDINX-LABEL: fcvt_lu_d:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_lu_d:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rtz
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_lu_d:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_lu_d:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %a, metadata !"fpexcept.strict")
  ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)

define double @fcvt_d_l(i64 %a) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_l:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __floatdidf
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV64IFD-LABEL: fcvt_d_l:
; RV64IFD-NEXT: fcvt.d.l fa0, a0
; RV32IZFINXZDINX-LABEL: fcvt_d_l:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call __floatdidf
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_d_l:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.l a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_d_l:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatdidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_l:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatdidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)

define double @fcvt_d_lu(i64 %a) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_lu:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __floatundidf
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV64IFD-LABEL: fcvt_d_lu:
; RV64IFD-NEXT: fcvt.d.lu fa0, a0
; RV32IZFINXZDINX-LABEL: fcvt_d_lu:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: call __floatundidf
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_d_lu:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.lu a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_d_lu:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatundidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_lu:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatundidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)

define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_w_i8:
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; RV32IZFINXZDINX-LABEL: fcvt_d_w_i8:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_d_w_i8:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_d_w_i8:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_w_i8:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.sitofp.f64.i8(i8, metadata, metadata)

define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_wu_i8:
; CHECKIFD-NEXT: fcvt.d.wu fa0, a0
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i8:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i8:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_d_wu_i8:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_wu_i8:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.uitofp.f64.i8(i8, metadata, metadata)

define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_w_i16:
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; RV32IZFINXZDINX-LABEL: fcvt_d_w_i16:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_d_w_i16:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_d_w_i16:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_w_i16:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.sitofp.f64.i16(i16, metadata, metadata)

define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
; CHECKIFD-LABEL: fcvt_d_wu_i16:
; CHECKIFD-NEXT: fcvt.d.wu fa0, a0
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i16:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i16:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_d_wu_i16:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_wu_i16:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
  %1 = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.uitofp.f64.i16(i16, metadata, metadata)

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_w_demanded_bits:
; RV32IFD-NEXT: addi a0, a0, 1
; RV32IFD-NEXT: fcvt.d.w fa5, a0
; RV32IFD-NEXT: fsd fa5, 0(a1)
; RV64IFD-LABEL: fcvt_d_w_demanded_bits:
; RV64IFD-NEXT: addiw a0, a0, 1
; RV64IFD-NEXT: fcvt.d.w fa5, a0
; RV64IFD-NEXT: fsd fa5, 0(a1)
; RV32IZFINXZDINX-LABEL: fcvt_d_w_demanded_bits:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi a0, a0, 1
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, a0
; RV32IZFINXZDINX-NEXT: sw a2, 0(a1)
; RV32IZFINXZDINX-NEXT: sw a3, 4(a1)
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_d_w_demanded_bits:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addiw a2, a0, 1
; RV64IZFINXZDINX-NEXT: addi a0, a0, 1
; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0
; RV64IZFINXZDINX-NEXT: sd a0, 0(a1)
; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_d_w_demanded_bits:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_w_demanded_bits:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: sd a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
  %3 = add i32 %0, 1
  %4 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
  store double %4, ptr %1, align 8
  ret i32 %3
}

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
; RV32IFD-LABEL: fcvt_d_wu_demanded_bits:
; RV32IFD-NEXT: addi a0, a0, 1
; RV32IFD-NEXT: fcvt.d.wu fa5, a0
; RV32IFD-NEXT: fsd fa5, 0(a1)
; RV64IFD-LABEL: fcvt_d_wu_demanded_bits:
; RV64IFD-NEXT: addiw a0, a0, 1
; RV64IFD-NEXT: fcvt.d.wu fa5, a0
; RV64IFD-NEXT: fsd fa5, 0(a1)
; RV32IZFINXZDINX-LABEL: fcvt_d_wu_demanded_bits:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi a0, a0, 1
; RV32IZFINXZDINX-NEXT: fcvt.d.wu a2, a0
; RV32IZFINXZDINX-NEXT: sw a2, 0(a1)
; RV32IZFINXZDINX-NEXT: sw a3, 4(a1)
; RV32IZFINXZDINX-NEXT: ret
; RV64IZFINXZDINX-LABEL: fcvt_d_wu_demanded_bits:
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addiw a0, a0, 1
; RV64IZFINXZDINX-NEXT: fcvt.d.wu a2, a0
; RV64IZFINXZDINX-NEXT: sd a2, 0(a1)
; RV64IZFINXZDINX-NEXT: ret
; RV32I-LABEL: fcvt_d_wu_demanded_bits:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV64I-LABEL: fcvt_d_wu_demanded_bits:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: sd a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
  %3 = add i32 %0, 1
  %4 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
  store double %4, ptr %1, align 8
  ret i32 %3
}