1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
3 ; RUN: -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
4 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
5 ; RUN: -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
6 ; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
7 ; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=RV32IZFINXZDINX %s
8 ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
9 ; RUN: -target-abi=lp64 | FileCheck -check-prefixes=RV64IZFINXZDINX %s
11 define signext i32 @test_floor_si32(double %x) {
12 ; CHECKIFD-LABEL: test_floor_si32:
14 ; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rdn
15 ; CHECKIFD-NEXT: feq.d a1, fa0, fa0
16 ; CHECKIFD-NEXT: seqz a1, a1
17 ; CHECKIFD-NEXT: addi a1, a1, -1
18 ; CHECKIFD-NEXT: and a0, a1, a0
21 ; RV32IZFINXZDINX-LABEL: test_floor_si32:
22 ; RV32IZFINXZDINX: # %bb.0:
23 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
24 ; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
25 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
26 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
27 ; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
28 ; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
29 ; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rdn
30 ; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
31 ; RV32IZFINXZDINX-NEXT: seqz a0, a0
32 ; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
33 ; RV32IZFINXZDINX-NEXT: and a0, a0, a2
34 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
35 ; RV32IZFINXZDINX-NEXT: ret
37 ; RV64IZFINXZDINX-LABEL: test_floor_si32:
38 ; RV64IZFINXZDINX: # %bb.0:
39 ; RV64IZFINXZDINX-NEXT: fcvt.w.d a1, a0, rdn
40 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
41 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
42 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
43 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
44 ; RV64IZFINXZDINX-NEXT: ret
45 %a = call double @llvm.floor.f64(double %x)
46 %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
50 define i64 @test_floor_si64(double %x) nounwind {
51 ; RV32IFD-LABEL: test_floor_si64:
53 ; RV32IFD-NEXT: addi sp, sp, -16
54 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
55 ; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
56 ; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
57 ; RV32IFD-NEXT: call floor
58 ; RV32IFD-NEXT: lui a0, %hi(.LCPI1_0)
59 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI1_0)(a0)
60 ; RV32IFD-NEXT: fmv.d fs0, fa0
61 ; RV32IFD-NEXT: fle.d s0, fa5, fa0
62 ; RV32IFD-NEXT: call __fixdfdi
63 ; RV32IFD-NEXT: lui a4, 524288
64 ; RV32IFD-NEXT: lui a2, 524288
65 ; RV32IFD-NEXT: beqz s0, .LBB1_2
66 ; RV32IFD-NEXT: # %bb.1:
67 ; RV32IFD-NEXT: mv a2, a1
68 ; RV32IFD-NEXT: .LBB1_2:
69 ; RV32IFD-NEXT: lui a1, %hi(.LCPI1_1)
70 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI1_1)(a1)
71 ; RV32IFD-NEXT: flt.d a3, fa5, fs0
72 ; RV32IFD-NEXT: beqz a3, .LBB1_4
73 ; RV32IFD-NEXT: # %bb.3:
74 ; RV32IFD-NEXT: addi a2, a4, -1
75 ; RV32IFD-NEXT: .LBB1_4:
76 ; RV32IFD-NEXT: feq.d a1, fs0, fs0
77 ; RV32IFD-NEXT: neg a4, a1
78 ; RV32IFD-NEXT: and a1, a4, a2
79 ; RV32IFD-NEXT: neg a2, a3
80 ; RV32IFD-NEXT: neg a3, s0
81 ; RV32IFD-NEXT: and a0, a3, a0
82 ; RV32IFD-NEXT: or a0, a2, a0
83 ; RV32IFD-NEXT: and a0, a4, a0
84 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
85 ; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
86 ; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
87 ; RV32IFD-NEXT: addi sp, sp, 16
90 ; RV64IFD-LABEL: test_floor_si64:
92 ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rdn
93 ; RV64IFD-NEXT: feq.d a1, fa0, fa0
94 ; RV64IFD-NEXT: seqz a1, a1
95 ; RV64IFD-NEXT: addi a1, a1, -1
96 ; RV64IFD-NEXT: and a0, a1, a0
99 ; RV32IZFINXZDINX-LABEL: test_floor_si64:
100 ; RV32IZFINXZDINX: # %bb.0:
101 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
102 ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
103 ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
104 ; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
105 ; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
106 ; RV32IZFINXZDINX-NEXT: call floor
107 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
108 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
109 ; RV32IZFINXZDINX-NEXT: lw s2, 8(sp)
110 ; RV32IZFINXZDINX-NEXT: lw s3, 12(sp)
111 ; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI1_0)
112 ; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI1_0+4)(a2)
113 ; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
114 ; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2
115 ; RV32IZFINXZDINX-NEXT: call __fixdfdi
116 ; RV32IZFINXZDINX-NEXT: lui a4, 524288
117 ; RV32IZFINXZDINX-NEXT: lui a2, 524288
118 ; RV32IZFINXZDINX-NEXT: beqz s0, .LBB1_2
119 ; RV32IZFINXZDINX-NEXT: # %bb.1:
120 ; RV32IZFINXZDINX-NEXT: mv a2, a1
121 ; RV32IZFINXZDINX-NEXT: .LBB1_2:
122 ; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI1_1)
123 ; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI1_1)(a1)
124 ; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI1_1+4)(a1)
125 ; RV32IZFINXZDINX-NEXT: flt.d a3, a6, s2
126 ; RV32IZFINXZDINX-NEXT: beqz a3, .LBB1_4
127 ; RV32IZFINXZDINX-NEXT: # %bb.3:
128 ; RV32IZFINXZDINX-NEXT: addi a2, a4, -1
129 ; RV32IZFINXZDINX-NEXT: .LBB1_4:
130 ; RV32IZFINXZDINX-NEXT: feq.d a1, s2, s2
131 ; RV32IZFINXZDINX-NEXT: neg a4, a1
132 ; RV32IZFINXZDINX-NEXT: and a1, a4, a2
133 ; RV32IZFINXZDINX-NEXT: neg a2, s0
134 ; RV32IZFINXZDINX-NEXT: and a0, a2, a0
135 ; RV32IZFINXZDINX-NEXT: neg a2, a3
136 ; RV32IZFINXZDINX-NEXT: or a0, a2, a0
137 ; RV32IZFINXZDINX-NEXT: and a0, a4, a0
138 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
139 ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
140 ; RV32IZFINXZDINX-NEXT: lw s2, 20(sp) # 4-byte Folded Reload
141 ; RV32IZFINXZDINX-NEXT: lw s3, 16(sp) # 4-byte Folded Reload
142 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
143 ; RV32IZFINXZDINX-NEXT: ret
145 ; RV64IZFINXZDINX-LABEL: test_floor_si64:
146 ; RV64IZFINXZDINX: # %bb.0:
147 ; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rdn
148 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
149 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
150 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
151 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
152 ; RV64IZFINXZDINX-NEXT: ret
153 %a = call double @llvm.floor.f64(double %x)
154 %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
158 define signext i32 @test_floor_ui32(double %x) {
159 ; CHECKIFD-LABEL: test_floor_ui32:
161 ; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rdn
162 ; CHECKIFD-NEXT: feq.d a1, fa0, fa0
163 ; CHECKIFD-NEXT: seqz a1, a1
164 ; CHECKIFD-NEXT: addi a1, a1, -1
165 ; CHECKIFD-NEXT: and a0, a1, a0
168 ; RV32IZFINXZDINX-LABEL: test_floor_ui32:
169 ; RV32IZFINXZDINX: # %bb.0:
170 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
171 ; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
172 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
173 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
174 ; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
175 ; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
176 ; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rdn
177 ; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
178 ; RV32IZFINXZDINX-NEXT: seqz a0, a0
179 ; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
180 ; RV32IZFINXZDINX-NEXT: and a0, a0, a2
181 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
182 ; RV32IZFINXZDINX-NEXT: ret
184 ; RV64IZFINXZDINX-LABEL: test_floor_ui32:
185 ; RV64IZFINXZDINX: # %bb.0:
186 ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a1, a0, rdn
187 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
188 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
189 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
190 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
191 ; RV64IZFINXZDINX-NEXT: ret
192 %a = call double @llvm.floor.f64(double %x)
193 %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
197 define i64 @test_floor_ui64(double %x) nounwind {
198 ; RV32IFD-LABEL: test_floor_ui64:
200 ; RV32IFD-NEXT: addi sp, sp, -16
201 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
202 ; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
203 ; RV32IFD-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
204 ; RV32IFD-NEXT: call floor
205 ; RV32IFD-NEXT: lui a0, %hi(.LCPI3_0)
206 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI3_0)(a0)
207 ; RV32IFD-NEXT: flt.d a0, fa5, fa0
208 ; RV32IFD-NEXT: neg s0, a0
209 ; RV32IFD-NEXT: fcvt.d.w fa5, zero
210 ; RV32IFD-NEXT: fle.d a0, fa5, fa0
211 ; RV32IFD-NEXT: neg s1, a0
212 ; RV32IFD-NEXT: call __fixunsdfdi
213 ; RV32IFD-NEXT: and a0, s1, a0
214 ; RV32IFD-NEXT: or a0, s0, a0
215 ; RV32IFD-NEXT: and a1, s1, a1
216 ; RV32IFD-NEXT: or a1, s0, a1
217 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
218 ; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
219 ; RV32IFD-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
220 ; RV32IFD-NEXT: addi sp, sp, 16
223 ; RV64IFD-LABEL: test_floor_ui64:
225 ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rdn
226 ; RV64IFD-NEXT: feq.d a1, fa0, fa0
227 ; RV64IFD-NEXT: seqz a1, a1
228 ; RV64IFD-NEXT: addi a1, a1, -1
229 ; RV64IFD-NEXT: and a0, a1, a0
232 ; RV32IZFINXZDINX-LABEL: test_floor_ui64:
233 ; RV32IZFINXZDINX: # %bb.0:
234 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
235 ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
236 ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
237 ; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
238 ; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
239 ; RV32IZFINXZDINX-NEXT: call floor
240 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
241 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
242 ; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
243 ; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
244 ; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
245 ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
246 ; RV32IZFINXZDINX-NEXT: neg s2, a2
247 ; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
248 ; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI3_0)
249 ; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI3_0+4)(a2)
250 ; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI3_0)(a2)
251 ; RV32IZFINXZDINX-NEXT: and a0, s2, a0
252 ; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
253 ; RV32IZFINXZDINX-NEXT: neg a2, a2
254 ; RV32IZFINXZDINX-NEXT: or a0, a2, a0
255 ; RV32IZFINXZDINX-NEXT: and a1, s2, a1
256 ; RV32IZFINXZDINX-NEXT: or a1, a2, a1
257 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
258 ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
259 ; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
260 ; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
261 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
262 ; RV32IZFINXZDINX-NEXT: ret
264 ; RV64IZFINXZDINX-LABEL: test_floor_ui64:
265 ; RV64IZFINXZDINX: # %bb.0:
266 ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a1, a0, rdn
267 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
268 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
269 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
270 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
271 ; RV64IZFINXZDINX-NEXT: ret
272 %a = call double @llvm.floor.f64(double %x)
273 %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
277 define signext i32 @test_ceil_si32(double %x) {
278 ; CHECKIFD-LABEL: test_ceil_si32:
280 ; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rup
281 ; CHECKIFD-NEXT: feq.d a1, fa0, fa0
282 ; CHECKIFD-NEXT: seqz a1, a1
283 ; CHECKIFD-NEXT: addi a1, a1, -1
284 ; CHECKIFD-NEXT: and a0, a1, a0
287 ; RV32IZFINXZDINX-LABEL: test_ceil_si32:
288 ; RV32IZFINXZDINX: # %bb.0:
289 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
290 ; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
291 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
292 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
293 ; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
294 ; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
295 ; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rup
296 ; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
297 ; RV32IZFINXZDINX-NEXT: seqz a0, a0
298 ; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
299 ; RV32IZFINXZDINX-NEXT: and a0, a0, a2
300 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
301 ; RV32IZFINXZDINX-NEXT: ret
303 ; RV64IZFINXZDINX-LABEL: test_ceil_si32:
304 ; RV64IZFINXZDINX: # %bb.0:
305 ; RV64IZFINXZDINX-NEXT: fcvt.w.d a1, a0, rup
306 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
307 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
308 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
309 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
310 ; RV64IZFINXZDINX-NEXT: ret
311 %a = call double @llvm.ceil.f64(double %x)
312 %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
316 define i64 @test_ceil_si64(double %x) nounwind {
317 ; RV32IFD-LABEL: test_ceil_si64:
319 ; RV32IFD-NEXT: addi sp, sp, -16
320 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
321 ; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
322 ; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
323 ; RV32IFD-NEXT: call ceil
324 ; RV32IFD-NEXT: lui a0, %hi(.LCPI5_0)
325 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI5_0)(a0)
326 ; RV32IFD-NEXT: fmv.d fs0, fa0
327 ; RV32IFD-NEXT: fle.d s0, fa5, fa0
328 ; RV32IFD-NEXT: call __fixdfdi
329 ; RV32IFD-NEXT: lui a4, 524288
330 ; RV32IFD-NEXT: lui a2, 524288
331 ; RV32IFD-NEXT: beqz s0, .LBB5_2
332 ; RV32IFD-NEXT: # %bb.1:
333 ; RV32IFD-NEXT: mv a2, a1
334 ; RV32IFD-NEXT: .LBB5_2:
335 ; RV32IFD-NEXT: lui a1, %hi(.LCPI5_1)
336 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI5_1)(a1)
337 ; RV32IFD-NEXT: flt.d a3, fa5, fs0
338 ; RV32IFD-NEXT: beqz a3, .LBB5_4
339 ; RV32IFD-NEXT: # %bb.3:
340 ; RV32IFD-NEXT: addi a2, a4, -1
341 ; RV32IFD-NEXT: .LBB5_4:
342 ; RV32IFD-NEXT: feq.d a1, fs0, fs0
343 ; RV32IFD-NEXT: neg a4, a1
344 ; RV32IFD-NEXT: and a1, a4, a2
345 ; RV32IFD-NEXT: neg a2, a3
346 ; RV32IFD-NEXT: neg a3, s0
347 ; RV32IFD-NEXT: and a0, a3, a0
348 ; RV32IFD-NEXT: or a0, a2, a0
349 ; RV32IFD-NEXT: and a0, a4, a0
350 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
351 ; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
352 ; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
353 ; RV32IFD-NEXT: addi sp, sp, 16
356 ; RV64IFD-LABEL: test_ceil_si64:
358 ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rup
359 ; RV64IFD-NEXT: feq.d a1, fa0, fa0
360 ; RV64IFD-NEXT: seqz a1, a1
361 ; RV64IFD-NEXT: addi a1, a1, -1
362 ; RV64IFD-NEXT: and a0, a1, a0
365 ; RV32IZFINXZDINX-LABEL: test_ceil_si64:
366 ; RV32IZFINXZDINX: # %bb.0:
367 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
368 ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
369 ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
370 ; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
371 ; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
372 ; RV32IZFINXZDINX-NEXT: call ceil
373 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
374 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
375 ; RV32IZFINXZDINX-NEXT: lw s2, 8(sp)
376 ; RV32IZFINXZDINX-NEXT: lw s3, 12(sp)
377 ; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI5_0)
378 ; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI5_0+4)(a2)
379 ; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI5_0)(a2)
380 ; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2
381 ; RV32IZFINXZDINX-NEXT: call __fixdfdi
382 ; RV32IZFINXZDINX-NEXT: lui a4, 524288
383 ; RV32IZFINXZDINX-NEXT: lui a2, 524288
384 ; RV32IZFINXZDINX-NEXT: beqz s0, .LBB5_2
385 ; RV32IZFINXZDINX-NEXT: # %bb.1:
386 ; RV32IZFINXZDINX-NEXT: mv a2, a1
387 ; RV32IZFINXZDINX-NEXT: .LBB5_2:
388 ; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI5_1)
389 ; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI5_1)(a1)
390 ; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI5_1+4)(a1)
391 ; RV32IZFINXZDINX-NEXT: flt.d a3, a6, s2
392 ; RV32IZFINXZDINX-NEXT: beqz a3, .LBB5_4
393 ; RV32IZFINXZDINX-NEXT: # %bb.3:
394 ; RV32IZFINXZDINX-NEXT: addi a2, a4, -1
395 ; RV32IZFINXZDINX-NEXT: .LBB5_4:
396 ; RV32IZFINXZDINX-NEXT: feq.d a1, s2, s2
397 ; RV32IZFINXZDINX-NEXT: neg a4, a1
398 ; RV32IZFINXZDINX-NEXT: and a1, a4, a2
399 ; RV32IZFINXZDINX-NEXT: neg a2, s0
400 ; RV32IZFINXZDINX-NEXT: and a0, a2, a0
401 ; RV32IZFINXZDINX-NEXT: neg a2, a3
402 ; RV32IZFINXZDINX-NEXT: or a0, a2, a0
403 ; RV32IZFINXZDINX-NEXT: and a0, a4, a0
404 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
405 ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
406 ; RV32IZFINXZDINX-NEXT: lw s2, 20(sp) # 4-byte Folded Reload
407 ; RV32IZFINXZDINX-NEXT: lw s3, 16(sp) # 4-byte Folded Reload
408 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
409 ; RV32IZFINXZDINX-NEXT: ret
411 ; RV64IZFINXZDINX-LABEL: test_ceil_si64:
412 ; RV64IZFINXZDINX: # %bb.0:
413 ; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rup
414 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
415 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
416 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
417 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
418 ; RV64IZFINXZDINX-NEXT: ret
419 %a = call double @llvm.ceil.f64(double %x)
420 %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
424 define signext i32 @test_ceil_ui32(double %x) {
425 ; CHECKIFD-LABEL: test_ceil_ui32:
427 ; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rup
428 ; CHECKIFD-NEXT: feq.d a1, fa0, fa0
429 ; CHECKIFD-NEXT: seqz a1, a1
430 ; CHECKIFD-NEXT: addi a1, a1, -1
431 ; CHECKIFD-NEXT: and a0, a1, a0
434 ; RV32IZFINXZDINX-LABEL: test_ceil_ui32:
435 ; RV32IZFINXZDINX: # %bb.0:
436 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
437 ; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
438 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
439 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
440 ; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
441 ; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
442 ; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rup
443 ; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
444 ; RV32IZFINXZDINX-NEXT: seqz a0, a0
445 ; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
446 ; RV32IZFINXZDINX-NEXT: and a0, a0, a2
447 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
448 ; RV32IZFINXZDINX-NEXT: ret
450 ; RV64IZFINXZDINX-LABEL: test_ceil_ui32:
451 ; RV64IZFINXZDINX: # %bb.0:
452 ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a1, a0, rup
453 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
454 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
455 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
456 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
457 ; RV64IZFINXZDINX-NEXT: ret
458 %a = call double @llvm.ceil.f64(double %x)
459 %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
463 define i64 @test_ceil_ui64(double %x) nounwind {
464 ; RV32IFD-LABEL: test_ceil_ui64:
466 ; RV32IFD-NEXT: addi sp, sp, -16
467 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
468 ; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
469 ; RV32IFD-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
470 ; RV32IFD-NEXT: call ceil
471 ; RV32IFD-NEXT: lui a0, %hi(.LCPI7_0)
472 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI7_0)(a0)
473 ; RV32IFD-NEXT: flt.d a0, fa5, fa0
474 ; RV32IFD-NEXT: neg s0, a0
475 ; RV32IFD-NEXT: fcvt.d.w fa5, zero
476 ; RV32IFD-NEXT: fle.d a0, fa5, fa0
477 ; RV32IFD-NEXT: neg s1, a0
478 ; RV32IFD-NEXT: call __fixunsdfdi
479 ; RV32IFD-NEXT: and a0, s1, a0
480 ; RV32IFD-NEXT: or a0, s0, a0
481 ; RV32IFD-NEXT: and a1, s1, a1
482 ; RV32IFD-NEXT: or a1, s0, a1
483 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
484 ; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
485 ; RV32IFD-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
486 ; RV32IFD-NEXT: addi sp, sp, 16
489 ; RV64IFD-LABEL: test_ceil_ui64:
491 ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rup
492 ; RV64IFD-NEXT: feq.d a1, fa0, fa0
493 ; RV64IFD-NEXT: seqz a1, a1
494 ; RV64IFD-NEXT: addi a1, a1, -1
495 ; RV64IFD-NEXT: and a0, a1, a0
498 ; RV32IZFINXZDINX-LABEL: test_ceil_ui64:
499 ; RV32IZFINXZDINX: # %bb.0:
500 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
501 ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
502 ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
503 ; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
504 ; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
505 ; RV32IZFINXZDINX-NEXT: call ceil
506 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
507 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
508 ; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
509 ; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
510 ; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
511 ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
512 ; RV32IZFINXZDINX-NEXT: neg s2, a2
513 ; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
514 ; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI7_0)
515 ; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI7_0+4)(a2)
516 ; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI7_0)(a2)
517 ; RV32IZFINXZDINX-NEXT: and a0, s2, a0
518 ; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
519 ; RV32IZFINXZDINX-NEXT: neg a2, a2
520 ; RV32IZFINXZDINX-NEXT: or a0, a2, a0
521 ; RV32IZFINXZDINX-NEXT: and a1, s2, a1
522 ; RV32IZFINXZDINX-NEXT: or a1, a2, a1
523 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
524 ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
525 ; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
526 ; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
527 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
528 ; RV32IZFINXZDINX-NEXT: ret
530 ; RV64IZFINXZDINX-LABEL: test_ceil_ui64:
531 ; RV64IZFINXZDINX: # %bb.0:
532 ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a1, a0, rup
533 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
534 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
535 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
536 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
537 ; RV64IZFINXZDINX-NEXT: ret
538 %a = call double @llvm.ceil.f64(double %x)
539 %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
543 define signext i32 @test_trunc_si32(double %x) {
544 ; CHECKIFD-LABEL: test_trunc_si32:
546 ; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rtz
547 ; CHECKIFD-NEXT: feq.d a1, fa0, fa0
548 ; CHECKIFD-NEXT: seqz a1, a1
549 ; CHECKIFD-NEXT: addi a1, a1, -1
550 ; CHECKIFD-NEXT: and a0, a1, a0
553 ; RV32IZFINXZDINX-LABEL: test_trunc_si32:
554 ; RV32IZFINXZDINX: # %bb.0:
555 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
556 ; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
557 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
558 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
559 ; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
560 ; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
561 ; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rtz
562 ; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
563 ; RV32IZFINXZDINX-NEXT: seqz a0, a0
564 ; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
565 ; RV32IZFINXZDINX-NEXT: and a0, a0, a2
566 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
567 ; RV32IZFINXZDINX-NEXT: ret
569 ; RV64IZFINXZDINX-LABEL: test_trunc_si32:
570 ; RV64IZFINXZDINX: # %bb.0:
571 ; RV64IZFINXZDINX-NEXT: fcvt.w.d a1, a0, rtz
572 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
573 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
574 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
575 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
576 ; RV64IZFINXZDINX-NEXT: ret
577 %a = call double @llvm.trunc.f64(double %x)
578 %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
582 define i64 @test_trunc_si64(double %x) nounwind {
583 ; RV32IFD-LABEL: test_trunc_si64:
585 ; RV32IFD-NEXT: addi sp, sp, -16
586 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
587 ; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
588 ; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
589 ; RV32IFD-NEXT: call trunc
590 ; RV32IFD-NEXT: lui a0, %hi(.LCPI9_0)
591 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI9_0)(a0)
592 ; RV32IFD-NEXT: fmv.d fs0, fa0
593 ; RV32IFD-NEXT: fle.d s0, fa5, fa0
594 ; RV32IFD-NEXT: call __fixdfdi
595 ; RV32IFD-NEXT: lui a4, 524288
596 ; RV32IFD-NEXT: lui a2, 524288
597 ; RV32IFD-NEXT: beqz s0, .LBB9_2
598 ; RV32IFD-NEXT: # %bb.1:
599 ; RV32IFD-NEXT: mv a2, a1
600 ; RV32IFD-NEXT: .LBB9_2:
601 ; RV32IFD-NEXT: lui a1, %hi(.LCPI9_1)
602 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI9_1)(a1)
603 ; RV32IFD-NEXT: flt.d a3, fa5, fs0
604 ; RV32IFD-NEXT: beqz a3, .LBB9_4
605 ; RV32IFD-NEXT: # %bb.3:
606 ; RV32IFD-NEXT: addi a2, a4, -1
607 ; RV32IFD-NEXT: .LBB9_4:
608 ; RV32IFD-NEXT: feq.d a1, fs0, fs0
609 ; RV32IFD-NEXT: neg a4, a1
610 ; RV32IFD-NEXT: and a1, a4, a2
611 ; RV32IFD-NEXT: neg a2, a3
612 ; RV32IFD-NEXT: neg a3, s0
613 ; RV32IFD-NEXT: and a0, a3, a0
614 ; RV32IFD-NEXT: or a0, a2, a0
615 ; RV32IFD-NEXT: and a0, a4, a0
616 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
617 ; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
618 ; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
619 ; RV32IFD-NEXT: addi sp, sp, 16
622 ; RV64IFD-LABEL: test_trunc_si64:
624 ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
625 ; RV64IFD-NEXT: feq.d a1, fa0, fa0
626 ; RV64IFD-NEXT: seqz a1, a1
627 ; RV64IFD-NEXT: addi a1, a1, -1
628 ; RV64IFD-NEXT: and a0, a1, a0
631 ; RV32IZFINXZDINX-LABEL: test_trunc_si64:
632 ; RV32IZFINXZDINX: # %bb.0:
633 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
634 ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
635 ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
636 ; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
637 ; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
638 ; RV32IZFINXZDINX-NEXT: call trunc
639 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
640 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
641 ; RV32IZFINXZDINX-NEXT: lw s2, 8(sp)
642 ; RV32IZFINXZDINX-NEXT: lw s3, 12(sp)
643 ; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI9_0)
644 ; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI9_0+4)(a2)
645 ; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI9_0)(a2)
646 ; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2
647 ; RV32IZFINXZDINX-NEXT: call __fixdfdi
648 ; RV32IZFINXZDINX-NEXT: lui a4, 524288
649 ; RV32IZFINXZDINX-NEXT: lui a2, 524288
650 ; RV32IZFINXZDINX-NEXT: beqz s0, .LBB9_2
651 ; RV32IZFINXZDINX-NEXT: # %bb.1:
652 ; RV32IZFINXZDINX-NEXT: mv a2, a1
653 ; RV32IZFINXZDINX-NEXT: .LBB9_2:
654 ; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI9_1)
655 ; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI9_1)(a1)
656 ; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI9_1+4)(a1)
657 ; RV32IZFINXZDINX-NEXT: flt.d a3, a6, s2
658 ; RV32IZFINXZDINX-NEXT: beqz a3, .LBB9_4
659 ; RV32IZFINXZDINX-NEXT: # %bb.3:
660 ; RV32IZFINXZDINX-NEXT: addi a2, a4, -1
661 ; RV32IZFINXZDINX-NEXT: .LBB9_4:
662 ; RV32IZFINXZDINX-NEXT: feq.d a1, s2, s2
663 ; RV32IZFINXZDINX-NEXT: neg a4, a1
664 ; RV32IZFINXZDINX-NEXT: and a1, a4, a2
665 ; RV32IZFINXZDINX-NEXT: neg a2, s0
666 ; RV32IZFINXZDINX-NEXT: and a0, a2, a0
667 ; RV32IZFINXZDINX-NEXT: neg a2, a3
668 ; RV32IZFINXZDINX-NEXT: or a0, a2, a0
669 ; RV32IZFINXZDINX-NEXT: and a0, a4, a0
670 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
671 ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
672 ; RV32IZFINXZDINX-NEXT: lw s2, 20(sp) # 4-byte Folded Reload
673 ; RV32IZFINXZDINX-NEXT: lw s3, 16(sp) # 4-byte Folded Reload
674 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
675 ; RV32IZFINXZDINX-NEXT: ret
677 ; RV64IZFINXZDINX-LABEL: test_trunc_si64:
678 ; RV64IZFINXZDINX: # %bb.0:
679 ; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rtz
680 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
681 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
682 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
683 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
684 ; RV64IZFINXZDINX-NEXT: ret
685 %a = call double @llvm.trunc.f64(double %x)
686 %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
690 define signext i32 @test_trunc_ui32(double %x) {
691 ; CHECKIFD-LABEL: test_trunc_ui32:
693 ; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz
694 ; CHECKIFD-NEXT: feq.d a1, fa0, fa0
695 ; CHECKIFD-NEXT: seqz a1, a1
696 ; CHECKIFD-NEXT: addi a1, a1, -1
697 ; CHECKIFD-NEXT: and a0, a1, a0
700 ; RV32IZFINXZDINX-LABEL: test_trunc_ui32:
701 ; RV32IZFINXZDINX: # %bb.0:
702 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
703 ; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
704 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
705 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
706 ; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
707 ; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
708 ; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rtz
709 ; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
710 ; RV32IZFINXZDINX-NEXT: seqz a0, a0
711 ; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
712 ; RV32IZFINXZDINX-NEXT: and a0, a0, a2
713 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
714 ; RV32IZFINXZDINX-NEXT: ret
716 ; RV64IZFINXZDINX-LABEL: test_trunc_ui32:
717 ; RV64IZFINXZDINX: # %bb.0:
718 ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a1, a0, rtz
719 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
720 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
721 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
722 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
723 ; RV64IZFINXZDINX-NEXT: ret
724 %a = call double @llvm.trunc.f64(double %x)
725 %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
729 define i64 @test_trunc_ui64(double %x) nounwind {
730 ; RV32IFD-LABEL: test_trunc_ui64:
732 ; RV32IFD-NEXT: addi sp, sp, -16
733 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
734 ; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
735 ; RV32IFD-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
736 ; RV32IFD-NEXT: call trunc
737 ; RV32IFD-NEXT: lui a0, %hi(.LCPI11_0)
738 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
739 ; RV32IFD-NEXT: flt.d a0, fa5, fa0
740 ; RV32IFD-NEXT: neg s0, a0
741 ; RV32IFD-NEXT: fcvt.d.w fa5, zero
742 ; RV32IFD-NEXT: fle.d a0, fa5, fa0
743 ; RV32IFD-NEXT: neg s1, a0
744 ; RV32IFD-NEXT: call __fixunsdfdi
745 ; RV32IFD-NEXT: and a0, s1, a0
746 ; RV32IFD-NEXT: or a0, s0, a0
747 ; RV32IFD-NEXT: and a1, s1, a1
748 ; RV32IFD-NEXT: or a1, s0, a1
749 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
750 ; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
751 ; RV32IFD-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
752 ; RV32IFD-NEXT: addi sp, sp, 16
755 ; RV64IFD-LABEL: test_trunc_ui64:
757 ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz
758 ; RV64IFD-NEXT: feq.d a1, fa0, fa0
759 ; RV64IFD-NEXT: seqz a1, a1
760 ; RV64IFD-NEXT: addi a1, a1, -1
761 ; RV64IFD-NEXT: and a0, a1, a0
764 ; RV32IZFINXZDINX-LABEL: test_trunc_ui64:
765 ; RV32IZFINXZDINX: # %bb.0:
766 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
767 ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
768 ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
769 ; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
770 ; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
771 ; RV32IZFINXZDINX-NEXT: call trunc
772 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
773 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
774 ; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
775 ; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
776 ; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
777 ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
778 ; RV32IZFINXZDINX-NEXT: neg s2, a2
779 ; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
780 ; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI11_0)
781 ; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI11_0+4)(a2)
782 ; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI11_0)(a2)
783 ; RV32IZFINXZDINX-NEXT: and a0, s2, a0
784 ; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
785 ; RV32IZFINXZDINX-NEXT: neg a2, a2
786 ; RV32IZFINXZDINX-NEXT: or a0, a2, a0
787 ; RV32IZFINXZDINX-NEXT: and a1, s2, a1
788 ; RV32IZFINXZDINX-NEXT: or a1, a2, a1
789 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
790 ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
791 ; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
792 ; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
793 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
794 ; RV32IZFINXZDINX-NEXT: ret
796 ; RV64IZFINXZDINX-LABEL: test_trunc_ui64:
797 ; RV64IZFINXZDINX: # %bb.0:
798 ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a1, a0, rtz
799 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
800 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
801 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
802 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
803 ; RV64IZFINXZDINX-NEXT: ret
804 %a = call double @llvm.trunc.f64(double %x)
805 %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
809 define signext i32 @test_round_si32(double %x) {
810 ; CHECKIFD-LABEL: test_round_si32:
812 ; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rmm
813 ; CHECKIFD-NEXT: feq.d a1, fa0, fa0
814 ; CHECKIFD-NEXT: seqz a1, a1
815 ; CHECKIFD-NEXT: addi a1, a1, -1
816 ; CHECKIFD-NEXT: and a0, a1, a0
819 ; RV32IZFINXZDINX-LABEL: test_round_si32:
820 ; RV32IZFINXZDINX: # %bb.0:
821 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
822 ; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
823 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
824 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
825 ; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
826 ; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
827 ; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rmm
828 ; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
829 ; RV32IZFINXZDINX-NEXT: seqz a0, a0
830 ; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
831 ; RV32IZFINXZDINX-NEXT: and a0, a0, a2
832 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
833 ; RV32IZFINXZDINX-NEXT: ret
835 ; RV64IZFINXZDINX-LABEL: test_round_si32:
836 ; RV64IZFINXZDINX: # %bb.0:
837 ; RV64IZFINXZDINX-NEXT: fcvt.w.d a1, a0, rmm
838 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
839 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
840 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
841 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
842 ; RV64IZFINXZDINX-NEXT: ret
843 %a = call double @llvm.round.f64(double %x)
844 %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
848 define i64 @test_round_si64(double %x) nounwind {
849 ; RV32IFD-LABEL: test_round_si64:
851 ; RV32IFD-NEXT: addi sp, sp, -16
852 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
853 ; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
854 ; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
855 ; RV32IFD-NEXT: call round
856 ; RV32IFD-NEXT: lui a0, %hi(.LCPI13_0)
857 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
858 ; RV32IFD-NEXT: fmv.d fs0, fa0
859 ; RV32IFD-NEXT: fle.d s0, fa5, fa0
860 ; RV32IFD-NEXT: call __fixdfdi
861 ; RV32IFD-NEXT: lui a4, 524288
862 ; RV32IFD-NEXT: lui a2, 524288
863 ; RV32IFD-NEXT: beqz s0, .LBB13_2
864 ; RV32IFD-NEXT: # %bb.1:
865 ; RV32IFD-NEXT: mv a2, a1
866 ; RV32IFD-NEXT: .LBB13_2:
867 ; RV32IFD-NEXT: lui a1, %hi(.LCPI13_1)
868 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI13_1)(a1)
869 ; RV32IFD-NEXT: flt.d a3, fa5, fs0
870 ; RV32IFD-NEXT: beqz a3, .LBB13_4
871 ; RV32IFD-NEXT: # %bb.3:
872 ; RV32IFD-NEXT: addi a2, a4, -1
873 ; RV32IFD-NEXT: .LBB13_4:
874 ; RV32IFD-NEXT: feq.d a1, fs0, fs0
875 ; RV32IFD-NEXT: neg a4, a1
876 ; RV32IFD-NEXT: and a1, a4, a2
877 ; RV32IFD-NEXT: neg a2, a3
878 ; RV32IFD-NEXT: neg a3, s0
879 ; RV32IFD-NEXT: and a0, a3, a0
880 ; RV32IFD-NEXT: or a0, a2, a0
881 ; RV32IFD-NEXT: and a0, a4, a0
882 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
883 ; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
884 ; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
885 ; RV32IFD-NEXT: addi sp, sp, 16
888 ; RV64IFD-LABEL: test_round_si64:
890 ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm
891 ; RV64IFD-NEXT: feq.d a1, fa0, fa0
892 ; RV64IFD-NEXT: seqz a1, a1
893 ; RV64IFD-NEXT: addi a1, a1, -1
894 ; RV64IFD-NEXT: and a0, a1, a0
897 ; RV32IZFINXZDINX-LABEL: test_round_si64:
898 ; RV32IZFINXZDINX: # %bb.0:
899 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
900 ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
901 ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
902 ; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
903 ; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
904 ; RV32IZFINXZDINX-NEXT: call round
905 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
906 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
907 ; RV32IZFINXZDINX-NEXT: lw s2, 8(sp)
908 ; RV32IZFINXZDINX-NEXT: lw s3, 12(sp)
909 ; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI13_0)
910 ; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI13_0+4)(a2)
911 ; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI13_0)(a2)
912 ; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2
913 ; RV32IZFINXZDINX-NEXT: call __fixdfdi
914 ; RV32IZFINXZDINX-NEXT: lui a4, 524288
915 ; RV32IZFINXZDINX-NEXT: lui a2, 524288
916 ; RV32IZFINXZDINX-NEXT: beqz s0, .LBB13_2
917 ; RV32IZFINXZDINX-NEXT: # %bb.1:
918 ; RV32IZFINXZDINX-NEXT: mv a2, a1
919 ; RV32IZFINXZDINX-NEXT: .LBB13_2:
920 ; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI13_1)
921 ; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI13_1)(a1)
922 ; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI13_1+4)(a1)
923 ; RV32IZFINXZDINX-NEXT: flt.d a3, a6, s2
924 ; RV32IZFINXZDINX-NEXT: beqz a3, .LBB13_4
925 ; RV32IZFINXZDINX-NEXT: # %bb.3:
926 ; RV32IZFINXZDINX-NEXT: addi a2, a4, -1
927 ; RV32IZFINXZDINX-NEXT: .LBB13_4:
928 ; RV32IZFINXZDINX-NEXT: feq.d a1, s2, s2
929 ; RV32IZFINXZDINX-NEXT: neg a4, a1
930 ; RV32IZFINXZDINX-NEXT: and a1, a4, a2
931 ; RV32IZFINXZDINX-NEXT: neg a2, s0
932 ; RV32IZFINXZDINX-NEXT: and a0, a2, a0
933 ; RV32IZFINXZDINX-NEXT: neg a2, a3
934 ; RV32IZFINXZDINX-NEXT: or a0, a2, a0
935 ; RV32IZFINXZDINX-NEXT: and a0, a4, a0
936 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
937 ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
938 ; RV32IZFINXZDINX-NEXT: lw s2, 20(sp) # 4-byte Folded Reload
939 ; RV32IZFINXZDINX-NEXT: lw s3, 16(sp) # 4-byte Folded Reload
940 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
941 ; RV32IZFINXZDINX-NEXT: ret
943 ; RV64IZFINXZDINX-LABEL: test_round_si64:
944 ; RV64IZFINXZDINX: # %bb.0:
945 ; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rmm
946 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
947 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
948 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
949 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
950 ; RV64IZFINXZDINX-NEXT: ret
951 %a = call double @llvm.round.f64(double %x)
952 %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
956 define signext i32 @test_round_ui32(double %x) {
957 ; CHECKIFD-LABEL: test_round_ui32:
959 ; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rmm
960 ; CHECKIFD-NEXT: feq.d a1, fa0, fa0
961 ; CHECKIFD-NEXT: seqz a1, a1
962 ; CHECKIFD-NEXT: addi a1, a1, -1
963 ; CHECKIFD-NEXT: and a0, a1, a0
966 ; RV32IZFINXZDINX-LABEL: test_round_ui32:
967 ; RV32IZFINXZDINX: # %bb.0:
968 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
969 ; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
970 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
971 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
972 ; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
973 ; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
974 ; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rmm
975 ; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
976 ; RV32IZFINXZDINX-NEXT: seqz a0, a0
977 ; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
978 ; RV32IZFINXZDINX-NEXT: and a0, a0, a2
979 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
980 ; RV32IZFINXZDINX-NEXT: ret
982 ; RV64IZFINXZDINX-LABEL: test_round_ui32:
983 ; RV64IZFINXZDINX: # %bb.0:
984 ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a1, a0, rmm
985 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
986 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
987 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
988 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
989 ; RV64IZFINXZDINX-NEXT: ret
990 %a = call double @llvm.round.f64(double %x)
991 %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
995 define i64 @test_round_ui64(double %x) nounwind {
996 ; RV32IFD-LABEL: test_round_ui64:
998 ; RV32IFD-NEXT: addi sp, sp, -16
999 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1000 ; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1001 ; RV32IFD-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
1002 ; RV32IFD-NEXT: call round
1003 ; RV32IFD-NEXT: lui a0, %hi(.LCPI15_0)
1004 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI15_0)(a0)
1005 ; RV32IFD-NEXT: flt.d a0, fa5, fa0
1006 ; RV32IFD-NEXT: neg s0, a0
1007 ; RV32IFD-NEXT: fcvt.d.w fa5, zero
1008 ; RV32IFD-NEXT: fle.d a0, fa5, fa0
1009 ; RV32IFD-NEXT: neg s1, a0
1010 ; RV32IFD-NEXT: call __fixunsdfdi
1011 ; RV32IFD-NEXT: and a0, s1, a0
1012 ; RV32IFD-NEXT: or a0, s0, a0
1013 ; RV32IFD-NEXT: and a1, s1, a1
1014 ; RV32IFD-NEXT: or a1, s0, a1
1015 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1016 ; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1017 ; RV32IFD-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
1018 ; RV32IFD-NEXT: addi sp, sp, 16
1021 ; RV64IFD-LABEL: test_round_ui64:
1023 ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rmm
1024 ; RV64IFD-NEXT: feq.d a1, fa0, fa0
1025 ; RV64IFD-NEXT: seqz a1, a1
1026 ; RV64IFD-NEXT: addi a1, a1, -1
1027 ; RV64IFD-NEXT: and a0, a1, a0
1030 ; RV32IZFINXZDINX-LABEL: test_round_ui64:
1031 ; RV32IZFINXZDINX: # %bb.0:
1032 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
1033 ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
1034 ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
1035 ; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
1036 ; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
1037 ; RV32IZFINXZDINX-NEXT: call round
1038 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
1039 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
1040 ; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
1041 ; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
1042 ; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
1043 ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
1044 ; RV32IZFINXZDINX-NEXT: neg s2, a2
1045 ; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
1046 ; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI15_0)
1047 ; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI15_0+4)(a2)
1048 ; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI15_0)(a2)
1049 ; RV32IZFINXZDINX-NEXT: and a0, s2, a0
1050 ; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
1051 ; RV32IZFINXZDINX-NEXT: neg a2, a2
1052 ; RV32IZFINXZDINX-NEXT: or a0, a2, a0
1053 ; RV32IZFINXZDINX-NEXT: and a1, s2, a1
1054 ; RV32IZFINXZDINX-NEXT: or a1, a2, a1
1055 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
1056 ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
1057 ; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
1058 ; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
1059 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
1060 ; RV32IZFINXZDINX-NEXT: ret
1062 ; RV64IZFINXZDINX-LABEL: test_round_ui64:
1063 ; RV64IZFINXZDINX: # %bb.0:
1064 ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a1, a0, rmm
1065 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
1066 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
1067 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
1068 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
1069 ; RV64IZFINXZDINX-NEXT: ret
1070 %a = call double @llvm.round.f64(double %x)
1071 %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
1075 define signext i32 @test_roundeven_si32(double %x) {
1076 ; CHECKIFD-LABEL: test_roundeven_si32:
1077 ; CHECKIFD: # %bb.0:
1078 ; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rne
1079 ; CHECKIFD-NEXT: feq.d a1, fa0, fa0
1080 ; CHECKIFD-NEXT: seqz a1, a1
1081 ; CHECKIFD-NEXT: addi a1, a1, -1
1082 ; CHECKIFD-NEXT: and a0, a1, a0
1083 ; CHECKIFD-NEXT: ret
1085 ; RV32IZFINXZDINX-LABEL: test_roundeven_si32:
1086 ; RV32IZFINXZDINX: # %bb.0:
1087 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
1088 ; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
1089 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
1090 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
1091 ; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
1092 ; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
1093 ; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rne
1094 ; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
1095 ; RV32IZFINXZDINX-NEXT: seqz a0, a0
1096 ; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
1097 ; RV32IZFINXZDINX-NEXT: and a0, a0, a2
1098 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
1099 ; RV32IZFINXZDINX-NEXT: ret
1101 ; RV64IZFINXZDINX-LABEL: test_roundeven_si32:
1102 ; RV64IZFINXZDINX: # %bb.0:
1103 ; RV64IZFINXZDINX-NEXT: fcvt.w.d a1, a0, rne
1104 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
1105 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
1106 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
1107 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
1108 ; RV64IZFINXZDINX-NEXT: ret
1109 %a = call double @llvm.roundeven.f64(double %x)
1110 %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
1114 define i64 @test_roundeven_si64(double %x) nounwind {
1115 ; RV32IFD-LABEL: test_roundeven_si64:
1117 ; RV32IFD-NEXT: addi sp, sp, -16
1118 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1119 ; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1120 ; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
1121 ; RV32IFD-NEXT: call roundeven
1122 ; RV32IFD-NEXT: lui a0, %hi(.LCPI17_0)
1123 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
1124 ; RV32IFD-NEXT: fmv.d fs0, fa0
1125 ; RV32IFD-NEXT: fle.d s0, fa5, fa0
1126 ; RV32IFD-NEXT: call __fixdfdi
1127 ; RV32IFD-NEXT: lui a4, 524288
1128 ; RV32IFD-NEXT: lui a2, 524288
1129 ; RV32IFD-NEXT: beqz s0, .LBB17_2
1130 ; RV32IFD-NEXT: # %bb.1:
1131 ; RV32IFD-NEXT: mv a2, a1
1132 ; RV32IFD-NEXT: .LBB17_2:
1133 ; RV32IFD-NEXT: lui a1, %hi(.LCPI17_1)
1134 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI17_1)(a1)
1135 ; RV32IFD-NEXT: flt.d a3, fa5, fs0
1136 ; RV32IFD-NEXT: beqz a3, .LBB17_4
1137 ; RV32IFD-NEXT: # %bb.3:
1138 ; RV32IFD-NEXT: addi a2, a4, -1
1139 ; RV32IFD-NEXT: .LBB17_4:
1140 ; RV32IFD-NEXT: feq.d a1, fs0, fs0
1141 ; RV32IFD-NEXT: neg a4, a1
1142 ; RV32IFD-NEXT: and a1, a4, a2
1143 ; RV32IFD-NEXT: neg a2, a3
1144 ; RV32IFD-NEXT: neg a3, s0
1145 ; RV32IFD-NEXT: and a0, a3, a0
1146 ; RV32IFD-NEXT: or a0, a2, a0
1147 ; RV32IFD-NEXT: and a0, a4, a0
1148 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1149 ; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1150 ; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
1151 ; RV32IFD-NEXT: addi sp, sp, 16
1154 ; RV64IFD-LABEL: test_roundeven_si64:
1156 ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rne
1157 ; RV64IFD-NEXT: feq.d a1, fa0, fa0
1158 ; RV64IFD-NEXT: seqz a1, a1
1159 ; RV64IFD-NEXT: addi a1, a1, -1
1160 ; RV64IFD-NEXT: and a0, a1, a0
1163 ; RV32IZFINXZDINX-LABEL: test_roundeven_si64:
1164 ; RV32IZFINXZDINX: # %bb.0:
1165 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
1166 ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
1167 ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
1168 ; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
1169 ; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
1170 ; RV32IZFINXZDINX-NEXT: call roundeven
1171 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
1172 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
1173 ; RV32IZFINXZDINX-NEXT: lw s2, 8(sp)
1174 ; RV32IZFINXZDINX-NEXT: lw s3, 12(sp)
1175 ; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI17_0)
1176 ; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI17_0+4)(a2)
1177 ; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI17_0)(a2)
1178 ; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2
1179 ; RV32IZFINXZDINX-NEXT: call __fixdfdi
1180 ; RV32IZFINXZDINX-NEXT: lui a4, 524288
1181 ; RV32IZFINXZDINX-NEXT: lui a2, 524288
1182 ; RV32IZFINXZDINX-NEXT: beqz s0, .LBB17_2
1183 ; RV32IZFINXZDINX-NEXT: # %bb.1:
1184 ; RV32IZFINXZDINX-NEXT: mv a2, a1
1185 ; RV32IZFINXZDINX-NEXT: .LBB17_2:
1186 ; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI17_1)
1187 ; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI17_1)(a1)
1188 ; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI17_1+4)(a1)
1189 ; RV32IZFINXZDINX-NEXT: flt.d a3, a6, s2
1190 ; RV32IZFINXZDINX-NEXT: beqz a3, .LBB17_4
1191 ; RV32IZFINXZDINX-NEXT: # %bb.3:
1192 ; RV32IZFINXZDINX-NEXT: addi a2, a4, -1
1193 ; RV32IZFINXZDINX-NEXT: .LBB17_4:
1194 ; RV32IZFINXZDINX-NEXT: feq.d a1, s2, s2
1195 ; RV32IZFINXZDINX-NEXT: neg a4, a1
1196 ; RV32IZFINXZDINX-NEXT: and a1, a4, a2
1197 ; RV32IZFINXZDINX-NEXT: neg a2, s0
1198 ; RV32IZFINXZDINX-NEXT: and a0, a2, a0
1199 ; RV32IZFINXZDINX-NEXT: neg a2, a3
1200 ; RV32IZFINXZDINX-NEXT: or a0, a2, a0
1201 ; RV32IZFINXZDINX-NEXT: and a0, a4, a0
1202 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
1203 ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
1204 ; RV32IZFINXZDINX-NEXT: lw s2, 20(sp) # 4-byte Folded Reload
1205 ; RV32IZFINXZDINX-NEXT: lw s3, 16(sp) # 4-byte Folded Reload
1206 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
1207 ; RV32IZFINXZDINX-NEXT: ret
1209 ; RV64IZFINXZDINX-LABEL: test_roundeven_si64:
1210 ; RV64IZFINXZDINX: # %bb.0:
1211 ; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rne
1212 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
1213 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
1214 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
1215 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
1216 ; RV64IZFINXZDINX-NEXT: ret
1217 %a = call double @llvm.roundeven.f64(double %x)
1218 %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
1222 define signext i32 @test_roundeven_ui32(double %x) {
1223 ; CHECKIFD-LABEL: test_roundeven_ui32:
1224 ; CHECKIFD: # %bb.0:
1225 ; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rne
1226 ; CHECKIFD-NEXT: feq.d a1, fa0, fa0
1227 ; CHECKIFD-NEXT: seqz a1, a1
1228 ; CHECKIFD-NEXT: addi a1, a1, -1
1229 ; CHECKIFD-NEXT: and a0, a1, a0
1230 ; CHECKIFD-NEXT: ret
1232 ; RV32IZFINXZDINX-LABEL: test_roundeven_ui32:
1233 ; RV32IZFINXZDINX: # %bb.0:
1234 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
1235 ; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
1236 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
1237 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
1238 ; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
1239 ; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
1240 ; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rne
1241 ; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
1242 ; RV32IZFINXZDINX-NEXT: seqz a0, a0
1243 ; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
1244 ; RV32IZFINXZDINX-NEXT: and a0, a0, a2
1245 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
1246 ; RV32IZFINXZDINX-NEXT: ret
1248 ; RV64IZFINXZDINX-LABEL: test_roundeven_ui32:
1249 ; RV64IZFINXZDINX: # %bb.0:
1250 ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a1, a0, rne
1251 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
1252 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
1253 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
1254 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
1255 ; RV64IZFINXZDINX-NEXT: ret
1256 %a = call double @llvm.roundeven.f64(double %x)
1257 %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
1261 define i64 @test_roundeven_ui64(double %x) nounwind {
1262 ; RV32IFD-LABEL: test_roundeven_ui64:
1264 ; RV32IFD-NEXT: addi sp, sp, -16
1265 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1266 ; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1267 ; RV32IFD-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
1268 ; RV32IFD-NEXT: call roundeven
1269 ; RV32IFD-NEXT: lui a0, %hi(.LCPI19_0)
1270 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
1271 ; RV32IFD-NEXT: flt.d a0, fa5, fa0
1272 ; RV32IFD-NEXT: neg s0, a0
1273 ; RV32IFD-NEXT: fcvt.d.w fa5, zero
1274 ; RV32IFD-NEXT: fle.d a0, fa5, fa0
1275 ; RV32IFD-NEXT: neg s1, a0
1276 ; RV32IFD-NEXT: call __fixunsdfdi
1277 ; RV32IFD-NEXT: and a0, s1, a0
1278 ; RV32IFD-NEXT: or a0, s0, a0
1279 ; RV32IFD-NEXT: and a1, s1, a1
1280 ; RV32IFD-NEXT: or a1, s0, a1
1281 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1282 ; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1283 ; RV32IFD-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
1284 ; RV32IFD-NEXT: addi sp, sp, 16
1287 ; RV64IFD-LABEL: test_roundeven_ui64:
1289 ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rne
1290 ; RV64IFD-NEXT: feq.d a1, fa0, fa0
1291 ; RV64IFD-NEXT: seqz a1, a1
1292 ; RV64IFD-NEXT: addi a1, a1, -1
1293 ; RV64IFD-NEXT: and a0, a1, a0
1296 ; RV32IZFINXZDINX-LABEL: test_roundeven_ui64:
1297 ; RV32IZFINXZDINX: # %bb.0:
1298 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
1299 ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
1300 ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
1301 ; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
1302 ; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
1303 ; RV32IZFINXZDINX-NEXT: call roundeven
1304 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
1305 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
1306 ; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
1307 ; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
1308 ; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
1309 ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
1310 ; RV32IZFINXZDINX-NEXT: neg s2, a2
1311 ; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
1312 ; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI19_0)
1313 ; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI19_0+4)(a2)
1314 ; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI19_0)(a2)
1315 ; RV32IZFINXZDINX-NEXT: and a0, s2, a0
1316 ; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
1317 ; RV32IZFINXZDINX-NEXT: neg a2, a2
1318 ; RV32IZFINXZDINX-NEXT: or a0, a2, a0
1319 ; RV32IZFINXZDINX-NEXT: and a1, s2, a1
1320 ; RV32IZFINXZDINX-NEXT: or a1, a2, a1
1321 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
1322 ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
1323 ; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
1324 ; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
1325 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
1326 ; RV32IZFINXZDINX-NEXT: ret
1328 ; RV64IZFINXZDINX-LABEL: test_roundeven_ui64:
1329 ; RV64IZFINXZDINX: # %bb.0:
1330 ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a1, a0, rne
1331 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
1332 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
1333 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
1334 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
1335 ; RV64IZFINXZDINX-NEXT: ret
1336 %a = call double @llvm.roundeven.f64(double %x)
1337 %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
1341 define signext i32 @test_rint_si32(double %x) {
1342 ; CHECKIFD-LABEL: test_rint_si32:
1343 ; CHECKIFD: # %bb.0:
1344 ; CHECKIFD-NEXT: fcvt.w.d a0, fa0
1345 ; CHECKIFD-NEXT: feq.d a1, fa0, fa0
1346 ; CHECKIFD-NEXT: seqz a1, a1
1347 ; CHECKIFD-NEXT: addi a1, a1, -1
1348 ; CHECKIFD-NEXT: and a0, a1, a0
1349 ; CHECKIFD-NEXT: ret
1351 ; RV32IZFINXZDINX-LABEL: test_rint_si32:
1352 ; RV32IZFINXZDINX: # %bb.0:
1353 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
1354 ; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
1355 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
1356 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
1357 ; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
1358 ; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
1359 ; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0
1360 ; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
1361 ; RV32IZFINXZDINX-NEXT: seqz a0, a0
1362 ; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
1363 ; RV32IZFINXZDINX-NEXT: and a0, a0, a2
1364 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
1365 ; RV32IZFINXZDINX-NEXT: ret
1367 ; RV64IZFINXZDINX-LABEL: test_rint_si32:
1368 ; RV64IZFINXZDINX: # %bb.0:
1369 ; RV64IZFINXZDINX-NEXT: fcvt.w.d a1, a0
1370 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
1371 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
1372 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
1373 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
1374 ; RV64IZFINXZDINX-NEXT: ret
1375 %a = call double @llvm.rint.f64(double %x)
1376 %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
1380 define i64 @test_rint_si64(double %x) nounwind {
1381 ; RV32IFD-LABEL: test_rint_si64:
1383 ; RV32IFD-NEXT: addi sp, sp, -16
1384 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1385 ; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1386 ; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
1387 ; RV32IFD-NEXT: call rint
1388 ; RV32IFD-NEXT: lui a0, %hi(.LCPI21_0)
1389 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI21_0)(a0)
1390 ; RV32IFD-NEXT: fmv.d fs0, fa0
1391 ; RV32IFD-NEXT: fle.d s0, fa5, fa0
1392 ; RV32IFD-NEXT: call __fixdfdi
1393 ; RV32IFD-NEXT: lui a4, 524288
1394 ; RV32IFD-NEXT: lui a2, 524288
1395 ; RV32IFD-NEXT: beqz s0, .LBB21_2
1396 ; RV32IFD-NEXT: # %bb.1:
1397 ; RV32IFD-NEXT: mv a2, a1
1398 ; RV32IFD-NEXT: .LBB21_2:
1399 ; RV32IFD-NEXT: lui a1, %hi(.LCPI21_1)
1400 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI21_1)(a1)
1401 ; RV32IFD-NEXT: flt.d a3, fa5, fs0
1402 ; RV32IFD-NEXT: beqz a3, .LBB21_4
1403 ; RV32IFD-NEXT: # %bb.3:
1404 ; RV32IFD-NEXT: addi a2, a4, -1
1405 ; RV32IFD-NEXT: .LBB21_4:
1406 ; RV32IFD-NEXT: feq.d a1, fs0, fs0
1407 ; RV32IFD-NEXT: neg a4, a1
1408 ; RV32IFD-NEXT: and a1, a4, a2
1409 ; RV32IFD-NEXT: neg a2, a3
1410 ; RV32IFD-NEXT: neg a3, s0
1411 ; RV32IFD-NEXT: and a0, a3, a0
1412 ; RV32IFD-NEXT: or a0, a2, a0
1413 ; RV32IFD-NEXT: and a0, a4, a0
1414 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1415 ; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1416 ; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
1417 ; RV32IFD-NEXT: addi sp, sp, 16
1420 ; RV64IFD-LABEL: test_rint_si64:
1422 ; RV64IFD-NEXT: fcvt.l.d a0, fa0
1423 ; RV64IFD-NEXT: feq.d a1, fa0, fa0
1424 ; RV64IFD-NEXT: seqz a1, a1
1425 ; RV64IFD-NEXT: addi a1, a1, -1
1426 ; RV64IFD-NEXT: and a0, a1, a0
1429 ; RV32IZFINXZDINX-LABEL: test_rint_si64:
1430 ; RV32IZFINXZDINX: # %bb.0:
1431 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
1432 ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
1433 ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
1434 ; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
1435 ; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
1436 ; RV32IZFINXZDINX-NEXT: call rint
1437 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
1438 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
1439 ; RV32IZFINXZDINX-NEXT: lw s2, 8(sp)
1440 ; RV32IZFINXZDINX-NEXT: lw s3, 12(sp)
1441 ; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI21_0)
1442 ; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI21_0+4)(a2)
1443 ; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI21_0)(a2)
1444 ; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2
1445 ; RV32IZFINXZDINX-NEXT: call __fixdfdi
1446 ; RV32IZFINXZDINX-NEXT: lui a4, 524288
1447 ; RV32IZFINXZDINX-NEXT: lui a2, 524288
1448 ; RV32IZFINXZDINX-NEXT: beqz s0, .LBB21_2
1449 ; RV32IZFINXZDINX-NEXT: # %bb.1:
1450 ; RV32IZFINXZDINX-NEXT: mv a2, a1
1451 ; RV32IZFINXZDINX-NEXT: .LBB21_2:
1452 ; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI21_1)
1453 ; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI21_1)(a1)
1454 ; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI21_1+4)(a1)
1455 ; RV32IZFINXZDINX-NEXT: flt.d a3, a6, s2
1456 ; RV32IZFINXZDINX-NEXT: beqz a3, .LBB21_4
1457 ; RV32IZFINXZDINX-NEXT: # %bb.3:
1458 ; RV32IZFINXZDINX-NEXT: addi a2, a4, -1
1459 ; RV32IZFINXZDINX-NEXT: .LBB21_4:
1460 ; RV32IZFINXZDINX-NEXT: feq.d a1, s2, s2
1461 ; RV32IZFINXZDINX-NEXT: neg a4, a1
1462 ; RV32IZFINXZDINX-NEXT: and a1, a4, a2
1463 ; RV32IZFINXZDINX-NEXT: neg a2, s0
1464 ; RV32IZFINXZDINX-NEXT: and a0, a2, a0
1465 ; RV32IZFINXZDINX-NEXT: neg a2, a3
1466 ; RV32IZFINXZDINX-NEXT: or a0, a2, a0
1467 ; RV32IZFINXZDINX-NEXT: and a0, a4, a0
1468 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
1469 ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
1470 ; RV32IZFINXZDINX-NEXT: lw s2, 20(sp) # 4-byte Folded Reload
1471 ; RV32IZFINXZDINX-NEXT: lw s3, 16(sp) # 4-byte Folded Reload
1472 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
1473 ; RV32IZFINXZDINX-NEXT: ret
1475 ; RV64IZFINXZDINX-LABEL: test_rint_si64:
1476 ; RV64IZFINXZDINX: # %bb.0:
1477 ; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0
1478 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
1479 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
1480 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
1481 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
1482 ; RV64IZFINXZDINX-NEXT: ret
1483 %a = call double @llvm.rint.f64(double %x)
1484 %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
1488 define signext i32 @test_rint_ui32(double %x) {
1489 ; CHECKIFD-LABEL: test_rint_ui32:
1490 ; CHECKIFD: # %bb.0:
1491 ; CHECKIFD-NEXT: fcvt.wu.d a0, fa0
1492 ; CHECKIFD-NEXT: feq.d a1, fa0, fa0
1493 ; CHECKIFD-NEXT: seqz a1, a1
1494 ; CHECKIFD-NEXT: addi a1, a1, -1
1495 ; CHECKIFD-NEXT: and a0, a1, a0
1496 ; CHECKIFD-NEXT: ret
1498 ; RV32IZFINXZDINX-LABEL: test_rint_ui32:
1499 ; RV32IZFINXZDINX: # %bb.0:
1500 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
1501 ; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
1502 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
1503 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
1504 ; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
1505 ; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
1506 ; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0
1507 ; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0
1508 ; RV32IZFINXZDINX-NEXT: seqz a0, a0
1509 ; RV32IZFINXZDINX-NEXT: addi a0, a0, -1
1510 ; RV32IZFINXZDINX-NEXT: and a0, a0, a2
1511 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
1512 ; RV32IZFINXZDINX-NEXT: ret
1514 ; RV64IZFINXZDINX-LABEL: test_rint_ui32:
1515 ; RV64IZFINXZDINX: # %bb.0:
1516 ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a1, a0
1517 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
1518 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
1519 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
1520 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
1521 ; RV64IZFINXZDINX-NEXT: ret
1522 %a = call double @llvm.rint.f64(double %x)
1523 %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
1527 define i64 @test_rint_ui64(double %x) nounwind {
1528 ; RV32IFD-LABEL: test_rint_ui64:
1530 ; RV32IFD-NEXT: addi sp, sp, -16
1531 ; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1532 ; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1533 ; RV32IFD-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
1534 ; RV32IFD-NEXT: call rint
1535 ; RV32IFD-NEXT: lui a0, %hi(.LCPI23_0)
1536 ; RV32IFD-NEXT: fld fa5, %lo(.LCPI23_0)(a0)
1537 ; RV32IFD-NEXT: flt.d a0, fa5, fa0
1538 ; RV32IFD-NEXT: neg s0, a0
1539 ; RV32IFD-NEXT: fcvt.d.w fa5, zero
1540 ; RV32IFD-NEXT: fle.d a0, fa5, fa0
1541 ; RV32IFD-NEXT: neg s1, a0
1542 ; RV32IFD-NEXT: call __fixunsdfdi
1543 ; RV32IFD-NEXT: and a0, s1, a0
1544 ; RV32IFD-NEXT: or a0, s0, a0
1545 ; RV32IFD-NEXT: and a1, s1, a1
1546 ; RV32IFD-NEXT: or a1, s0, a1
1547 ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1548 ; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1549 ; RV32IFD-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
1550 ; RV32IFD-NEXT: addi sp, sp, 16
1553 ; RV64IFD-LABEL: test_rint_ui64:
1555 ; RV64IFD-NEXT: fcvt.lu.d a0, fa0
1556 ; RV64IFD-NEXT: feq.d a1, fa0, fa0
1557 ; RV64IFD-NEXT: seqz a1, a1
1558 ; RV64IFD-NEXT: addi a1, a1, -1
1559 ; RV64IFD-NEXT: and a0, a1, a0
1562 ; RV32IZFINXZDINX-LABEL: test_rint_ui64:
1563 ; RV32IZFINXZDINX: # %bb.0:
1564 ; RV32IZFINXZDINX-NEXT: addi sp, sp, -32
1565 ; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
1566 ; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
1567 ; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
1568 ; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
1569 ; RV32IZFINXZDINX-NEXT: call rint
1570 ; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
1571 ; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
1572 ; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
1573 ; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
1574 ; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
1575 ; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
1576 ; RV32IZFINXZDINX-NEXT: neg s2, a2
1577 ; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
1578 ; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI23_0)
1579 ; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI23_0+4)(a2)
1580 ; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI23_0)(a2)
1581 ; RV32IZFINXZDINX-NEXT: and a0, s2, a0
1582 ; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0
1583 ; RV32IZFINXZDINX-NEXT: neg a2, a2
1584 ; RV32IZFINXZDINX-NEXT: or a0, a2, a0
1585 ; RV32IZFINXZDINX-NEXT: and a1, s2, a1
1586 ; RV32IZFINXZDINX-NEXT: or a1, a2, a1
1587 ; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
1588 ; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
1589 ; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
1590 ; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
1591 ; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
1592 ; RV32IZFINXZDINX-NEXT: ret
1594 ; RV64IZFINXZDINX-LABEL: test_rint_ui64:
1595 ; RV64IZFINXZDINX: # %bb.0:
1596 ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a1, a0
1597 ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
1598 ; RV64IZFINXZDINX-NEXT: seqz a0, a0
1599 ; RV64IZFINXZDINX-NEXT: addi a0, a0, -1
1600 ; RV64IZFINXZDINX-NEXT: and a0, a0, a1
1601 ; RV64IZFINXZDINX-NEXT: ret
1602 %a = call double @llvm.rint.f64(double %x)
1603 %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
; Declarations for the intrinsics exercised by the tests above: the f64
; rounding intrinsics (floor/ceil/trunc/round/roundeven/rint) whose results
; are fed into the saturating fp-to-int conversion intrinsics
; (llvm.fptosi.sat / llvm.fptoui.sat) at i32 and i64 widths.
1607 declare double @llvm.floor.f64(double)
1608 declare double @llvm.ceil.f64(double)
1609 declare double @llvm.trunc.f64(double)
1610 declare double @llvm.round.f64(double)
1611 declare double @llvm.roundeven.f64(double)
1612 declare double @llvm.rint.f64(double)
1613 declare i32 @llvm.fptosi.sat.i32.f64(double)
1614 declare i64 @llvm.fptosi.sat.i64.f64(double)
1615 declare i32 @llvm.fptoui.sat.i32.f64(double)
1616 declare i64 @llvm.fptoui.sat.i64.f64(double)