1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=R32
3 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=R64
; Plain (non-fast) maxnum must honor the intrinsic's NaN semantics, so without
; hardware FP both RV32 and RV64 lower it to a libcall to fmaxf.
; NOTE(review): the generator's embedded numbering skips lines here (e.g. 7,
; 13-14, 24-26), so the ret/closing brace of this definition is not visible in
; this chunk — confirm against the full file before regenerating checks.
5 define float @maxnum_f32(float %x, float %y) nounwind {
6 ; R32-LABEL: maxnum_f32:
8 ; R32-NEXT: addi sp, sp, -16
9 ; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
10 ; R32-NEXT: call fmaxf@plt
11 ; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
12 ; R32-NEXT: addi sp, sp, 16
15 ; R64-LABEL: maxnum_f32:
17 ; R64-NEXT: addi sp, sp, -16
18 ; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
19 ; R64-NEXT: call fmaxf@plt
20 ; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
21 ; R64-NEXT: addi sp, sp, 16
23 %r = call float @llvm.maxnum.f32(float %x, float %y)
; With the 'fast' flag, NaN-propagation rules are relaxed, so instead of a
; fmaxf libcall this lowers to a soft-float compare (__gtsf2) followed by a
; branch/select on its result. The extra s0/s1 spills keep the operands live
; across the compare call.
27 define float @maxnum_f32_fast(float %x, float %y) nounwind {
28 ; R32-LABEL: maxnum_f32_fast:
30 ; R32-NEXT: addi sp, sp, -16
31 ; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
32 ; R32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
33 ; R32-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
36 ; R32-NEXT: call __gtsf2@plt
37 ; R32-NEXT: bgtz a0, .LBB1_2
42 ; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
43 ; R32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
44 ; R32-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
45 ; R32-NEXT: addi sp, sp, 16
48 ; R64-LABEL: maxnum_f32_fast:
50 ; R64-NEXT: addi sp, sp, -32
51 ; R64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
52 ; R64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
53 ; R64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
56 ; R64-NEXT: call __gtsf2@plt
57 ; R64-NEXT: bgtz a0, .LBB1_2
62 ; R64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
63 ; R64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
64 ; R64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
65 ; R64-NEXT: addi sp, sp, 32
67 %r = call fast float @llvm.maxnum.f32(float %x, float %y)
; Double-precision maxnum without fast-math flags: lowered to a libcall to
; fmax on both targets, same pattern as the f32 case above but with the
; double-width runtime routine.
71 define double @maxnum_f64(double %x, double %y) nounwind {
72 ; R32-LABEL: maxnum_f64:
74 ; R32-NEXT: addi sp, sp, -16
75 ; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
76 ; R32-NEXT: call fmax@plt
77 ; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
78 ; R32-NEXT: addi sp, sp, 16
81 ; R64-LABEL: maxnum_f64:
83 ; R64-NEXT: addi sp, sp, -16
84 ; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
85 ; R64-NEXT: call fmax@plt
86 ; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
87 ; R64-NEXT: addi sp, sp, 16
89 %r = call double @llvm.maxnum.f64(double %x, double %y)
; With 'nnan', the NaN-aware libcall is unnecessary and the compare+select
; form is legal. On RV32 each f64 occupies a pair of GPRs (a0/a1 and a2/a3),
; so after the __gtdf2 soft-float compare the result is assembled by selecting
; the low and high halves separately (s3 for the low word, s0 for the high
; word), which is why two compare calls and many callee-saved copies appear.
; On RV64 the whole f64 fits in one GPR and a single __gtdf2 + branch suffices.
93 define double @maxnum_f64_nnan(double %x, double %y) nounwind {
94 ; R32-LABEL: maxnum_f64_nnan:
96 ; R32-NEXT: addi sp, sp, -32
97 ; R32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
98 ; R32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
99 ; R32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
100 ; R32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
101 ; R32-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
102 ; R32-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
103 ; R32-NEXT: mv s1, a3
104 ; R32-NEXT: mv s2, a2
105 ; R32-NEXT: mv s0, a1
106 ; R32-NEXT: mv s4, a0
107 ; R32-NEXT: call __gtdf2@plt
108 ; R32-NEXT: mv s3, s4
109 ; R32-NEXT: bgtz a0, .LBB3_2
111 ; R32-NEXT: mv s3, s2
113 ; R32-NEXT: mv a0, s4
114 ; R32-NEXT: mv a1, s0
115 ; R32-NEXT: mv a2, s2
116 ; R32-NEXT: mv a3, s1
117 ; R32-NEXT: call __gtdf2@plt
118 ; R32-NEXT: bgtz a0, .LBB3_4
120 ; R32-NEXT: mv s0, s1
122 ; R32-NEXT: mv a0, s3
123 ; R32-NEXT: mv a1, s0
124 ; R32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
125 ; R32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
126 ; R32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
127 ; R32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
128 ; R32-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
129 ; R32-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
130 ; R32-NEXT: addi sp, sp, 32
133 ; R64-LABEL: maxnum_f64_nnan:
135 ; R64-NEXT: addi sp, sp, -32
136 ; R64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
137 ; R64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
138 ; R64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
139 ; R64-NEXT: mv s1, a1
140 ; R64-NEXT: mv s0, a0
141 ; R64-NEXT: call __gtdf2@plt
142 ; R64-NEXT: bgtz a0, .LBB3_2
144 ; R64-NEXT: mv s0, s1
146 ; R64-NEXT: mv a0, s0
147 ; R64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
148 ; R64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
149 ; R64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
150 ; R64-NEXT: addi sp, sp, 32
152 %r = call nnan double @llvm.maxnum.f64(double %x, double %y)
; Plain minnum: same libcall lowering as maxnum_f32 above, but to fminf.
157 define float @minnum_f32(float %x, float %y) nounwind {
158 ; R32-LABEL: minnum_f32:
159 ; R32-NEXT: addi sp, sp, -16
160 ; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
161 ; R32-NEXT: call fminf@plt
162 ; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
163 ; R32-NEXT: addi sp, sp, 16
166 ; R64-LABEL: minnum_f32:
168 ; R64-NEXT: addi sp, sp, -16
169 ; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
170 ; R64-NEXT: call fminf@plt
171 ; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
172 ; R64-NEXT: addi sp, sp, 16
174 %r = call float @llvm.minnum.f32(float %x, float %y)
; 'nnan' minnum: lowered to the soft-float less-than compare (__ltsf2) with a
; bltz on its result, selecting x or y — the min-flavored mirror of the
; maxnum_f32_fast pattern above (which uses __gtsf2/bgtz).
179 define float @minnum_f32_nnan(float %x, float %y) nounwind {
180 ; R32-LABEL: minnum_f32_nnan:
182 ; R32-NEXT: addi sp, sp, -16
183 ; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
184 ; R32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
185 ; R32-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
186 ; R32-NEXT: mv s1, a1
187 ; R32-NEXT: mv s0, a0
188 ; R32-NEXT: call __ltsf2@plt
189 ; R32-NEXT: bltz a0, .LBB5_2
191 ; R32-NEXT: mv s0, s1
193 ; R32-NEXT: mv a0, s0
194 ; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
195 ; R32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
196 ; R32-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
197 ; R32-NEXT: addi sp, sp, 16
200 ; R64-LABEL: minnum_f32_nnan:
202 ; R64-NEXT: addi sp, sp, -32
203 ; R64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
204 ; R64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
205 ; R64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
206 ; R64-NEXT: mv s1, a1
207 ; R64-NEXT: mv s0, a0
208 ; R64-NEXT: call __ltsf2@plt
209 ; R64-NEXT: bltz a0, .LBB5_2
211 ; R64-NEXT: mv s0, s1
213 ; R64-NEXT: mv a0, s0
214 ; R64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
215 ; R64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
216 ; R64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
217 ; R64-NEXT: addi sp, sp, 32
218 %r = call nnan float @llvm.minnum.f32(float %x, float %y)
; Plain double-precision minnum: libcall to fmin on both targets.
223 define double @minnum_f64(double %x, double %y) nounwind {
224 ; R32-LABEL: minnum_f64:
225 ; R32-NEXT: addi sp, sp, -16
226 ; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
227 ; R32-NEXT: call fmin@plt
228 ; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
229 ; R32-NEXT: addi sp, sp, 16
232 ; R64-LABEL: minnum_f64:
234 ; R64-NEXT: addi sp, sp, -16
235 ; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
236 ; R64-NEXT: call fmin@plt
237 ; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
238 ; R64-NEXT: addi sp, sp, 16
240 %r = call double @llvm.minnum.f64(double %x, double %y)
; 'fast' double minnum: compare+select via __ltdf2. On RV32 the f64 operands
; live in GPR pairs (a0/a1, a2/a3), so the low and high words are selected
; separately after two __ltdf2 calls — the min-flavored mirror of
; maxnum_f64_nnan above. On RV64 a single __ltdf2 + bltz selects the result.
245 define double @minnum_f64_fast(double %x, double %y) nounwind {
246 ; R32-LABEL: minnum_f64_fast:
248 ; R32-NEXT: addi sp, sp, -32
249 ; R32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
250 ; R32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
251 ; R32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
252 ; R32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
253 ; R32-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
254 ; R32-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
255 ; R32-NEXT: mv s1, a3
256 ; R32-NEXT: mv s2, a2
257 ; R32-NEXT: mv s0, a1
258 ; R32-NEXT: mv s4, a0
259 ; R32-NEXT: call __ltdf2@plt
260 ; R32-NEXT: mv s3, s4
261 ; R32-NEXT: bltz a0, .LBB7_2
263 ; R32-NEXT: mv s3, s2
265 ; R32-NEXT: mv a0, s4
266 ; R32-NEXT: mv a1, s0
267 ; R32-NEXT: mv a2, s2
268 ; R32-NEXT: mv a3, s1
269 ; R32-NEXT: call __ltdf2@plt
270 ; R32-NEXT: bltz a0, .LBB7_4
272 ; R32-NEXT: mv s0, s1
274 ; R32-NEXT: mv a0, s3
275 ; R32-NEXT: mv a1, s0
276 ; R32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
277 ; R32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
278 ; R32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
279 ; R32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
280 ; R32-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
281 ; R32-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
282 ; R32-NEXT: addi sp, sp, 32
284 ; R64-LABEL: minnum_f64_fast:
286 ; R64-NEXT: addi sp, sp, -32
287 ; R64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
288 ; R64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
289 ; R64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
290 ; R64-NEXT: mv s1, a1
291 ; R64-NEXT: mv s0, a0
292 ; R64-NEXT: call __ltdf2@plt
293 ; R64-NEXT: bltz a0, .LBB7_2
295 ; R64-NEXT: mv s0, s1
297 ; R64-NEXT: mv a0, s0
298 ; R64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
299 ; R64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
300 ; R64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
301 ; R64-NEXT: addi sp, sp, 32
303 %r = call fast double @llvm.minnum.f64(double %x, double %y)
; Intrinsic declarations exercised by the functions above.
307 declare float @llvm.maxnum.f32(float, float)
308 declare double @llvm.maxnum.f64(double, double)
309 declare float @llvm.minnum.f32(float, float)
310 declare double @llvm.minnum.f64(double, double)