; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32F %s
; RUN: llc -mtriple=riscv32 -mattr=+f,+d -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32FD %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64F %s
; RUN: llc -mtriple=riscv64 -mattr=+f,+d -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64FD %s

; These functions perform extra work to ensure that `%a3` starts in a
; floating-point register, if the machine has them, and the result of
; the bitwise operation is then needed in a floating-point register.
; This should mean the optimisations will fire even if you're using the
; soft-float ABI on a machine with hardware floating-point support.
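;
; The constants below are the usual sign-bit masks: and with 2147483647
; (0x7fffffff) or 9223372036854775807 clears the sign bit (fabs), xor with
; 2147483648 (0x80000000) or 9223372036854775808 flips it (fneg), and or with
; the same value sets it, which the checks show being selected as fabs
; followed by fneg when hardware floating-point registers are available.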

define float @bitcast_and(float %a1, float %a2) nounwind {
; RV32F-LABEL: bitcast_and:
; RV32F:       # %bb.0:
; RV32F-NEXT:    fmv.w.x ft0, a1
; RV32F-NEXT:    fmv.w.x ft1, a0
; RV32F-NEXT:    fadd.s ft0, ft1, ft0
; RV32F-NEXT:    fabs.s ft0, ft0
; RV32F-NEXT:    fadd.s ft0, ft1, ft0
; RV32F-NEXT:    fmv.x.w a0, ft0
; RV32F-NEXT:    ret
;
; RV32FD-LABEL: bitcast_and:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    fmv.w.x ft0, a1
; RV32FD-NEXT:    fmv.w.x ft1, a0
; RV32FD-NEXT:    fadd.s ft0, ft1, ft0
; RV32FD-NEXT:    fabs.s ft0, ft0
; RV32FD-NEXT:    fadd.s ft0, ft1, ft0
; RV32FD-NEXT:    fmv.x.w a0, ft0
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_and:
; RV64F:       # %bb.0:
; RV64F-NEXT:    fmv.w.x ft0, a1
; RV64F-NEXT:    fmv.w.x ft1, a0
; RV64F-NEXT:    fadd.s ft0, ft1, ft0
; RV64F-NEXT:    fabs.s ft0, ft0
; RV64F-NEXT:    fadd.s ft0, ft1, ft0
; RV64F-NEXT:    fmv.x.w a0, ft0
; RV64F-NEXT:    ret
;
; RV64FD-LABEL: bitcast_and:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.w.x ft0, a1
; RV64FD-NEXT:    fmv.w.x ft1, a0
; RV64FD-NEXT:    fadd.s ft0, ft1, ft0
; RV64FD-NEXT:    fabs.s ft0, ft0
; RV64FD-NEXT:    fadd.s ft0, ft1, ft0
; RV64FD-NEXT:    fmv.x.w a0, ft0
; RV64FD-NEXT:    ret
  %a3 = fadd float %a1, %a2
  %bc1 = bitcast float %a3 to i32
  %and = and i32 %bc1, 2147483647
  %bc2 = bitcast i32 %and to float
  %a4 = fadd float %a1, %bc2
  ret float %a4
}
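
; As above, but for double: with the D extension the and with
; 9223372036854775807 (0x7fffffffffffffff) should be selected as fabs.d;
; without it the fadd is lowered to an __adddf3 libcall and the masking is
; done in integer registers.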
define double @bitcast_double_and(double %a1, double %a2) nounwind {
; RV32F-LABEL: bitcast_double_and:
; RV32F:       # %bb.0:
; RV32F-NEXT:    addi sp, sp, -16
; RV32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32F-NEXT:    mv s0, a1
; RV32F-NEXT:    mv s1, a0
; RV32F-NEXT:    call __adddf3@plt
; RV32F-NEXT:    mv a2, a0
; RV32F-NEXT:    lui a0, 524288
; RV32F-NEXT:    addi a0, a0, -1
; RV32F-NEXT:    and a3, a1, a0
; RV32F-NEXT:    mv a0, s1
; RV32F-NEXT:    mv a1, s0
; RV32F-NEXT:    call __adddf3@plt
; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32F-NEXT:    addi sp, sp, 16
; RV32F-NEXT:    ret
;
; RV32FD-LABEL: bitcast_double_and:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    addi sp, sp, -16
; RV32FD-NEXT:    sw a2, 8(sp)
; RV32FD-NEXT:    sw a3, 12(sp)
; RV32FD-NEXT:    fld ft0, 8(sp)
; RV32FD-NEXT:    sw a0, 8(sp)
; RV32FD-NEXT:    sw a1, 12(sp)
; RV32FD-NEXT:    fld ft1, 8(sp)
; RV32FD-NEXT:    fadd.d ft0, ft1, ft0
; RV32FD-NEXT:    fabs.d ft0, ft0
; RV32FD-NEXT:    fadd.d ft0, ft1, ft0
; RV32FD-NEXT:    fsd ft0, 8(sp)
; RV32FD-NEXT:    lw a0, 8(sp)
; RV32FD-NEXT:    lw a1, 12(sp)
; RV32FD-NEXT:    addi sp, sp, 16
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_double_and:
; RV64F:       # %bb.0:
; RV64F-NEXT:    addi sp, sp, -16
; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64F-NEXT:    mv s0, a0
; RV64F-NEXT:    call __adddf3@plt
; RV64F-NEXT:    addi a1, zero, -1
; RV64F-NEXT:    srli a1, a1, 1
; RV64F-NEXT:    and a1, a0, a1
; RV64F-NEXT:    mv a0, s0
; RV64F-NEXT:    call __adddf3@plt
; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64F-NEXT:    addi sp, sp, 16
; RV64F-NEXT:    ret
;
; RV64FD-LABEL: bitcast_double_and:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.d.x ft0, a1
; RV64FD-NEXT:    fmv.d.x ft1, a0
; RV64FD-NEXT:    fadd.d ft0, ft1, ft0
; RV64FD-NEXT:    fabs.d ft0, ft0
; RV64FD-NEXT:    fadd.d ft0, ft1, ft0
; RV64FD-NEXT:    fmv.x.d a0, ft0
; RV64FD-NEXT:    ret
  %a3 = fadd double %a1, %a2
  %bc1 = bitcast double %a3 to i64
  %and = and i64 %bc1, 9223372036854775807
  %bc2 = bitcast i64 %and to double
  %a4 = fadd double %a1, %bc2
  ret double %a4
}
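
; Flipping the sign bit with xor should be selected as fneg.s on every
; hard-float configuration checked here.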
define float @bitcast_xor(float %a1, float %a2) nounwind {
; RV32F-LABEL: bitcast_xor:
; RV32F:       # %bb.0:
; RV32F-NEXT:    fmv.w.x ft0, a1
; RV32F-NEXT:    fmv.w.x ft1, a0
; RV32F-NEXT:    fmul.s ft0, ft1, ft0
; RV32F-NEXT:    fneg.s ft0, ft0
; RV32F-NEXT:    fmul.s ft0, ft1, ft0
; RV32F-NEXT:    fmv.x.w a0, ft0
; RV32F-NEXT:    ret
;
; RV32FD-LABEL: bitcast_xor:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    fmv.w.x ft0, a1
; RV32FD-NEXT:    fmv.w.x ft1, a0
; RV32FD-NEXT:    fmul.s ft0, ft1, ft0
; RV32FD-NEXT:    fneg.s ft0, ft0
; RV32FD-NEXT:    fmul.s ft0, ft1, ft0
; RV32FD-NEXT:    fmv.x.w a0, ft0
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_xor:
; RV64F:       # %bb.0:
; RV64F-NEXT:    fmv.w.x ft0, a1
; RV64F-NEXT:    fmv.w.x ft1, a0
; RV64F-NEXT:    fmul.s ft0, ft1, ft0
; RV64F-NEXT:    fneg.s ft0, ft0
; RV64F-NEXT:    fmul.s ft0, ft1, ft0
; RV64F-NEXT:    fmv.x.w a0, ft0
; RV64F-NEXT:    ret
;
; RV64FD-LABEL: bitcast_xor:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.w.x ft0, a1
; RV64FD-NEXT:    fmv.w.x ft1, a0
; RV64FD-NEXT:    fmul.s ft0, ft1, ft0
; RV64FD-NEXT:    fneg.s ft0, ft0
; RV64FD-NEXT:    fmul.s ft0, ft1, ft0
; RV64FD-NEXT:    fmv.x.w a0, ft0
; RV64FD-NEXT:    ret
  %a3 = fmul float %a1, %a2
  %bc1 = bitcast float %a3 to i32
  %and = xor i32 %bc1, 2147483648
  %bc2 = bitcast i32 %and to float
  %a4 = fmul float %a1, %bc2
  ret float %a4
}
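
; As above, but for double: fneg.d with the D extension, otherwise the
; sign-bit mask is applied in integer registers around the __muldf3 libcall.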
define double @bitcast_double_xor(double %a1, double %a2) nounwind {
; RV32F-LABEL: bitcast_double_xor:
; RV32F:       # %bb.0:
; RV32F-NEXT:    addi sp, sp, -16
; RV32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32F-NEXT:    mv s0, a1
; RV32F-NEXT:    mv s1, a0
; RV32F-NEXT:    call __muldf3@plt
; RV32F-NEXT:    mv a2, a0
; RV32F-NEXT:    lui a0, 524288
; RV32F-NEXT:    xor a3, a1, a0
; RV32F-NEXT:    mv a0, s1
; RV32F-NEXT:    mv a1, s0
; RV32F-NEXT:    call __muldf3@plt
; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32F-NEXT:    addi sp, sp, 16
; RV32F-NEXT:    ret
;
; RV32FD-LABEL: bitcast_double_xor:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    addi sp, sp, -16
; RV32FD-NEXT:    sw a2, 8(sp)
; RV32FD-NEXT:    sw a3, 12(sp)
; RV32FD-NEXT:    fld ft0, 8(sp)
; RV32FD-NEXT:    sw a0, 8(sp)
; RV32FD-NEXT:    sw a1, 12(sp)
; RV32FD-NEXT:    fld ft1, 8(sp)
; RV32FD-NEXT:    fmul.d ft0, ft1, ft0
; RV32FD-NEXT:    fneg.d ft0, ft0
; RV32FD-NEXT:    fmul.d ft0, ft1, ft0
; RV32FD-NEXT:    fsd ft0, 8(sp)
; RV32FD-NEXT:    lw a0, 8(sp)
; RV32FD-NEXT:    lw a1, 12(sp)
; RV32FD-NEXT:    addi sp, sp, 16
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_double_xor:
; RV64F:       # %bb.0:
; RV64F-NEXT:    addi sp, sp, -16
; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64F-NEXT:    mv s0, a0
; RV64F-NEXT:    call __muldf3@plt
; RV64F-NEXT:    addi a1, zero, -1
; RV64F-NEXT:    slli a1, a1, 63
; RV64F-NEXT:    xor a1, a0, a1
; RV64F-NEXT:    mv a0, s0
; RV64F-NEXT:    call __muldf3@plt
; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64F-NEXT:    addi sp, sp, 16
; RV64F-NEXT:    ret
;
; RV64FD-LABEL: bitcast_double_xor:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.d.x ft0, a1
; RV64FD-NEXT:    fmv.d.x ft1, a0
; RV64FD-NEXT:    fmul.d ft0, ft1, ft0
; RV64FD-NEXT:    fneg.d ft0, ft0
; RV64FD-NEXT:    fmul.d ft0, ft1, ft0
; RV64FD-NEXT:    fmv.x.d a0, ft0
; RV64FD-NEXT:    ret
  %a3 = fmul double %a1, %a2
  %bc1 = bitcast double %a3 to i64
  %and = xor i64 %bc1, 9223372036854775808
  %bc2 = bitcast i64 %and to double
  %a4 = fmul double %a1, %bc2
  ret double %a4
}
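
; Setting the sign bit with or is expected to be selected as fabs.s followed
; by fneg.s when hardware floating-point registers are available.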
define float @bitcast_or(float %a1, float %a2) nounwind {
; RV32F-LABEL: bitcast_or:
; RV32F:       # %bb.0:
; RV32F-NEXT:    fmv.w.x ft0, a1
; RV32F-NEXT:    fmv.w.x ft1, a0
; RV32F-NEXT:    fmul.s ft0, ft1, ft0
; RV32F-NEXT:    fabs.s ft0, ft0
; RV32F-NEXT:    fneg.s ft0, ft0
; RV32F-NEXT:    fmul.s ft0, ft1, ft0
; RV32F-NEXT:    fmv.x.w a0, ft0
; RV32F-NEXT:    ret
;
; RV32FD-LABEL: bitcast_or:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    fmv.w.x ft0, a1
; RV32FD-NEXT:    fmv.w.x ft1, a0
; RV32FD-NEXT:    fmul.s ft0, ft1, ft0
; RV32FD-NEXT:    fabs.s ft0, ft0
; RV32FD-NEXT:    fneg.s ft0, ft0
; RV32FD-NEXT:    fmul.s ft0, ft1, ft0
; RV32FD-NEXT:    fmv.x.w a0, ft0
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_or:
; RV64F:       # %bb.0:
; RV64F-NEXT:    fmv.w.x ft0, a1
; RV64F-NEXT:    fmv.w.x ft1, a0
; RV64F-NEXT:    fmul.s ft0, ft1, ft0
; RV64F-NEXT:    fabs.s ft0, ft0
; RV64F-NEXT:    fneg.s ft0, ft0
; RV64F-NEXT:    fmul.s ft0, ft1, ft0
; RV64F-NEXT:    fmv.x.w a0, ft0
; RV64F-NEXT:    ret
;
; RV64FD-LABEL: bitcast_or:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.w.x ft0, a1
; RV64FD-NEXT:    fmv.w.x ft1, a0
; RV64FD-NEXT:    fmul.s ft0, ft1, ft0
; RV64FD-NEXT:    fabs.s ft0, ft0
; RV64FD-NEXT:    fneg.s ft0, ft0
; RV64FD-NEXT:    fmul.s ft0, ft1, ft0
; RV64FD-NEXT:    fmv.x.w a0, ft0
; RV64FD-NEXT:    ret
  %a3 = fmul float %a1, %a2
  %bc1 = bitcast float %a3 to i32
  %and = or i32 %bc1, 2147483648
  %bc2 = bitcast i32 %and to float
  %a4 = fmul float %a1, %bc2
  ret float %a4
}
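
; As above, but for double: fabs.d followed by fneg.d with the D extension,
; otherwise the or is applied in integer registers around the __muldf3 libcall.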
define double @bitcast_double_or(double %a1, double %a2) nounwind {
; RV32F-LABEL: bitcast_double_or:
; RV32F:       # %bb.0:
; RV32F-NEXT:    addi sp, sp, -16
; RV32F-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32F-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32F-NEXT:    mv s0, a1
; RV32F-NEXT:    mv s1, a0
; RV32F-NEXT:    call __muldf3@plt
; RV32F-NEXT:    mv a2, a0
; RV32F-NEXT:    lui a0, 524288
; RV32F-NEXT:    or a3, a1, a0
; RV32F-NEXT:    mv a0, s1
; RV32F-NEXT:    mv a1, s0
; RV32F-NEXT:    call __muldf3@plt
; RV32F-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32F-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32F-NEXT:    addi sp, sp, 16
; RV32F-NEXT:    ret
;
; RV32FD-LABEL: bitcast_double_or:
; RV32FD:       # %bb.0:
; RV32FD-NEXT:    addi sp, sp, -16
; RV32FD-NEXT:    sw a2, 8(sp)
; RV32FD-NEXT:    sw a3, 12(sp)
; RV32FD-NEXT:    fld ft0, 8(sp)
; RV32FD-NEXT:    sw a0, 8(sp)
; RV32FD-NEXT:    sw a1, 12(sp)
; RV32FD-NEXT:    fld ft1, 8(sp)
; RV32FD-NEXT:    fmul.d ft0, ft1, ft0
; RV32FD-NEXT:    fabs.d ft0, ft0
; RV32FD-NEXT:    fneg.d ft0, ft0
; RV32FD-NEXT:    fmul.d ft0, ft1, ft0
; RV32FD-NEXT:    fsd ft0, 8(sp)
; RV32FD-NEXT:    lw a0, 8(sp)
; RV32FD-NEXT:    lw a1, 12(sp)
; RV32FD-NEXT:    addi sp, sp, 16
; RV32FD-NEXT:    ret
;
; RV64F-LABEL: bitcast_double_or:
; RV64F:       # %bb.0:
; RV64F-NEXT:    addi sp, sp, -16
; RV64F-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64F-NEXT:    mv s0, a0
; RV64F-NEXT:    call __muldf3@plt
; RV64F-NEXT:    addi a1, zero, -1
; RV64F-NEXT:    slli a1, a1, 63
; RV64F-NEXT:    or a1, a0, a1
; RV64F-NEXT:    mv a0, s0
; RV64F-NEXT:    call __muldf3@plt
; RV64F-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64F-NEXT:    addi sp, sp, 16
; RV64F-NEXT:    ret
;
; RV64FD-LABEL: bitcast_double_or:
; RV64FD:       # %bb.0:
; RV64FD-NEXT:    fmv.d.x ft0, a1
; RV64FD-NEXT:    fmv.d.x ft1, a0
; RV64FD-NEXT:    fmul.d ft0, ft1, ft0
; RV64FD-NEXT:    fabs.d ft0, ft0
; RV64FD-NEXT:    fneg.d ft0, ft0
; RV64FD-NEXT:    fmul.d ft0, ft1, ft0
; RV64FD-NEXT:    fmv.x.d a0, ft0
; RV64FD-NEXT:    ret
  %a3 = fmul double %a1, %a2
  %bc1 = bitcast double %a3 to i64
  %and = or i64 %bc1, 9223372036854775808
  %bc2 = bitcast i64 %and to double
  %a4 = fmul double %a1, %bc2
  ret double %a4
}