1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
3 ; RUN: < %s -mtriple=powerpc64-unknown-linux -mcpu=pwr8 | FileCheck %s \
4 ; RUN: --check-prefixes=CHECK,P8
5 ; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
6 ; RUN: < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 | FileCheck %s \
7 ; RUN: --check-prefixes=CHECK,P9
8 ; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
9 ; RUN: < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr8 -mattr=-vsx | \
10 ; RUN: FileCheck %s -check-prefix=NOVSX
11 ; RUN: llc -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 < %s -simplify-mir \
12 ; RUN: -stop-after=machine-cp | FileCheck %s -check-prefix=MIR
14 declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
15 declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
16 declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
17 declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
19 declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
20 declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
21 declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
22 declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
24 declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
25 declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
26 declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
27 declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
29 declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
30 declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
31 declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
32 declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
; Strict fptosi f64 -> i32 (must stay ordered w.r.t. FP exceptions).
define i32 @d_to_i32(double %m) #0 {
; CHECK-LABEL: d_to_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws f0, f1
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: d_to_i32:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict")
  ret i32 %conv
}
; Strict fptosi f64 -> i64.
define i64 @d_to_i64(double %m) #0 {
; CHECK-LABEL: d_to_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxds f0, f1
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: d_to_i64:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctidz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict")
  ret i64 %conv
}
; Strict fptoui f64 -> i64.
define i64 @d_to_u64(double %m) #0 {
; CHECK-LABEL: d_to_u64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxds f0, f1
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: d_to_u64:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiduz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict")
  ret i64 %conv
}
; Strict fptoui f64 -> i32 (zero-extended return).
define zeroext i32 @d_to_u32(double %m) #0 {
; CHECK-LABEL: d_to_u32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: d_to_u32:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwuz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict")
  ret i32 %conv
}
; Strict fptosi f32 -> i32 (sign-extended return: extsw / lwa).
define signext i32 @f_to_i32(float %m) #0 {
; CHECK-LABEL: f_to_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws f0, f1
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: extsw r3, r3
; CHECK-NEXT: blr
;
; NOVSX-LABEL: f_to_i32:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwa r3, -4(r1)
; NOVSX-NEXT: blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict")
  ret i32 %conv
}
; Strict fptosi f32 -> i64.
define i64 @f_to_i64(float %m) #0 {
; CHECK-LABEL: f_to_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxds f0, f1
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: f_to_i64:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctidz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict")
  ret i64 %conv
}
; Strict fptoui f32 -> i64.
define i64 @f_to_u64(float %m) #0 {
; CHECK-LABEL: f_to_u64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxds f0, f1
; CHECK-NEXT: mffprd r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: f_to_u64:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiduz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict")
  ret i64 %conv
}
; Strict fptoui f32 -> i32 (zero-extended return).
define zeroext i32 @f_to_u32(float %m) #0 {
; CHECK-LABEL: f_to_u32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: f_to_u32:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwuz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict")
  ret i32 %conv
}
; Strict sitofp i32 -> f64 with dynamic rounding.
define double @i32_to_d(i32 signext %m) #0 {
; CHECK-LABEL: i32_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r3
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: i32_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: stw r3, -4(r1)
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: lfiwax f0, 0, r3
; NOVSX-NEXT: fcfid f1, f0
; NOVSX-NEXT: blr
entry:
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}
; Strict sitofp i64 -> f64 with dynamic rounding.
define double @i64_to_d(i64 %m) #0 {
; CHECK-LABEL: i64_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: i64_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: std r3, -8(r1)
; NOVSX-NEXT: lfd f0, -8(r1)
; NOVSX-NEXT: fcfid f1, f0
; NOVSX-NEXT: blr
entry:
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}
; Strict uitofp i32 -> f64 with dynamic rounding.
define double @u32_to_d(i32 zeroext %m) #0 {
; CHECK-LABEL: u32_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r3
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: u32_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: stw r3, -4(r1)
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: lfiwzx f0, 0, r3
; NOVSX-NEXT: fcfidu f1, f0
; NOVSX-NEXT: blr
entry:
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}
; Strict uitofp i64 -> f64 with dynamic rounding.
define double @u64_to_d(i64 %m) #0 {
; CHECK-LABEL: u64_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: u64_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: std r3, -8(r1)
; NOVSX-NEXT: lfd f0, -8(r1)
; NOVSX-NEXT: fcfidu f1, f0
; NOVSX-NEXT: blr
entry:
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}
; Strict sitofp i32 -> f32 with dynamic rounding.
define float @i32_to_f(i32 signext %m) #0 {
; CHECK-LABEL: i32_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwa f0, r3
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: i32_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: stw r3, -4(r1)
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: lfiwax f0, 0, r3
; NOVSX-NEXT: fcfids f1, f0
; NOVSX-NEXT: blr
entry:
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}
; Strict sitofp i64 -> f32 with dynamic rounding.
define float @i64_to_f(i64 %m) #0 {
; CHECK-LABEL: i64_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: i64_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: std r3, -8(r1)
; NOVSX-NEXT: lfd f0, -8(r1)
; NOVSX-NEXT: fcfids f1, f0
; NOVSX-NEXT: blr
entry:
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}
; Strict uitofp i32 -> f32 with dynamic rounding.
define float @u32_to_f(i32 zeroext %m) #0 {
; CHECK-LABEL: u32_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprwz f0, r3
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: u32_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: stw r3, -4(r1)
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: lfiwzx f0, 0, r3
; NOVSX-NEXT: fcfidus f1, f0
; NOVSX-NEXT: blr
entry:
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}
; Strict uitofp i64 -> f32 with dynamic rounding.
define float @u64_to_f(i64 %m) #0 {
; CHECK-LABEL: u64_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mtfprd f0, r3
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: u64_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: std r3, -8(r1)
; NOVSX-NEXT: lfd f0, -8(r1)
; NOVSX-NEXT: fcfidus f1, f0
; NOVSX-NEXT: blr
entry:
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}
; Strict fptosi f64 -> i32, result stored to memory (VSX stores the word directly).
define void @d_to_i32_store(double %m, ptr %addr) #0 {
; CHECK-LABEL: d_to_i32_store:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws f0, f1
; CHECK-NEXT: stfiwx f0, 0, r4
; CHECK-NEXT: blr
;
; NOVSX-LABEL: d_to_i32_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: stw r3, 0(r4)
; NOVSX-NEXT: blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict")
  store i32 %conv, ptr %addr, align 4
  ret void
}
; Strict fptosi f64 -> i64, stored to memory; P8 and P9 pick different store forms.
define void @d_to_i64_store(double %m, ptr %addr) #0 {
; P8-LABEL: d_to_i64_store:
; P8: # %bb.0: # %entry
; P8-NEXT: xscvdpsxds f0, f1
; P8-NEXT: stxsdx f0, 0, r4
; P8-NEXT: blr
;
; P9-LABEL: d_to_i64_store:
; P9: # %bb.0: # %entry
; P9-NEXT: xscvdpsxds v2, f1
; P9-NEXT: stxsd v2, 0(r4)
; P9-NEXT: blr
;
; NOVSX-LABEL: d_to_i64_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctidz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: std r3, 0(r4)
; NOVSX-NEXT: blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict")
  store i64 %conv, ptr %addr, align 8
  ret void
}
; Strict fptoui f64 -> i64, stored to memory.
define void @d_to_u64_store(double %m, ptr %addr) #0 {
; P8-LABEL: d_to_u64_store:
; P8: # %bb.0: # %entry
; P8-NEXT: xscvdpuxds f0, f1
; P8-NEXT: stxsdx f0, 0, r4
; P8-NEXT: blr
;
; P9-LABEL: d_to_u64_store:
; P9: # %bb.0: # %entry
; P9-NEXT: xscvdpuxds v2, f1
; P9-NEXT: stxsd v2, 0(r4)
; P9-NEXT: blr
;
; NOVSX-LABEL: d_to_u64_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiduz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: std r3, 0(r4)
; NOVSX-NEXT: blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict")
  store i64 %conv, ptr %addr, align 8
  ret void
}
; Strict fptoui f64 -> i32, stored to memory.
define void @d_to_u32_store(double %m, ptr %addr) #0 {
; CHECK-LABEL: d_to_u32_store:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
; CHECK-NEXT: stfiwx f0, 0, r4
; CHECK-NEXT: blr
;
; NOVSX-LABEL: d_to_u32_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwuz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: stw r3, 0(r4)
; NOVSX-NEXT: blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict")
  store i32 %conv, ptr %addr, align 4
  ret void
}
; Strict fptosi f32 -> i32, stored to memory.
define void @f_to_i32_store(float %m, ptr %addr) #0 {
; CHECK-LABEL: f_to_i32_store:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpsxws f0, f1
; CHECK-NEXT: stfiwx f0, 0, r4
; CHECK-NEXT: blr
;
; NOVSX-LABEL: f_to_i32_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: stw r3, 0(r4)
; NOVSX-NEXT: blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict")
  store i32 %conv, ptr %addr, align 4
  ret void
}
; Strict fptosi f32 -> i64, stored to memory.
define void @f_to_i64_store(float %m, ptr %addr) #0 {
; P8-LABEL: f_to_i64_store:
; P8: # %bb.0: # %entry
; P8-NEXT: xscvdpsxds f0, f1
; P8-NEXT: stxsdx f0, 0, r4
; P8-NEXT: blr
;
; P9-LABEL: f_to_i64_store:
; P9: # %bb.0: # %entry
; P9-NEXT: xscvdpsxds v2, f1
; P9-NEXT: stxsd v2, 0(r4)
; P9-NEXT: blr
;
; NOVSX-LABEL: f_to_i64_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctidz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: std r3, 0(r4)
; NOVSX-NEXT: blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict")
  store i64 %conv, ptr %addr, align 8
  ret void
}
; Strict fptoui f32 -> i64, stored to memory.
define void @f_to_u64_store(float %m, ptr %addr) #0 {
; P8-LABEL: f_to_u64_store:
; P8: # %bb.0: # %entry
; P8-NEXT: xscvdpuxds f0, f1
; P8-NEXT: stxsdx f0, 0, r4
; P8-NEXT: blr
;
; P9-LABEL: f_to_u64_store:
; P9: # %bb.0: # %entry
; P9-NEXT: xscvdpuxds v2, f1
; P9-NEXT: stxsd v2, 0(r4)
; P9-NEXT: blr
;
; NOVSX-LABEL: f_to_u64_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiduz f0, f1
; NOVSX-NEXT: stfd f0, -8(r1)
; NOVSX-NEXT: ld r3, -8(r1)
; NOVSX-NEXT: std r3, 0(r4)
; NOVSX-NEXT: blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict")
  store i64 %conv, ptr %addr, align 8
  ret void
}
; Strict fptoui f32 -> i32, stored to memory.
define void @f_to_u32_store(float %m, ptr %addr) #0 {
; CHECK-LABEL: f_to_u32_store:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xscvdpuxws f0, f1
; CHECK-NEXT: stfiwx f0, 0, r4
; CHECK-NEXT: blr
;
; NOVSX-LABEL: f_to_u32_store:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: fctiwuz f0, f1
; NOVSX-NEXT: addi r3, r1, -4
; NOVSX-NEXT: stfiwx f0, 0, r3
; NOVSX-NEXT: lwz r3, -4(r1)
; NOVSX-NEXT: stw r3, 0(r4)
; NOVSX-NEXT: blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict")
  store i32 %conv, ptr %addr, align 4
  ret void
}
; Strict sitofp of an i32 loaded from memory (folds into lfiwax).
define double @load_i32_to_d(ptr %addr) #0 {
; CHECK-LABEL: load_i32_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfiwax f0, 0, r3
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_i32_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfiwax f0, 0, r3
; NOVSX-NEXT: fcfid f1, f0
; NOVSX-NEXT: blr
entry:
  %m = load i32, ptr %addr, align 4
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}
; Strict sitofp of an i64 loaded from memory.
define double @load_i64_to_d(ptr %addr) #0 {
; CHECK-LABEL: load_i64_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 0(r3)
; CHECK-NEXT: xscvsxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_i64_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfd f0, 0(r3)
; NOVSX-NEXT: fcfid f1, f0
; NOVSX-NEXT: blr
entry:
  %m = load i64, ptr %addr, align 8
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}
; Strict uitofp of an i32 loaded from memory (folds into lfiwzx).
define double @load_u32_to_d(ptr %addr) #0 {
; CHECK-LABEL: load_u32_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfiwzx f0, 0, r3
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_u32_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfiwzx f0, 0, r3
; NOVSX-NEXT: fcfidu f1, f0
; NOVSX-NEXT: blr
entry:
  %m = load i32, ptr %addr, align 4
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}
; Strict uitofp of an i64 loaded from memory.
define double @load_u64_to_d(ptr %addr) #0 {
; CHECK-LABEL: load_u64_to_d:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 0(r3)
; CHECK-NEXT: xscvuxddp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_u64_to_d:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfd f0, 0(r3)
; NOVSX-NEXT: fcfidu f1, f0
; NOVSX-NEXT: blr
entry:
  %m = load i64, ptr %addr, align 8
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %conv
}
; Strict sitofp of a loaded i32 to f32.
define float @load_i32_to_f(ptr %addr) #0 {
; CHECK-LABEL: load_i32_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfiwax f0, 0, r3
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_i32_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfiwax f0, 0, r3
; NOVSX-NEXT: fcfids f1, f0
; NOVSX-NEXT: blr
entry:
  %m = load i32, ptr %addr, align 4
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}
; Strict sitofp of a loaded i64 to f32.
define float @load_i64_to_f(ptr %addr) #0 {
; CHECK-LABEL: load_i64_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 0(r3)
; CHECK-NEXT: xscvsxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_i64_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfd f0, 0(r3)
; NOVSX-NEXT: fcfids f1, f0
; NOVSX-NEXT: blr
entry:
  %m = load i64, ptr %addr, align 8
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}
; Strict uitofp of a loaded i32 to f32.
define float @load_u32_to_f(ptr %addr) #0 {
; CHECK-LABEL: load_u32_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfiwzx f0, 0, r3
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_u32_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfiwzx f0, 0, r3
; NOVSX-NEXT: fcfidus f1, f0
; NOVSX-NEXT: blr
entry:
  %m = load i32, ptr %addr, align 4
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}
; Strict uitofp of a loaded i64 to f32.
define float @load_u64_to_f(ptr %addr) #0 {
; CHECK-LABEL: load_u64_to_f:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lfd f0, 0(r3)
; CHECK-NEXT: xscvuxdsp f1, f0
; CHECK-NEXT: blr
;
; NOVSX-LABEL: load_u64_to_f:
; NOVSX: # %bb.0: # %entry
; NOVSX-NEXT: lfd f0, 0(r3)
; NOVSX-NEXT: fcfidus f1, f0
; NOVSX-NEXT: blr
entry:
  %m = load i64, ptr %addr, align 8
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %conv
}
; With fpexcept.ignore the MIR conversions must carry the nofpexcept flag.
define void @fptoint_nofpexcept_f64(double %m, ptr %addr1, ptr %addr2) #0 {
; MIR-LABEL: name: fptoint_nofpexcept_f64
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPSXDS
; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPUXDS
entry:
  %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.ignore")
  %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.ignore")
  %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.ignore")
  %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.ignore")
  store volatile i32 %conv1, ptr %addr1, align 4
  store volatile i32 %conv2, ptr %addr1, align 4
  store volatile i64 %conv3, ptr %addr2, align 8
  store volatile i64 %conv4, ptr %addr2, align 8
  ret void
}
; f32 variant: fpexcept.ignore conversions get the nofpexcept MIR flag.
define void @fptoint_nofpexcept_f32(float %m, ptr %addr1, ptr %addr2) #0 {
; MIR-LABEL: name: fptoint_nofpexcept_f32
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPSXDS
; MIR: renamable $vf{{[0-9]+}} = nofpexcept XSCVDPUXDS
entry:
  %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.ignore")
  %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.ignore")
  %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.ignore")
  %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.ignore")
  store volatile i32 %conv1, ptr %addr1, align 4
  store volatile i32 %conv2, ptr %addr1, align 4
  store volatile i64 %conv3, ptr %addr2, align 8
  store volatile i64 %conv4, ptr %addr2, align 8
  ret void
}
; int->fp with fpexcept.ignore: conversions carry the nofpexcept MIR flag.
define void @inttofp_nofpexcept_i32(i32 %m, ptr %addr1, ptr %addr2) #0 {
; MIR-LABEL: name: inttofp_nofpexcept_i32
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDDP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDDP
entry:
  %conv1 = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  %conv2 = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  %conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  %conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  store volatile float %conv1, ptr %addr1, align 4
  store volatile float %conv2, ptr %addr1, align 4
  store volatile double %conv3, ptr %addr2, align 8
  store volatile double %conv4, ptr %addr2, align 8
  ret void
}
; i64->fp with fpexcept.ignore: conversions carry the nofpexcept MIR flag.
define void @inttofp_nofpexcept_i64(i64 %m, ptr %addr1, ptr %addr2) #0 {
; MIR-LABEL: name: inttofp_nofpexcept_i64
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDDP
; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDDP
entry:
  %conv1 = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  %conv2 = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  %conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  %conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  store volatile float %conv1, ptr %addr1, align 4
  store volatile float %conv2, ptr %addr1, align 4
  store volatile double %conv3, ptr %addr2, align 8
  store volatile double %conv4, ptr %addr2, align 8
  ret void
}
; Vector sitofp with fpexcept.ignore: lowered conversion carries nofpexcept.
define <2 x double> @inttofp_nofpexcept_vec(<2 x i16> %m) #0 {
; MIR-LABEL: name: inttofp_nofpexcept_vec
; MIR: renamable $v{{[0-9]+}} = nofpexcept XVCVSXDDP
entry:
  %conv = tail call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16> %m, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  ret <2 x double> %conv
}
743 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
745 attributes #0 = { strictfp }