1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
3 ; RUN: < %s -mtriple=powerpc64-unknown-linux -mcpu=pwr8 | FileCheck %s
4 ; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
5 ; RUN: < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 | FileCheck %s
6 ; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
7 ; RUN: < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr8 -mattr=-vsx | \
8 ; RUN: FileCheck %s -check-prefix=NOVSX
9 ; RUN: llc -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 < %s -simplify-mir \
10 ; RUN: -stop-after=machine-cp | FileCheck %s -check-prefix=MIR
; Declarations of the constrained (strictfp-safe) conversion intrinsics
; exercised by the tests below.  Each fpto* intrinsic takes an fpexcept
; metadata operand; each *tofp intrinsic additionally takes a rounding-mode
; metadata operand.

; FP -> integer, from double (f64):
12 declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
13 declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
14 declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
15 declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
; FP -> integer, from float (f32):
17 declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
18 declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
19 declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
20 declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
; Integer -> double (f64):
22 declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
23 declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
24 declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
25 declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
; Integer -> float (f32):
27 declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
28 declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
29 declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
30 declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
; ---------------------------------------------------------------------------
; Scalar FP -> integer conversions with !"fpexcept.strict".
; CHECK  = pwr8/pwr9 with VSX: direct xscvdp* convert + mffpr* move to GPR.
; NOVSX  = pwr8 with -vsx: fcti* convert + store/reload through the stack
;          (red zone below r1) to move the result into a GPR.
; NOTE(review): the embedded source line numbers have gaps here (e.g. 44->47,
; 47->51); the 'blr', 'entry:', 'ret' and '}' lines appear to have been
; dropped from this excerpt, so the function bodies below are incomplete as
; shown — confirm against the original test file before editing.
; ---------------------------------------------------------------------------

; double -> signed i32.
32 define i32 @d_to_i32(double %m) #0 {
33 ; CHECK-LABEL: d_to_i32:
34 ; CHECK: # %bb.0: # %entry
35 ; CHECK-NEXT: xscvdpsxws f0, f1
36 ; CHECK-NEXT: mffprwz r3, f0
39 ; NOVSX-LABEL: d_to_i32:
40 ; NOVSX: # %bb.0: # %entry
41 ; NOVSX-NEXT: fctiwz f0, f1
42 ; NOVSX-NEXT: addi r3, r1, -4
43 ; NOVSX-NEXT: stfiwx f0, 0, r3
44 ; NOVSX-NEXT: lwz r3, -4(r1)
47 %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict") #0

; double -> signed i64.
51 define i64 @d_to_i64(double %m) #0 {
52 ; CHECK-LABEL: d_to_i64:
53 ; CHECK: # %bb.0: # %entry
54 ; CHECK-NEXT: xscvdpsxds f0, f1
55 ; CHECK-NEXT: mffprd r3, f0
58 ; NOVSX-LABEL: d_to_i64:
59 ; NOVSX: # %bb.0: # %entry
60 ; NOVSX-NEXT: fctidz f0, f1
61 ; NOVSX-NEXT: stfd f0, -8(r1)
62 ; NOVSX-NEXT: ld r3, -8(r1)
65 %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict") #0

; double -> unsigned i64.
69 define i64 @d_to_u64(double %m) #0 {
70 ; CHECK-LABEL: d_to_u64:
71 ; CHECK: # %bb.0: # %entry
72 ; CHECK-NEXT: xscvdpuxds f0, f1
73 ; CHECK-NEXT: mffprd r3, f0
76 ; NOVSX-LABEL: d_to_u64:
77 ; NOVSX: # %bb.0: # %entry
78 ; NOVSX-NEXT: fctiduz f0, f1
79 ; NOVSX-NEXT: stfd f0, -8(r1)
80 ; NOVSX-NEXT: ld r3, -8(r1)
83 %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict") #0

; double -> unsigned i32; the zeroext return attribute forces the clrldi
; (clear upper 32 bits) in the VSX path.
87 define zeroext i32 @d_to_u32(double %m) #0 {
88 ; CHECK-LABEL: d_to_u32:
89 ; CHECK: # %bb.0: # %entry
90 ; CHECK-NEXT: xscvdpuxws f0, f1
91 ; CHECK-NEXT: mffprwz r3, f0
92 ; CHECK-NEXT: clrldi r3, r3, 32
95 ; NOVSX-LABEL: d_to_u32:
96 ; NOVSX: # %bb.0: # %entry
97 ; NOVSX-NEXT: fctiwuz f0, f1
98 ; NOVSX-NEXT: addi r3, r1, -4
99 ; NOVSX-NEXT: stfiwx f0, 0, r3
100 ; NOVSX-NEXT: lwz r3, -4(r1)
103 %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict") #0

; float -> signed i32; the signext return attribute forces extsw (VSX) /
; the sign-extending lwa reload (NOVSX).
107 define signext i32 @f_to_i32(float %m) #0 {
108 ; CHECK-LABEL: f_to_i32:
109 ; CHECK: # %bb.0: # %entry
110 ; CHECK-NEXT: xscvdpsxws f0, f1
111 ; CHECK-NEXT: mffprwz r3, f0
112 ; CHECK-NEXT: extsw r3, r3
115 ; NOVSX-LABEL: f_to_i32:
116 ; NOVSX: # %bb.0: # %entry
117 ; NOVSX-NEXT: fctiwz f0, f1
118 ; NOVSX-NEXT: addi r3, r1, -4
119 ; NOVSX-NEXT: stfiwx f0, 0, r3
120 ; NOVSX-NEXT: lwa r3, -4(r1)
123 %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict") #0

; float -> signed i64; same instruction sequence as the f64 source case
; (the conversions operate on the value already in an FPR).
127 define i64 @f_to_i64(float %m) #0 {
128 ; CHECK-LABEL: f_to_i64:
129 ; CHECK: # %bb.0: # %entry
130 ; CHECK-NEXT: xscvdpsxds f0, f1
131 ; CHECK-NEXT: mffprd r3, f0
134 ; NOVSX-LABEL: f_to_i64:
135 ; NOVSX: # %bb.0: # %entry
136 ; NOVSX-NEXT: fctidz f0, f1
137 ; NOVSX-NEXT: stfd f0, -8(r1)
138 ; NOVSX-NEXT: ld r3, -8(r1)
141 %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict") #0

; float -> unsigned i64.
145 define i64 @f_to_u64(float %m) #0 {
146 ; CHECK-LABEL: f_to_u64:
147 ; CHECK: # %bb.0: # %entry
148 ; CHECK-NEXT: xscvdpuxds f0, f1
149 ; CHECK-NEXT: mffprd r3, f0
152 ; NOVSX-LABEL: f_to_u64:
153 ; NOVSX: # %bb.0: # %entry
154 ; NOVSX-NEXT: fctiduz f0, f1
155 ; NOVSX-NEXT: stfd f0, -8(r1)
156 ; NOVSX-NEXT: ld r3, -8(r1)
159 %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict") #0

; float -> unsigned i32 with zeroext return (clrldi in the VSX path).
163 define zeroext i32 @f_to_u32(float %m) #0 {
164 ; CHECK-LABEL: f_to_u32:
165 ; CHECK: # %bb.0: # %entry
166 ; CHECK-NEXT: xscvdpuxws f0, f1
167 ; CHECK-NEXT: mffprwz r3, f0
168 ; CHECK-NEXT: clrldi r3, r3, 32
171 ; NOVSX-LABEL: f_to_u32:
172 ; NOVSX: # %bb.0: # %entry
173 ; NOVSX-NEXT: fctiwuz f0, f1
174 ; NOVSX-NEXT: addi r3, r1, -4
175 ; NOVSX-NEXT: stfiwx f0, 0, r3
176 ; NOVSX-NEXT: lwz r3, -4(r1)
179 %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict") #0
; ---------------------------------------------------------------------------
; Scalar integer -> FP conversions with !"round.dynamic" and
; !"fpexcept.strict".
; CHECK  = VSX: direct GPR->FPR move (mtfpr*) + xscv*x*[ds]p convert.
; NOVSX  = no VSX: store the GPR to the stack, reload into an FPR
;          (lfiwax/lfiwzx/lfd), then fcfid* convert.
; NOTE(review): as in the blocks above, 'blr'/'entry:'/'ret'/'}' lines
; appear to be missing from this excerpt (embedded line numbers have gaps);
; bodies are incomplete as shown.
; ---------------------------------------------------------------------------

; signed i32 -> double (mtfprwa = sign-extending GPR->FPR move).
183 define double @i32_to_d(i32 signext %m) #0 {
184 ; CHECK-LABEL: i32_to_d:
185 ; CHECK: # %bb.0: # %entry
186 ; CHECK-NEXT: mtfprwa f0, r3
187 ; CHECK-NEXT: xscvsxddp f1, f0
190 ; NOVSX-LABEL: i32_to_d:
191 ; NOVSX: # %bb.0: # %entry
192 ; NOVSX-NEXT: addi r4, r1, -4
193 ; NOVSX-NEXT: stw r3, -4(r1)
194 ; NOVSX-NEXT: lfiwax f0, 0, r4
195 ; NOVSX-NEXT: fcfid f1, f0
198 %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0

; signed i64 -> double.
202 define double @i64_to_d(i64 %m) #0 {
203 ; CHECK-LABEL: i64_to_d:
204 ; CHECK: # %bb.0: # %entry
205 ; CHECK-NEXT: mtfprd f0, r3
206 ; CHECK-NEXT: xscvsxddp f1, f0
209 ; NOVSX-LABEL: i64_to_d:
210 ; NOVSX: # %bb.0: # %entry
211 ; NOVSX-NEXT: std r3, -8(r1)
212 ; NOVSX-NEXT: lfd f0, -8(r1)
213 ; NOVSX-NEXT: fcfid f1, f0
216 %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0

; unsigned i32 -> double (mtfprwz = zero-extending move, lfiwzx reload).
220 define double @u32_to_d(i32 zeroext %m) #0 {
221 ; CHECK-LABEL: u32_to_d:
222 ; CHECK: # %bb.0: # %entry
223 ; CHECK-NEXT: mtfprwz f0, r3
224 ; CHECK-NEXT: xscvuxddp f1, f0
227 ; NOVSX-LABEL: u32_to_d:
228 ; NOVSX: # %bb.0: # %entry
229 ; NOVSX-NEXT: addi r4, r1, -4
230 ; NOVSX-NEXT: stw r3, -4(r1)
231 ; NOVSX-NEXT: lfiwzx f0, 0, r4
232 ; NOVSX-NEXT: fcfidu f1, f0
235 %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0

; unsigned i64 -> double.
239 define double @u64_to_d(i64 %m) #0 {
240 ; CHECK-LABEL: u64_to_d:
241 ; CHECK: # %bb.0: # %entry
242 ; CHECK-NEXT: mtfprd f0, r3
243 ; CHECK-NEXT: xscvuxddp f1, f0
246 ; NOVSX-LABEL: u64_to_d:
247 ; NOVSX: # %bb.0: # %entry
248 ; NOVSX-NEXT: std r3, -8(r1)
249 ; NOVSX-NEXT: lfd f0, -8(r1)
250 ; NOVSX-NEXT: fcfidu f1, f0
253 %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0

; signed i32 -> float (single-precision converts: xscvsxdsp / fcfids).
257 define float @i32_to_f(i32 signext %m) #0 {
258 ; CHECK-LABEL: i32_to_f:
259 ; CHECK: # %bb.0: # %entry
260 ; CHECK-NEXT: mtfprwa f0, r3
261 ; CHECK-NEXT: xscvsxdsp f1, f0
264 ; NOVSX-LABEL: i32_to_f:
265 ; NOVSX: # %bb.0: # %entry
266 ; NOVSX-NEXT: addi r4, r1, -4
267 ; NOVSX-NEXT: stw r3, -4(r1)
268 ; NOVSX-NEXT: lfiwax f0, 0, r4
269 ; NOVSX-NEXT: fcfids f1, f0
272 %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0

; signed i64 -> float.
276 define float @i64_to_f(i64 %m) #0 {
277 ; CHECK-LABEL: i64_to_f:
278 ; CHECK: # %bb.0: # %entry
279 ; CHECK-NEXT: mtfprd f0, r3
280 ; CHECK-NEXT: xscvsxdsp f1, f0
283 ; NOVSX-LABEL: i64_to_f:
284 ; NOVSX: # %bb.0: # %entry
285 ; NOVSX-NEXT: std r3, -8(r1)
286 ; NOVSX-NEXT: lfd f0, -8(r1)
287 ; NOVSX-NEXT: fcfids f1, f0
290 %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0

; unsigned i32 -> float.
294 define float @u32_to_f(i32 zeroext %m) #0 {
295 ; CHECK-LABEL: u32_to_f:
296 ; CHECK: # %bb.0: # %entry
297 ; CHECK-NEXT: mtfprwz f0, r3
298 ; CHECK-NEXT: xscvuxdsp f1, f0
301 ; NOVSX-LABEL: u32_to_f:
302 ; NOVSX: # %bb.0: # %entry
303 ; NOVSX-NEXT: addi r4, r1, -4
304 ; NOVSX-NEXT: stw r3, -4(r1)
305 ; NOVSX-NEXT: lfiwzx f0, 0, r4
306 ; NOVSX-NEXT: fcfidus f1, f0
309 %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0

; unsigned i64 -> float.
313 define float @u64_to_f(i64 %m) #0 {
314 ; CHECK-LABEL: u64_to_f:
315 ; CHECK: # %bb.0: # %entry
316 ; CHECK-NEXT: mtfprd f0, r3
317 ; CHECK-NEXT: xscvuxdsp f1, f0
320 ; NOVSX-LABEL: u64_to_f:
321 ; NOVSX: # %bb.0: # %entry
322 ; NOVSX-NEXT: std r3, -8(r1)
323 ; NOVSX-NEXT: lfd f0, -8(r1)
324 ; NOVSX-NEXT: fcfidus f1, f0
327 %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
; ---------------------------------------------------------------------------
; MIR-level checks (the -stop-after=machine-cp RUN line): when the constrained
; intrinsics are called with !"fpexcept.ignore", the selected conversion
; machine instructions must carry the 'nofpexcept' flag, so later passes may
; reorder/delete them.  The volatile stores keep all four conversions live.
; NOTE(review): 'ret'/'}' lines appear to be missing from this excerpt
; (embedded line numbers have gaps); bodies are incomplete as shown.
; ---------------------------------------------------------------------------

; f64 -> i32/u32/i64/u64 with exceptions ignored.
331 define void @fptoint_nofpexcept_f64(double %m, i32* %addr1, i64* %addr2) {
332 ; MIR-LABEL: name: fptoint_nofpexcept_f64
333 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
334 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
335 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXDS
336 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXDS
338 %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.ignore") #0
339 %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.ignore") #0
340 %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.ignore") #0
341 %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.ignore") #0
342 store volatile i32 %conv1, i32* %addr1, align 4
343 store volatile i32 %conv2, i32* %addr1, align 4
344 store volatile i64 %conv3, i64* %addr2, align 8
345 store volatile i64 %conv4, i64* %addr2, align 8

; f32 -> integer: same XSCVDP* opcodes are expected (the source value is
; already in a floating-point register in double format).
349 define void @fptoint_nofpexcept_f32(float %m, i32* %addr1, i64* %addr2) {
350 ; MIR-LABEL: name: fptoint_nofpexcept_f32
351 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXWS
352 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXWS
353 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPSXDS
354 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVDPUXDS
356 %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.ignore") #0
357 %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.ignore") #0
358 %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.ignore") #0
359 %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.ignore") #0
360 store volatile i32 %conv1, i32* %addr1, align 4
361 store volatile i32 %conv2, i32* %addr1, align 4
362 store volatile i64 %conv3, i64* %addr2, align 8
363 store volatile i64 %conv4, i64* %addr2, align 8

; i32 -> f32/f64 (signed and unsigned) with exceptions ignored.
367 define void @inttofp_nofpexcept_i32(i32 %m, float* %addr1, double* %addr2) {
368 ; MIR-LABEL: name: inttofp_nofpexcept_i32
369 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
370 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
371 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDDP
372 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDDP
374 %conv1 = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
375 %conv2 = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
376 %conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
377 %conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
378 store volatile float %conv1, float* %addr1, align 4
379 store volatile float %conv2, float* %addr1, align 4
380 store volatile double %conv3, double* %addr2, align 8
381 store volatile double %conv4, double* %addr2, align 8

; i64 -> f32/f64 (signed and unsigned) with exceptions ignored.
385 define void @inttofp_nofpexcept_i64(i64 %m, float* %addr1, double* %addr2) {
386 ; MIR-LABEL: name: inttofp_nofpexcept_i64
387 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDSP
388 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDSP
389 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVSXDDP
390 ; MIR: renamable $f{{[0-9]+}} = nofpexcept XSCVUXDDP
392 %conv1 = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
393 %conv2 = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
394 %conv3 = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
395 %conv4 = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
396 store volatile float %conv1, float* %addr1, align 4
397 store volatile float %conv2, float* %addr1, align 4
398 store volatile double %conv3, double* %addr2, align 8
399 store volatile double %conv4, double* %addr2, align 8

; Vector case: strict sitofp of <2 x i16> must also select a nofpexcept
; vector convert (XVCVSXDDP) when exceptions are ignored.
403 define <2 x double> @inttofp_nofpexcept_vec(<2 x i16> %m) {
404 ; MIR-LABEL: name: inttofp_nofpexcept_vec
405 ; MIR: renamable $v{{[0-9]+}} = nofpexcept XVCVSXDDP
407 %conv = tail call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16> %m, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
408 ret <2 x double> %conv
; Vector constrained-sitofp intrinsic used by @inttofp_nofpexcept_vec.
411 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
; #0 marks callers/calls as strictfp, required around constrained intrinsics.
413 attributes #0 = { strictfp }