; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names < %s -mcpu=e500 \
; RUN:   -mtriple=powerpc-unknown-linux-gnu -mattr=spe | FileCheck %s \
; RUN:   -check-prefix=SPE
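
; This test covers the constrained (strict) FP-to-integer and integer-to-FP
; conversion intrinsics on 32-bit PowerPC with the SPE feature. Conversions
; that fit in 32 bits are expected to select SPE instructions (efdctsiz,
; efdctuiz, efsctsiz, efsctuiz, efdcfsi, efdcfui, efscfsi, efscfui), while
; 64-bit conversions are lowered to compiler-rt libcalls (__fixdfdi,
; __fixunsdfdi, __fixsfdi, __fixunssfdi, __floatdidf, __floatundidf,
; __floatdisf, __floatundisf).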

declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)

declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)

declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)

declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
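
; The fptosi/fptoui intrinsics take a single metadata operand, the exception
; behavior ("fpexcept.strict" below); the sitofp/uitofp intrinsics additionally
; take the rounding mode ("round.dynamic") as their first metadata operand.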

define i32 @d_to_i32(double %m) #0 {
; SPE-LABEL: d_to_i32:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    evmergelo r3, r3, r4
; SPE-NEXT:    efdctsiz r3, r3
; SPE-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

define i64 @d_to_i64(double %m) #0 {
; SPE-LABEL: d_to_i64:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    evmergelo r4, r3, r4
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT:    bl __fixdfdi
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

define i64 @d_to_u64(double %m) #0 {
; SPE-LABEL: d_to_u64:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    evmergelo r4, r3, r4
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT:    bl __fixunsdfdi
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

define zeroext i32 @d_to_u32(double %m) #0 {
; SPE-LABEL: d_to_u32:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    evmergelo r3, r3, r4
; SPE-NEXT:    efdctuiz r3, r3
; SPE-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

define signext i32 @f_to_i32(float %m) #0 {
; SPE-LABEL: f_to_i32:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efsctsiz r3, r3
; SPE-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

define i64 @f_to_i64(float %m) #0 {
; SPE-LABEL: f_to_i64:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __fixsfdi
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

define i64 @f_to_u64(float %m) #0 {
; SPE-LABEL: f_to_u64:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __fixunssfdi
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

define zeroext i32 @f_to_u32(float %m) #0 {
; SPE-LABEL: f_to_u32:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efsctuiz r3, r3
; SPE-NEXT:    blr
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

define double @i32_to_d(i32 signext %m) #0 {
; SPE-LABEL: i32_to_d:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efdcfsi r4, r3
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}

define double @i64_to_d(i64 %m) #0 {
; SPE-LABEL: i64_to_d:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __floatdidf
; SPE-NEXT:    evmergelo r4, r3, r4
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}

define double @u32_to_d(i32 zeroext %m) #0 {
; SPE-LABEL: u32_to_d:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efdcfui r4, r3
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}

define double @u64_to_d(i64 %m) #0 {
; SPE-LABEL: u64_to_d:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __floatundidf
; SPE-NEXT:    evmergelo r4, r3, r4
; SPE-NEXT:    evmergehi r3, r4, r4
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT:    # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %conv
}

define float @i32_to_f(i32 signext %m) #0 {
; SPE-LABEL: i32_to_f:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efscfsi r3, r3
; SPE-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}

define float @i64_to_f(i64 %m) #0 {
; SPE-LABEL: i64_to_f:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __floatdisf
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}

define float @u32_to_f(i32 zeroext %m) #0 {
; SPE-LABEL: u32_to_f:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    efscfui r3, r3
; SPE-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}

define float @u64_to_f(i64 %m) #0 {
; SPE-LABEL: u64_to_f:
; SPE:       # %bb.0: # %entry
; SPE-NEXT:    mflr r0
; SPE-NEXT:    stwu r1, -16(r1)
; SPE-NEXT:    stw r0, 20(r1)
; SPE-NEXT:    .cfi_def_cfa_offset 16
; SPE-NEXT:    .cfi_offset lr, 4
; SPE-NEXT:    bl __floatundisf
; SPE-NEXT:    lwz r0, 20(r1)
; SPE-NEXT:    addi r1, r1, 16
; SPE-NEXT:    mtlr r0
; SPE-NEXT:    blr
entry:
  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret float %conv
}

attributes #0 = { strictfp }