; Test strict multiplication of two f32s, producing an f64 result.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
declare float @foo()
declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
; Check register multiplication: both operands in FPRs selects MDEBR.
define double @f1(float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: mdebr %f0, %f2
; CHECK: br %r14
  %f1x = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %f1,
                        metadata !"fpexcept.strict") #0
  %f2x = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %f2,
                        metadata !"fpexcept.strict") #0
  %res = call double @llvm.experimental.constrained.fmul.f64(
                        double %f1x, double %f2x,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %res
}
; Check the low end of the MDEB range: a fused load at displacement 0.
define double @f2(float %f1, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: mdeb %f0, 0(%r2)
; CHECK: br %r14
  %f2 = load float, float *%ptr
  %f1x = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %f1,
                        metadata !"fpexcept.strict") #0
  %f2x = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %f2,
                        metadata !"fpexcept.strict") #0
  %res = call double @llvm.experimental.constrained.fmul.f64(
                        double %f1x, double %f2x,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %res
}
; Check the high end of the aligned MDEB range (12-bit displacement, max 4092).
define double @f3(float %f1, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: mdeb %f0, 4092(%r2)
; CHECK: br %r14
  %ptr = getelementptr float, float *%base, i64 1023
  %f2 = load float, float *%ptr
  %f1x = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %f1,
                        metadata !"fpexcept.strict") #0
  %f2x = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %f2,
                        metadata !"fpexcept.strict") #0
  %res = call double @llvm.experimental.constrained.fmul.f64(
                        double %f1x, double %f2x,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define double @f4(float %f1, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: mdeb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr float, float *%base, i64 1024
  %f2 = load float, float *%ptr
  %f1x = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %f1,
                        metadata !"fpexcept.strict") #0
  %f2x = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %f2,
                        metadata !"fpexcept.strict") #0
  %res = call double @llvm.experimental.constrained.fmul.f64(
                        double %f1x, double %f2x,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %res
}
; Check negative displacements, which also need separate address logic
; (MDEB has an unsigned 12-bit displacement, so -4 must be folded into the base).
define double @f5(float %f1, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: mdeb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr float, float *%base, i64 -1
  %f2 = load float, float *%ptr
  %f1x = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %f1,
                        metadata !"fpexcept.strict") #0
  %f2x = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %f2,
                        metadata !"fpexcept.strict") #0
  %res = call double @llvm.experimental.constrained.fmul.f64(
                        double %f1x, double %f2x,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %res
}
; Check that MDEB allows indices (base + scaled index + displacement).
define double @f6(float %f1, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: mdeb %f0, 400(%r1,%r2)
; CHECK: br %r14
  %ptr1 = getelementptr float, float *%base, i64 %index
  %ptr2 = getelementptr float, float *%ptr1, i64 100
  %f2 = load float, float *%ptr2
  %f1x = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %f1,
                        metadata !"fpexcept.strict") #0
  %f2x = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %f2,
                        metadata !"fpexcept.strict") #0
  %res = call double @llvm.experimental.constrained.fmul.f64(
                        double %f1x, double %f2x,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %res
}
; Check that multiplications of spilled values can use MDEB rather than MDEBR.
; Eleven live float values plus the call to @foo force some of the %frobN
; values to be spilled to the stack; the multiply should then fold the
; reload directly (mdeb from a stack slot off %r15).
define float @f7(float *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK: mdeb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr float, float *%ptr0, i64 2
  %ptr2 = getelementptr float, float *%ptr0, i64 4
  %ptr3 = getelementptr float, float *%ptr0, i64 6
  %ptr4 = getelementptr float, float *%ptr0, i64 8
  %ptr5 = getelementptr float, float *%ptr0, i64 10
  %ptr6 = getelementptr float, float *%ptr0, i64 12
  %ptr7 = getelementptr float, float *%ptr0, i64 14
  %ptr8 = getelementptr float, float *%ptr0, i64 16
  %ptr9 = getelementptr float, float *%ptr0, i64 18
  %ptr10 = getelementptr float, float *%ptr0, i64 20

  %val0 = load float, float *%ptr0
  %val1 = load float, float *%ptr1
  %val2 = load float, float *%ptr2
  %val3 = load float, float *%ptr3
  %val4 = load float, float *%ptr4
  %val5 = load float, float *%ptr5
  %val6 = load float, float *%ptr6
  %val7 = load float, float *%ptr7
  %val8 = load float, float *%ptr8
  %val9 = load float, float *%ptr9
  %val10 = load float, float *%ptr10

  ; "Frob" each value so that the loads above cannot be rematerialized
  ; and the values must stay live (in registers or spill slots).
  %frob0 = call float @llvm.experimental.constrained.fadd.f32(
                        float %val0, float %val0,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob1 = call float @llvm.experimental.constrained.fadd.f32(
                        float %val1, float %val1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob2 = call float @llvm.experimental.constrained.fadd.f32(
                        float %val2, float %val2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob3 = call float @llvm.experimental.constrained.fadd.f32(
                        float %val3, float %val3,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob4 = call float @llvm.experimental.constrained.fadd.f32(
                        float %val4, float %val4,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob5 = call float @llvm.experimental.constrained.fadd.f32(
                        float %val5, float %val5,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob6 = call float @llvm.experimental.constrained.fadd.f32(
                        float %val6, float %val6,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob7 = call float @llvm.experimental.constrained.fadd.f32(
                        float %val7, float %val7,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob8 = call float @llvm.experimental.constrained.fadd.f32(
                        float %val8, float %val8,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob9 = call float @llvm.experimental.constrained.fadd.f32(
                        float %val9, float %val9,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob10 = call float @llvm.experimental.constrained.fadd.f32(
                        float %val10, float %val10,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  store float %frob0, float *%ptr0
  store float %frob1, float *%ptr1
  store float %frob2, float *%ptr2
  store float %frob3, float *%ptr3
  store float %frob4, float *%ptr4
  store float %frob5, float *%ptr5
  store float %frob6, float *%ptr6
  store float %frob7, float *%ptr7
  store float %frob8, float *%ptr8
  store float %frob9, float *%ptr9
  store float %frob10, float *%ptr10

  ; The call clobbers all FPRs, so the %frobN values must survive in spill
  ; slots across it.
  %ret = call float @foo() #0

  ; Chain of extending multiplications: each step widens the accumulator and
  ; one spilled %frobN and multiplies them, keeping a serial dependency.
  %accext0 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %ret,
                        metadata !"fpexcept.strict") #0
  %ext0 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %frob0,
                        metadata !"fpexcept.strict") #0
  %mul0 = call double @llvm.experimental.constrained.fmul.f64(
                        double %accext0, double %ext0,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra0 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul0, double 1.01,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc0 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
                        double %extra0,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext1 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %trunc0,
                        metadata !"fpexcept.strict") #0
  %ext1 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %frob1,
                        metadata !"fpexcept.strict") #0
  %mul1 = call double @llvm.experimental.constrained.fmul.f64(
                        double %accext1, double %ext1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra1 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul1, double 1.11,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc1 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
                        double %extra1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext2 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %trunc1,
                        metadata !"fpexcept.strict") #0
  %ext2 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %frob2,
                        metadata !"fpexcept.strict") #0
  %mul2 = call double @llvm.experimental.constrained.fmul.f64(
                        double %accext2, double %ext2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra2 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul2, double 1.21,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc2 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
                        double %extra2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext3 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %trunc2,
                        metadata !"fpexcept.strict") #0
  %ext3 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %frob3,
                        metadata !"fpexcept.strict") #0
  %mul3 = call double @llvm.experimental.constrained.fmul.f64(
                        double %accext3, double %ext3,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra3 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul3, double 1.31,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc3 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
                        double %extra3,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext4 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %trunc3,
                        metadata !"fpexcept.strict") #0
  %ext4 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %frob4,
                        metadata !"fpexcept.strict") #0
  %mul4 = call double @llvm.experimental.constrained.fmul.f64(
                        double %accext4, double %ext4,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra4 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul4, double 1.41,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc4 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
                        double %extra4,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext5 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %trunc4,
                        metadata !"fpexcept.strict") #0
  %ext5 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %frob5,
                        metadata !"fpexcept.strict") #0
  %mul5 = call double @llvm.experimental.constrained.fmul.f64(
                        double %accext5, double %ext5,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra5 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul5, double 1.51,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc5 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
                        double %extra5,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext6 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %trunc5,
                        metadata !"fpexcept.strict") #0
  %ext6 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %frob6,
                        metadata !"fpexcept.strict") #0
  %mul6 = call double @llvm.experimental.constrained.fmul.f64(
                        double %accext6, double %ext6,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra6 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul6, double 1.61,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc6 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
                        double %extra6,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext7 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %trunc6,
                        metadata !"fpexcept.strict") #0
  %ext7 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %frob7,
                        metadata !"fpexcept.strict") #0
  %mul7 = call double @llvm.experimental.constrained.fmul.f64(
                        double %accext7, double %ext7,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra7 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul7, double 1.71,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc7 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
                        double %extra7,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext8 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %trunc7,
                        metadata !"fpexcept.strict") #0
  %ext8 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %frob8,
                        metadata !"fpexcept.strict") #0
  %mul8 = call double @llvm.experimental.constrained.fmul.f64(
                        double %accext8, double %ext8,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra8 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul8, double 1.81,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc8 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
                        double %extra8,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext9 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %trunc8,
                        metadata !"fpexcept.strict") #0
  %ext9 = call double @llvm.experimental.constrained.fpext.f64.f32(
                        float %frob9,
                        metadata !"fpexcept.strict") #0
  %mul9 = call double @llvm.experimental.constrained.fmul.f64(
                        double %accext9, double %ext9,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra9 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul9, double 1.91,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc9 = call float @llvm.experimental.constrained.fptrunc.f32.f64(
                        double %extra9,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  ret float %trunc9
}
; All functions and calls above are marked strictfp so the constrained
; FP semantics (fpexcept.strict / round.dynamic) are honored.
attributes #0 = { strictfp }