; Test strict multiplication of two f64s, producing an f128 result.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
declare fp128 @llvm.experimental.constrained.fmul.f128(fp128, fp128, metadata, metadata)
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
declare double @llvm.experimental.constrained.fptrunc.f64.f128(fp128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.fpext.f128.f64(double, metadata)

; External call used by @f7 to force values to be spilled across it.
declare double @foo()
; Check register multiplication. "mxdbr %f0, %f2" is not valid from LLVM's
; point of view, because %f2 is the low register of the FP128 %f0. Pass the
; multiplier in %f4 instead.
define void @f1(double %f1, double %dummy, double %f2, ptr %dst) #0 {
; CHECK-LABEL: f1:
; CHECK: mxdbr %f0, %f4
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
  %f1x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %f1,
                        metadata !"fpexcept.strict") #0
  %f2x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %f2,
                        metadata !"fpexcept.strict") #0
  %res = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %f1x, fp128 %f2x,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  store fp128 %res, ptr %dst
  ret void
}
; Check the low end of the MXDB range.
define void @f2(double %f1, ptr %ptr, ptr %dst) #0 {
; CHECK-LABEL: f2:
; CHECK: mxdb %f0, 0(%r2)
; CHECK: std %f0, 0(%r3)
; CHECK: std %f2, 8(%r3)
; CHECK: br %r14
  %f2 = load double, ptr %ptr
  %f1x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %f1,
                        metadata !"fpexcept.strict") #0
  %f2x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %f2,
                        metadata !"fpexcept.strict") #0
  %res = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %f1x, fp128 %f2x,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  store fp128 %res, ptr %dst
  ret void
}
; Check the high end of the aligned MXDB range.
define void @f3(double %f1, ptr %base, ptr %dst) #0 {
; CHECK-LABEL: f3:
; CHECK: mxdb %f0, 4088(%r2)
; CHECK: std %f0, 0(%r3)
; CHECK: std %f2, 8(%r3)
; CHECK: br %r14
  %ptr = getelementptr double, ptr %base, i64 511
  %f2 = load double, ptr %ptr
  %f1x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %f1,
                        metadata !"fpexcept.strict") #0
  %f2x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %f2,
                        metadata !"fpexcept.strict") #0
  %res = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %f1x, fp128 %f2x,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  store fp128 %res, ptr %dst
  ret void
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define void @f4(double %f1, ptr %base, ptr %dst) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: mxdb %f0, 0(%r2)
; CHECK: std %f0, 0(%r3)
; CHECK: std %f2, 8(%r3)
; CHECK: br %r14
  %ptr = getelementptr double, ptr %base, i64 512
  %f2 = load double, ptr %ptr
  %f1x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %f1,
                        metadata !"fpexcept.strict") #0
  %f2x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %f2,
                        metadata !"fpexcept.strict") #0
  %res = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %f1x, fp128 %f2x,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  store fp128 %res, ptr %dst
  ret void
}
; Check negative displacements, which also need separate address logic.
define void @f5(double %f1, ptr %base, ptr %dst) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: mxdb %f0, 0(%r2)
; CHECK: std %f0, 0(%r3)
; CHECK: std %f2, 8(%r3)
; CHECK: br %r14
  %ptr = getelementptr double, ptr %base, i64 -1
  %f2 = load double, ptr %ptr
  %f1x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %f1,
                        metadata !"fpexcept.strict") #0
  %f2x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %f2,
                        metadata !"fpexcept.strict") #0
  %res = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %f1x, fp128 %f2x,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  store fp128 %res, ptr %dst
  ret void
}
; Check that MXDB allows indices.
define void @f6(double %f1, ptr %base, i64 %index, ptr %dst) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: mxdb %f0, 800(%r1,%r2)
; CHECK: std %f0, 0(%r4)
; CHECK: std %f2, 8(%r4)
; CHECK: br %r14
  %ptr1 = getelementptr double, ptr %base, i64 %index
  %ptr2 = getelementptr double, ptr %ptr1, i64 100
  %f2 = load double, ptr %ptr2
  %f1x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %f1,
                        metadata !"fpexcept.strict") #0
  %f2x = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %f2,
                        metadata !"fpexcept.strict") #0
  %res = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %f1x, fp128 %f2x,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  store fp128 %res, ptr %dst
  ret void
}
; Check that multiplications of spilled values can use MXDB rather than MXDBR.
define double @f7(ptr %ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK: mxdb %f0, 160(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr double, ptr %ptr0, i64 2
  %ptr2 = getelementptr double, ptr %ptr0, i64 4
  %ptr3 = getelementptr double, ptr %ptr0, i64 6
  %ptr4 = getelementptr double, ptr %ptr0, i64 8
  %ptr5 = getelementptr double, ptr %ptr0, i64 10
  %ptr6 = getelementptr double, ptr %ptr0, i64 12
  %ptr7 = getelementptr double, ptr %ptr0, i64 14
  %ptr8 = getelementptr double, ptr %ptr0, i64 16
  %ptr9 = getelementptr double, ptr %ptr0, i64 18
  %ptr10 = getelementptr double, ptr %ptr0, i64 20

  %val0 = load double, ptr %ptr0
  %val1 = load double, ptr %ptr1
  %val2 = load double, ptr %ptr2
  %val3 = load double, ptr %ptr3
  %val4 = load double, ptr %ptr4
  %val5 = load double, ptr %ptr5
  %val6 = load double, ptr %ptr6
  %val7 = load double, ptr %ptr7
  %val8 = load double, ptr %ptr8
  %val9 = load double, ptr %ptr9
  %val10 = load double, ptr %ptr10

  %frob0 = call double @llvm.experimental.constrained.fadd.f64(
                        double %val0, double %val0,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob1 = call double @llvm.experimental.constrained.fadd.f64(
                        double %val1, double %val1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob2 = call double @llvm.experimental.constrained.fadd.f64(
                        double %val2, double %val2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob3 = call double @llvm.experimental.constrained.fadd.f64(
                        double %val3, double %val3,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob4 = call double @llvm.experimental.constrained.fadd.f64(
                        double %val4, double %val4,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob5 = call double @llvm.experimental.constrained.fadd.f64(
                        double %val5, double %val5,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob6 = call double @llvm.experimental.constrained.fadd.f64(
                        double %val6, double %val6,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob7 = call double @llvm.experimental.constrained.fadd.f64(
                        double %val7, double %val7,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob8 = call double @llvm.experimental.constrained.fadd.f64(
                        double %val8, double %val8,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob9 = call double @llvm.experimental.constrained.fadd.f64(
                        double %val9, double %val9,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %frob10 = call double @llvm.experimental.constrained.fadd.f64(
                        double %val10, double %val10,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  store double %frob0, ptr %ptr0
  store double %frob1, ptr %ptr1
  store double %frob2, ptr %ptr2
  store double %frob3, ptr %ptr3
  store double %frob4, ptr %ptr4
  store double %frob5, ptr %ptr5
  store double %frob6, ptr %ptr6
  store double %frob7, ptr %ptr7
  store double %frob8, ptr %ptr8
  store double %frob9, ptr %ptr9
  store double %frob10, ptr %ptr10

  %ret = call double @foo() #0

  %accext0 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %ret,
                        metadata !"fpexcept.strict") #0
  %ext0 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %frob0,
                        metadata !"fpexcept.strict") #0
  %mul0 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %accext0, fp128 %ext0,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra0 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %mul0, fp128 0xL00000000000000003fff000001000000,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc0 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
                        fp128 %extra0,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext1 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %trunc0,
                        metadata !"fpexcept.strict") #0
  %ext1 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %frob1,
                        metadata !"fpexcept.strict") #0
  %mul1 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %accext1, fp128 %ext1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra1 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %mul1, fp128 0xL00000000000000003fff000002000000,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc1 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
                        fp128 %extra1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext2 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %trunc1,
                        metadata !"fpexcept.strict") #0
  %ext2 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %frob2,
                        metadata !"fpexcept.strict") #0
  %mul2 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %accext2, fp128 %ext2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra2 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %mul2, fp128 0xL00000000000000003fff000003000000,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc2 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
                        fp128 %extra2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext3 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %trunc2,
                        metadata !"fpexcept.strict") #0
  %ext3 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %frob3,
                        metadata !"fpexcept.strict") #0
  %mul3 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %accext3, fp128 %ext3,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra3 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %mul3, fp128 0xL00000000000000003fff000004000000,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc3 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
                        fp128 %extra3,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext4 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %trunc3,
                        metadata !"fpexcept.strict") #0
  %ext4 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %frob4,
                        metadata !"fpexcept.strict") #0
  %mul4 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %accext4, fp128 %ext4,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra4 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %mul4, fp128 0xL00000000000000003fff000005000000,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc4 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
                        fp128 %extra4,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext5 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %trunc4,
                        metadata !"fpexcept.strict") #0
  %ext5 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %frob5,
                        metadata !"fpexcept.strict") #0
  %mul5 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %accext5, fp128 %ext5,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra5 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %mul5, fp128 0xL00000000000000003fff000006000000,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc5 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
                        fp128 %extra5,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext6 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %trunc5,
                        metadata !"fpexcept.strict") #0
  %ext6 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %frob6,
                        metadata !"fpexcept.strict") #0
  %mul6 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %accext6, fp128 %ext6,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra6 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %mul6, fp128 0xL00000000000000003fff000007000000,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc6 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
                        fp128 %extra6,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext7 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %trunc6,
                        metadata !"fpexcept.strict") #0
  %ext7 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %frob7,
                        metadata !"fpexcept.strict") #0
  %mul7 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %accext7, fp128 %ext7,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra7 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %mul7, fp128 0xL00000000000000003fff000008000000,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc7 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
                        fp128 %extra7,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext8 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %trunc7,
                        metadata !"fpexcept.strict") #0
  %ext8 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %frob8,
                        metadata !"fpexcept.strict") #0
  %mul8 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %accext8, fp128 %ext8,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra8 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %mul8, fp128 0xL00000000000000003fff000009000000,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc8 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
                        fp128 %extra8,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  %accext9 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %trunc8,
                        metadata !"fpexcept.strict") #0
  %ext9 = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
                        double %frob9,
                        metadata !"fpexcept.strict") #0
  %mul9 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %accext9, fp128 %ext9,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %extra9 = call fp128 @llvm.experimental.constrained.fmul.f128(
                        fp128 %mul9, fp128 0xL00000000000000003fff00000a000000,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %trunc9 = call double @llvm.experimental.constrained.fptrunc.f64.f128(
                        fp128 %extra9,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  ret double %trunc9
}
attributes #0 = { strictfp }