; Test strict multiplication of two f64s, producing an f64 result.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
; RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
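;
; The z13 run additionally enables the vector facility; CHECK-SCALAR lines
; are therefore only checked for the scalar-only z10 run.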

declare double @foo()
declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)

; Check register multiplication.
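; (MDBR is the register-register form of the multiply; the MDB tests below
; use the register-memory form instead.)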
define double @f1(double %f1, double %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: mdbr %f0, %f2
; CHECK: br %r14
  %res = call double @llvm.experimental.constrained.fmul.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %res
}

; Check the low end of the MDB range.
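; (MDB has a 12-bit unsigned displacement field, so the low end of its
; range is a displacement of 0.)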
define double @f2(double %f1, double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: mdb %f0, 0(%r2)
; CHECK: br %r14
  %f2 = load double, double *%ptr
  %res = call double @llvm.experimental.constrained.fmul.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %res
}

; Check the high end of the aligned MDB range.
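; (The 12-bit displacement field tops out at 4095; 4088 = 511 * 8 is the
; largest doubleword-aligned value that fits.)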
define double @f3(double %f1, double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: mdb %f0, 4088(%r2)
; CHECK: br %r14
  %ptr = getelementptr double, double *%base, i64 511
  %f2 = load double, double *%ptr
  %res = call double @llvm.experimental.constrained.fmul.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %res
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
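; (512 * 8 = 4096 no longer fits in the 12-bit displacement field, so the
; base register has to be adjusted first.)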
define double @f4(double %f1, double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: mdb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr double, double *%base, i64 512
  %f2 = load double, double *%ptr
  %res = call double @llvm.experimental.constrained.fmul.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %res
}

; Check negative displacements, which also need separate address logic.
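; (The displacement field is unsigned, so an offset of -8 cannot be encoded
; in the instruction itself.)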
define double @f5(double %f1, double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: mdb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr double, double *%base, i64 -1
  %f2 = load double, double *%ptr
  %res = call double @llvm.experimental.constrained.fmul.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %res
}

; Check that MDB allows indices.
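; (The SLLG scales the index by 8 for doubleword elements; the constant
; offset 100 * 8 = 800 folds into the displacement.)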
define double @f6(double %f1, double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: mdb %f0, 800(%r1,%r2)
; CHECK: br %r14
  %ptr1 = getelementptr double, double *%base, i64 %index
  %ptr2 = getelementptr double, double *%ptr1, i64 100
  %f2 = load double, double *%ptr2
  %res = call double @llvm.experimental.constrained.fmul.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %res
}

; Check that multiplications of spilled values can use MDB rather than MDBR.
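; (The call to @foo forces most of the loaded values to be kept in stack
; slots across it; the reload can then be folded into the multiplication as
; an MDB from the spill slot instead of a separate load plus MDBR.)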
define double @f7(double *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: mdb %f0, 160(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr double, double *%ptr0, i64 2
  %ptr2 = getelementptr double, double *%ptr0, i64 4
  %ptr3 = getelementptr double, double *%ptr0, i64 6
  %ptr4 = getelementptr double, double *%ptr0, i64 8
  %ptr5 = getelementptr double, double *%ptr0, i64 10
  %ptr6 = getelementptr double, double *%ptr0, i64 12
  %ptr7 = getelementptr double, double *%ptr0, i64 14
  %ptr8 = getelementptr double, double *%ptr0, i64 16
  %ptr9 = getelementptr double, double *%ptr0, i64 18
  %ptr10 = getelementptr double, double *%ptr0, i64 20

  %val0 = load double, double *%ptr0
  %val1 = load double, double *%ptr1
  %val2 = load double, double *%ptr2
  %val3 = load double, double *%ptr3
  %val4 = load double, double *%ptr4
  %val5 = load double, double *%ptr5
  %val6 = load double, double *%ptr6
  %val7 = load double, double *%ptr7
  %val8 = load double, double *%ptr8
  %val9 = load double, double *%ptr9
  %val10 = load double, double *%ptr10

  %ret = call double @foo() #0

  %mul0 = call double @llvm.experimental.constrained.fmul.f64(
                        double %ret, double %val0,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %mul1 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul0, double %val1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %mul2 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul1, double %val2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %mul3 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul2, double %val3,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %mul4 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul3, double %val4,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %mul5 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul4, double %val5,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %mul6 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul5, double %val6,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %mul7 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul6, double %val7,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %mul8 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul7, double %val8,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %mul9 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul8, double %val9,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %mul10 = call double @llvm.experimental.constrained.fmul.f64(
                        double %mul9, double %val10,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret double %mul10
}

attributes #0 = { strictfp }