; Test strict 64-bit floating-point addition.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
; RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -verify-machineinstrs | FileCheck %s
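;
; The z10 run is checked against both the CHECK and CHECK-SCALAR patterns;
; the z13 run is checked against the plain CHECK patterns only.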

declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
declare double @foo()
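; In each call below, the metadata operands request the dynamic rounding
; mode ("round.dynamic") and strict floating-point exception semantics
; ("fpexcept.strict").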

; Check register addition.
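; f1 uses the register-register form ADBR; the remaining functions exercise
; the memory form ADB.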
define double @f1(double %f1, double %f2) {
; CHECK-LABEL: f1:
; CHECK: adbr %f0, %f2
  %res = call double @llvm.experimental.constrained.fadd.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}

; Check the low end of the ADB range.
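; ADB encodes a 12-bit unsigned displacement, so byte offsets 0 to 4095 from
; the base register can be used directly; offset 0 is the low end.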
define double @f2(double %f1, double *%ptr) {
; CHECK-LABEL: f2:
; CHECK: adb %f0, 0(%r2)
  %f2 = load double, double *%ptr
  %res = call double @llvm.experimental.constrained.fadd.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}

; Check the high end of the aligned ADB range.
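; 511 * 8 = 4088 is the largest doubleword-aligned offset that still fits in
; the 12-bit displacement field.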
define double @f3(double %f1, double *%base) {
; CHECK-LABEL: f3:
; CHECK: adb %f0, 4088(%r2)
  %ptr = getelementptr double, double *%base, i64 511
  %f2 = load double, double *%ptr
  %res = call double @llvm.experimental.constrained.fadd.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
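; 512 * 8 = 4096 no longer fits in the displacement field, so the base
; register is expected to be adjusted first (here with AGHI).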
define double @f4(double %f1, double *%base) {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: adb %f0, 0(%r2)
  %ptr = getelementptr double, double *%base, i64 512
  %f2 = load double, double *%ptr
  %res = call double @llvm.experimental.constrained.fadd.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}

; Check negative displacements, which also need separate address logic.
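; The displacement is unsigned, so a byte offset of -8 cannot be encoded in
; ADB and the base must be adjusted before the addition.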
define double @f5(double %f1, double *%base) {
; CHECK-LABEL: f5:
; CHECK: adb %f0, 0(%r2)
  %ptr = getelementptr double, double *%base, i64 -1
  %f2 = load double, double *%ptr
  %res = call double @llvm.experimental.constrained.fadd.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}

; Check that ADB allows indices.
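; The variable index is scaled by 8 into an index register (SLLG by 3), and
; the constant part of the offset, 100 * 8 = 800, is folded into the
; displacement.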
define double @f6(double %f1, double *%base, i64 %index) {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: adb %f0, 800(%r1,%r2)
  %ptr1 = getelementptr double, double *%base, i64 %index
  %ptr2 = getelementptr double, double *%ptr1, i64 100
  %f2 = load double, double *%ptr2
  %res = call double @llvm.experimental.constrained.fadd.f64(
                        double %f1, double %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}

; Check that additions of spilled values can use ADB rather than ADBR.
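; The eleven values loaded before the call cannot all be kept in registers
; across the call to @foo, so some must be spilled; the CHECK-SCALAR line
; checks that at least one addition reads its operand directly from a spill
; slot (160(%r15)) instead of reloading it first.  Only the z10 run checks
; this.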
define double @f7(double *%ptr0) {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: adb %f0, 160(%r15)
  %ptr1 = getelementptr double, double *%ptr0, i64 2
  %ptr2 = getelementptr double, double *%ptr0, i64 4
  %ptr3 = getelementptr double, double *%ptr0, i64 6
  %ptr4 = getelementptr double, double *%ptr0, i64 8
  %ptr5 = getelementptr double, double *%ptr0, i64 10
  %ptr6 = getelementptr double, double *%ptr0, i64 12
  %ptr7 = getelementptr double, double *%ptr0, i64 14
  %ptr8 = getelementptr double, double *%ptr0, i64 16
  %ptr9 = getelementptr double, double *%ptr0, i64 18
  %ptr10 = getelementptr double, double *%ptr0, i64 20

  %val0 = load double, double *%ptr0
  %val1 = load double, double *%ptr1
  %val2 = load double, double *%ptr2
  %val3 = load double, double *%ptr3
  %val4 = load double, double *%ptr4
  %val5 = load double, double *%ptr5
  %val6 = load double, double *%ptr6
  %val7 = load double, double *%ptr7
  %val8 = load double, double *%ptr8
  %val9 = load double, double *%ptr9
  %val10 = load double, double *%ptr10

  %ret = call double @foo()

  %add0 = call double @llvm.experimental.constrained.fadd.f64(
                        double %ret, double %val0,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %add1 = call double @llvm.experimental.constrained.fadd.f64(
                        double %add0, double %val1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %add2 = call double @llvm.experimental.constrained.fadd.f64(
                        double %add1, double %val2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %add3 = call double @llvm.experimental.constrained.fadd.f64(
                        double %add2, double %val3,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %add4 = call double @llvm.experimental.constrained.fadd.f64(
                        double %add3, double %val4,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %add5 = call double @llvm.experimental.constrained.fadd.f64(
                        double %add4, double %val5,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %add6 = call double @llvm.experimental.constrained.fadd.f64(
                        double %add5, double %val6,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %add7 = call double @llvm.experimental.constrained.fadd.f64(
                        double %add6, double %val7,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %add8 = call double @llvm.experimental.constrained.fadd.f64(
                        double %add7, double %val8,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %add9 = call double @llvm.experimental.constrained.fadd.f64(
                        double %add8, double %val9,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %add10 = call double @llvm.experimental.constrained.fadd.f64(
                        double %add9, double %val10,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")

  ret double %add10
}