; Test 64-bit floating-point strict comparison. The tests assume a z10
; implementation of select, using conditional branches rather than LOCGR.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
; RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -verify-machineinstrs \
; RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK-VECTOR %s
; Check comparison with registers.
define i64 @f1(i64 %a, i64 %b, double %f1, double %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: cdbr %f0, %f2
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  ; The strict intrinsic requires a condition-code metadata operand ("oeq")
  ; in addition to the exception-behavior operand.
  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %f1, double %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check the low end of the CDB range.
define i64 @f2(i64 %a, i64 %b, double %f1, ptr %ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: cdb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %f2 = load double, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %f1, double %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check the high end of the aligned CDB range.
define i64 @f3(i64 %a, i64 %b, double %f1, ptr %base) #0 {
; CHECK-LABEL: f3:
; CHECK: cdb %f0, 4088(%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  ; 511 * 8 = 4088, the largest doubleword-aligned 12-bit displacement.
  %ptr = getelementptr double, ptr %base, i64 511
  %f2 = load double, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %f1, double %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define i64 @f4(i64 %a, i64 %b, double %f1, ptr %base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r4, 4096
; CHECK: cdb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  ; 512 * 8 = 4096, one past the unsigned 12-bit displacement range.
  %ptr = getelementptr double, ptr %base, i64 512
  %f2 = load double, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %f1, double %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check negative displacements, which also need separate address logic.
define i64 @f5(i64 %a, i64 %b, double %f1, ptr %base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r4, -8
; CHECK: cdb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %ptr = getelementptr double, ptr %base, i64 -1
  %f2 = load double, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %f1, double %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check that CDB allows indices.
define i64 @f6(i64 %a, i64 %b, double %f1, ptr %base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r5, 3
; CHECK: cdb %f0, 800(%r1,%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %ptr1 = getelementptr double, ptr %base, i64 %index
  %ptr2 = getelementptr double, ptr %ptr1, i64 100
  %f2 = load double, ptr %ptr2
  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %f1, double %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check that comparisons of spilled values can use CDB rather than CDBR.
define double @f7(ptr %ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: cdb {{%f[0-9]+}}, 160(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr double, ptr %ptr0, i64 2
  %ptr2 = getelementptr double, ptr %ptr0, i64 4
  %ptr3 = getelementptr double, ptr %ptr0, i64 6
  %ptr4 = getelementptr double, ptr %ptr0, i64 8
  %ptr5 = getelementptr double, ptr %ptr0, i64 10
  %ptr6 = getelementptr double, ptr %ptr0, i64 12
  %ptr7 = getelementptr double, ptr %ptr0, i64 14
  %ptr8 = getelementptr double, ptr %ptr0, i64 16
  %ptr9 = getelementptr double, ptr %ptr0, i64 18
  %ptr10 = getelementptr double, ptr %ptr0, i64 20

  %val0 = load double, ptr %ptr0
  %val1 = load double, ptr %ptr1
  %val2 = load double, ptr %ptr2
  %val3 = load double, ptr %ptr3
  %val4 = load double, ptr %ptr4
  %val5 = load double, ptr %ptr5
  %val6 = load double, ptr %ptr6
  %val7 = load double, ptr %ptr7
  %val8 = load double, ptr %ptr8
  %val9 = load double, ptr %ptr9
  %val10 = load double, ptr %ptr10

  ; The call forces all eleven live values to be spilled across it.
  %ret = call double @foo() #0

  %cmp0 = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %ret, double %val0,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp1 = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %ret, double %val1,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp2 = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %ret, double %val2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp3 = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %ret, double %val3,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp4 = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %ret, double %val4,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp5 = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %ret, double %val5,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp6 = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %ret, double %val6,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp7 = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %ret, double %val7,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp8 = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %ret, double %val8,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp9 = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %ret, double %val9,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp10 = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %ret, double %val10,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0

  %sel0 = select i1 %cmp0, double %ret, double 0.0
  %sel1 = select i1 %cmp1, double %sel0, double 1.0
  %sel2 = select i1 %cmp2, double %sel1, double 2.0
  %sel3 = select i1 %cmp3, double %sel2, double 3.0
  %sel4 = select i1 %cmp4, double %sel3, double 4.0
  %sel5 = select i1 %cmp5, double %sel4, double 5.0
  %sel6 = select i1 %cmp6, double %sel5, double 6.0
  %sel7 = select i1 %cmp7, double %sel6, double 7.0
  %sel8 = select i1 %cmp8, double %sel7, double 8.0
  %sel9 = select i1 %cmp9, double %sel8, double 9.0
  %sel10 = select i1 %cmp10, double %sel9, double 10.0

  ret double %sel10
}

; Check comparison with zero.
define i64 @f8(i64 %a, i64 %b, double %f) #0 {
; CHECK-LABEL: f8:
; CHECK-SCALAR: ltdbr %f0, %f0
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR: ltdbr %f0, %f0
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %f, double 0.0,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check the comparison can be reversed if that allows CDB to be used.
define i64 @f9(i64 %a, i64 %b, double %f2, ptr %ptr) #0 {
; CHECK-LABEL: f9:
; CHECK: cdb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: blr %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrnl %r2, %r3
; CHECK: br %r14
  %f1 = load double, ptr %ptr
  ; "ogt" with %f1 (memory) as the first operand: the backend reverses it so
  ; the register operand %f2 can stay in %f0 and the load folds into CDB,
  ; turning the condition into "low" (blr / locgrnl above).
  %cond = call i1 @llvm.experimental.constrained.fcmp.f64(
                                               double %f1, double %f2,
                                               metadata !"ogt",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

attributes #0 = { strictfp }

; @foo is an external function used by @f7 to force spills across a call.
declare double @foo()
declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)