; Test 32-bit floating-point strict comparison.  The tests assume a z10
; implementation of select, using conditional branches rather than LOCGR.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
; RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 \
; RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK-VECTOR %s
; Check comparison with registers.
define i64 @f1(i64 %a, i64 %b, float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: cebr %f0, %f2
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; Check the low end of the CEB range.
define i64 @f2(i64 %a, i64 %b, float %f1, ptr %ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %f2 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; Check the high end of the aligned CEB range.
define i64 @f3(i64 %a, i64 %b, float %f1, ptr %base) #0 {
; CHECK-LABEL: f3:
; CHECK: ceb %f0, 4092(%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %ptr = getelementptr float, ptr %base, i64 1023
  %f2 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
define i64 @f4(i64 %a, i64 %b, float %f1, ptr %base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r4, 4096
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %ptr = getelementptr float, ptr %base, i64 1024
  %f2 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; Check negative displacements, which also need separate address logic.
define i64 @f5(i64 %a, i64 %b, float %f1, ptr %base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r4, -4
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %ptr = getelementptr float, ptr %base, i64 -1
  %f2 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; Check that CEB allows indices.
define i64 @f6(i64 %a, i64 %b, float %f1, ptr %base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r5, 2
; CHECK: ceb %f0, 400(%r1,%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %ptr1 = getelementptr float, ptr %base, i64 %index
  %ptr2 = getelementptr float, ptr %ptr1, i64 100
  %f2 = load float, ptr %ptr2
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; @foo is the call whose return value is compared against the spilled loads
; below; the declaration was lost in extraction and is restored here.
declare float @foo()

; Check that comparisons of spilled values can use CEB rather than CEBR.
define float @f7(ptr %ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: ceb {{%f[0-9]+}}, 16{{[04]}}(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr float, ptr %ptr0, i64 2
  %ptr2 = getelementptr float, ptr %ptr0, i64 4
  %ptr3 = getelementptr float, ptr %ptr0, i64 6
  %ptr4 = getelementptr float, ptr %ptr0, i64 8
  %ptr5 = getelementptr float, ptr %ptr0, i64 10
  %ptr6 = getelementptr float, ptr %ptr0, i64 12
  %ptr7 = getelementptr float, ptr %ptr0, i64 14
  %ptr8 = getelementptr float, ptr %ptr0, i64 16
  %ptr9 = getelementptr float, ptr %ptr0, i64 18
  %ptr10 = getelementptr float, ptr %ptr0, i64 20

  %val0 = load float, ptr %ptr0
  %val1 = load float, ptr %ptr1
  %val2 = load float, ptr %ptr2
  %val3 = load float, ptr %ptr3
  %val4 = load float, ptr %ptr4
  %val5 = load float, ptr %ptr5
  %val6 = load float, ptr %ptr6
  %val7 = load float, ptr %ptr7
  %val8 = load float, ptr %ptr8
  %val9 = load float, ptr %ptr9
  %val10 = load float, ptr %ptr10

  %ret = call float @foo() #0

  %cmp0 = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %ret, float %val0,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp1 = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %ret, float %val1,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp2 = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %ret, float %val2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp3 = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %ret, float %val3,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp4 = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %ret, float %val4,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp5 = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %ret, float %val5,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp6 = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %ret, float %val6,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp7 = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %ret, float %val7,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp8 = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %ret, float %val8,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp9 = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %ret, float %val9,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %cmp10 = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %ret, float %val10,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0

  %sel0 = select i1 %cmp0, float %ret, float 0.0
  %sel1 = select i1 %cmp1, float %sel0, float 1.0
  %sel2 = select i1 %cmp2, float %sel1, float 2.0
  %sel3 = select i1 %cmp3, float %sel2, float 3.0
  %sel4 = select i1 %cmp4, float %sel3, float 4.0
  %sel5 = select i1 %cmp5, float %sel4, float 5.0
  %sel6 = select i1 %cmp6, float %sel5, float 6.0
  %sel7 = select i1 %cmp7, float %sel6, float 7.0
  %sel8 = select i1 %cmp8, float %sel7, float 8.0
  %sel9 = select i1 %cmp9, float %sel8, float 9.0
  %sel10 = select i1 %cmp10, float %sel9, float 10.0
  ret float %sel10
}
; Check comparison with zero.
define i64 @f8(i64 %a, i64 %b, float %f) #0 {
; CHECK-LABEL: f8:
; CHECK: ltebr %f0, %f0
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f, float 0.0,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; Check the comparison can be reversed if that allows CEB to be used,
; first with oeq.
define i64 @f9(i64 %a, i64 %b, float %f2, ptr %ptr) #0 {
; CHECK-LABEL: f9:
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %f1 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; ...then one.
define i64 @f10(i64 %a, i64 %b, float %f2, ptr %ptr) #0 {
; CHECK-LABEL: f10:
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: blhr %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrnlh %r2, %r3
; CHECK: br %r14
  %f1 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"one",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; ...then olt.
define i64 @f11(i64 %a, i64 %b, float %f2, ptr %ptr) #0 {
; CHECK-LABEL: f11:
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: bhr %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrnh %r2, %r3
; CHECK: br %r14
  %f1 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"olt",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; ...then ole.
define i64 @f12(i64 %a, i64 %b, float %f2, ptr %ptr) #0 {
; CHECK-LABEL: f12:
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: bher %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrnhe %r2, %r3
; CHECK: br %r14
  %f1 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"ole",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; ...then oge.
define i64 @f13(i64 %a, i64 %b, float %f2, ptr %ptr) #0 {
; CHECK-LABEL: f13:
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: bler %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrnle %r2, %r3
; CHECK: br %r14
  %f1 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"oge",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; ...then ogt.
define i64 @f14(i64 %a, i64 %b, float %f2, ptr %ptr) #0 {
; CHECK-LABEL: f14:
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: blr %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrnl %r2, %r3
; CHECK: br %r14
  %f1 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"ogt",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; ...then ueq.
define i64 @f15(i64 %a, i64 %b, float %f2, ptr %ptr) #0 {
; CHECK-LABEL: f15:
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: bnlhr %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrlh %r2, %r3
; CHECK: br %r14
  %f1 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"ueq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; ...then une.
define i64 @f16(i64 %a, i64 %b, float %f2, ptr %ptr) #0 {
; CHECK-LABEL: f16:
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: bner %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgre %r2, %r3
; CHECK: br %r14
  %f1 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"une",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; ...then ult.
define i64 @f17(i64 %a, i64 %b, float %f2, ptr %ptr) #0 {
; CHECK-LABEL: f17:
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: bnler %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrle %r2, %r3
; CHECK: br %r14
  %f1 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"ult",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; ...then ule.
define i64 @f18(i64 %a, i64 %b, float %f2, ptr %ptr) #0 {
; CHECK-LABEL: f18:
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: bnlr %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrl %r2, %r3
; CHECK: br %r14
  %f1 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"ule",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; ...then uge.
define i64 @f19(i64 %a, i64 %b, float %f2, ptr %ptr) #0 {
; CHECK-LABEL: f19:
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: bnhr %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrh %r2, %r3
; CHECK: br %r14
  %f1 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"uge",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; ...then ugt.
define i64 @f20(i64 %a, i64 %b, float %f2, ptr %ptr) #0 {
; CHECK-LABEL: f20:
; CHECK: ceb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: bnher %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrhe %r2, %r3
; CHECK: br %r14
  %f1 = load float, ptr %ptr
  %cond = call i1 @llvm.experimental.constrained.fcmp.f32(
                                               float %f1, float %f2,
                                               metadata !"ugt",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}
; All functions are strictfp: FP operations may not be reordered or folded
; in ways that change exception behavior.
attributes #0 = { strictfp }

declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)