1 ; Test 32-bit floating-point signaling comparison. The tests assume a z10
2 ; implementation of select, using conditional branches rather than LOCGR.
4 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
5 ; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
6 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 \
7 ; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-VECTOR %s
; NOTE(review): this chunk is extraction-damaged -- each line has a fused
; original line number, and the fcmps operand/predicate lines, the
; terminating 'ret' and the closing '}' are not visible. Restore from the
; upstream test before running; code below is left byte-identical.
; f1: register/register signaling compare (KEBR); scalar expects a
; branch-on-equal (ber), vector expects LOCGRNE.
11 ; Check comparison with registers.
12 define i64 @f1(i64 %a, i64 %b, float %f1, float %f2) #0 {
14 ; CHECK: kebr %f0, %f2
15 ; CHECK-SCALAR-NEXT: ber %r14
16 ; CHECK-SCALAR: lgr %r2, %r3
17 ; CHECK-VECTOR-NEXT: locgrne %r2, %r3
19 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
22 metadata !"fpexcept.strict") #0
23 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the fcmps operand/predicate lines,
; the terminating 'ret' and the closing '}' are not visible here.
; f2: memory form (KEB) with displacement 0, the low end of the range.
27 ; Check the low end of the KEB range.
28 define i64 @f2(i64 %a, i64 %b, float %f1, float *%ptr) #0 {
30 ; CHECK: keb %f0, 0(%r4)
31 ; CHECK-SCALAR-NEXT: ber %r14
32 ; CHECK-SCALAR: lgr %r2, %r3
33 ; CHECK-VECTOR-NEXT: locgrne %r2, %r3
35 %f2 = load float, float *%ptr
36 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
39 metadata !"fpexcept.strict") #0
40 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the fcmps operand/predicate lines,
; the terminating 'ret' and the closing '}' are not visible here.
; f3: index 1023 * 4 bytes = displacement 4092, the largest aligned
; displacement that still fits KEB's 12-bit unsigned field.
44 ; Check the high end of the aligned KEB range.
45 define i64 @f3(i64 %a, i64 %b, float %f1, float *%base) #0 {
47 ; CHECK: keb %f0, 4092(%r4)
48 ; CHECK-SCALAR-NEXT: ber %r14
49 ; CHECK-SCALAR: lgr %r2, %r3
50 ; CHECK-VECTOR-NEXT: locgrne %r2, %r3
52 %ptr = getelementptr float, float *%base, i64 1023
53 %f2 = load float, float *%ptr
54 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
57 metadata !"fpexcept.strict") #0
58 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the fcmps operand/predicate lines,
; the terminating 'ret' and the closing '}' are not visible here.
; f4: offset 4096 is just past the 12-bit displacement range, so the test
; expects the base to be adjusted first (aghi) before KEB with disp 0.
62 ; Check the next word up, which needs separate address logic.
63 ; Other sequences besides this one would be OK.
64 define i64 @f4(i64 %a, i64 %b, float %f1, float *%base) #0 {
66 ; CHECK: aghi %r4, 4096
67 ; CHECK: keb %f0, 0(%r4)
68 ; CHECK-SCALAR-NEXT: ber %r14
69 ; CHECK-SCALAR: lgr %r2, %r3
70 ; CHECK-VECTOR-NEXT: locgrne %r2, %r3
72 %ptr = getelementptr float, float *%base, i64 1024
73 %f2 = load float, float *%ptr
74 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
77 metadata !"fpexcept.strict") #0
78 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the fcmps operand/predicate lines,
; the terminating 'ret' and the closing '}' are not visible here.
; f5: a -4 byte offset cannot be encoded in KEB's unsigned displacement,
; so separate address arithmetic is required before KEB with disp 0.
82 ; Check negative displacements, which also need separate address logic.
83 define i64 @f5(i64 %a, i64 %b, float %f1, float *%base) #0 {
86 ; CHECK: keb %f0, 0(%r4)
87 ; CHECK-SCALAR-NEXT: ber %r14
88 ; CHECK-SCALAR: lgr %r2, %r3
89 ; CHECK-VECTOR-NEXT: locgrne %r2, %r3
91 %ptr = getelementptr float, float *%base, i64 -1
92 %f2 = load float, float *%ptr
93 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
96 metadata !"fpexcept.strict") #0
97 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- one metadata line of the fcmps call,
; the terminating 'ret' and the closing '}' are not visible here.
; f6: indexed addressing -- %index is scaled by 4 (sllg ...,2) and the
; constant part (100 * 4 = 400) becomes the displacement of KEB.
101 ; Check that KEB allows indices.
102 define i64 @f6(i64 %a, i64 %b, float %f1, float *%base, i64 %index) #0 {
104 ; CHECK: sllg %r1, %r5, 2
105 ; CHECK: keb %f0, 400(%r1,%r4)
106 ; CHECK-SCALAR-NEXT: ber %r14
107 ; CHECK-SCALAR: lgr %r2, %r3
108 ; CHECK-VECTOR-NEXT: locgrne %r2, %r3
110 %ptr1 = getelementptr float, float *%base, i64 %index
111 %ptr2 = getelementptr float, float *%ptr1, i64 100
112 %f2 = load float, float *%ptr2
113 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
114 float %f1, float %f2,
116 metadata !"fpexcept.strict") #0
117 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- each fcmps call is missing its
; predicate metadata line, and the terminating 'ret float %sel10' and the
; closing '}' are not visible here. The 'declare float @foo()' that this
; function needs is also absent from this chunk -- confirm it exists in
; the full file.
; f7: loads eleven values, calls @foo (forcing spills across the call),
; then compares the call result against each reloaded value. The point is
; that the spilled operands can be compared straight from the stack with
; KEB instead of being reloaded into registers for KEBR.
121 ; Check that comparisons of spilled values can use KEB rather than KEBR.
122 define float @f7(float *%ptr0) #0 {
124 ; CHECK: brasl %r14, foo@PLT
125 ; CHECK-SCALAR: keb {{%f[0-9]+}}, 16{{[04]}}(%r15)
127 %ptr1 = getelementptr float, float *%ptr0, i64 2
128 %ptr2 = getelementptr float, float *%ptr0, i64 4
129 %ptr3 = getelementptr float, float *%ptr0, i64 6
130 %ptr4 = getelementptr float, float *%ptr0, i64 8
131 %ptr5 = getelementptr float, float *%ptr0, i64 10
132 %ptr6 = getelementptr float, float *%ptr0, i64 12
133 %ptr7 = getelementptr float, float *%ptr0, i64 14
134 %ptr8 = getelementptr float, float *%ptr0, i64 16
135 %ptr9 = getelementptr float, float *%ptr0, i64 18
136 %ptr10 = getelementptr float, float *%ptr0, i64 20
138 %val0 = load float, float *%ptr0
139 %val1 = load float, float *%ptr1
140 %val2 = load float, float *%ptr2
141 %val3 = load float, float *%ptr3
142 %val4 = load float, float *%ptr4
143 %val5 = load float, float *%ptr5
144 %val6 = load float, float *%ptr6
145 %val7 = load float, float *%ptr7
146 %val8 = load float, float *%ptr8
147 %val9 = load float, float *%ptr9
148 %val10 = load float, float *%ptr10
150 %ret = call float @foo() #0
152 %cmp0 = call i1 @llvm.experimental.constrained.fcmps.f32(
153 float %ret, float %val0,
155 metadata !"fpexcept.strict") #0
156 %cmp1 = call i1 @llvm.experimental.constrained.fcmps.f32(
157 float %ret, float %val1,
159 metadata !"fpexcept.strict") #0
160 %cmp2 = call i1 @llvm.experimental.constrained.fcmps.f32(
161 float %ret, float %val2,
163 metadata !"fpexcept.strict") #0
164 %cmp3 = call i1 @llvm.experimental.constrained.fcmps.f32(
165 float %ret, float %val3,
167 metadata !"fpexcept.strict") #0
168 %cmp4 = call i1 @llvm.experimental.constrained.fcmps.f32(
169 float %ret, float %val4,
171 metadata !"fpexcept.strict") #0
172 %cmp5 = call i1 @llvm.experimental.constrained.fcmps.f32(
173 float %ret, float %val5,
175 metadata !"fpexcept.strict") #0
176 %cmp6 = call i1 @llvm.experimental.constrained.fcmps.f32(
177 float %ret, float %val6,
179 metadata !"fpexcept.strict") #0
180 %cmp7 = call i1 @llvm.experimental.constrained.fcmps.f32(
181 float %ret, float %val7,
183 metadata !"fpexcept.strict") #0
184 %cmp8 = call i1 @llvm.experimental.constrained.fcmps.f32(
185 float %ret, float %val8,
187 metadata !"fpexcept.strict") #0
188 %cmp9 = call i1 @llvm.experimental.constrained.fcmps.f32(
189 float %ret, float %val9,
191 metadata !"fpexcept.strict") #0
192 %cmp10 = call i1 @llvm.experimental.constrained.fcmps.f32(
193 float %ret, float %val10,
195 metadata !"fpexcept.strict") #0
; Fold every compare result into the return value via a select chain so
; none of the comparisons can be dead-code-eliminated.
197 %sel0 = select i1 %cmp0, float %ret, float 0.0
198 %sel1 = select i1 %cmp1, float %sel0, float 1.0
199 %sel2 = select i1 %cmp2, float %sel1, float 2.0
200 %sel3 = select i1 %cmp3, float %sel2, float 3.0
201 %sel4 = select i1 %cmp4, float %sel3, float 4.0
202 %sel5 = select i1 %cmp5, float %sel4, float 5.0
203 %sel6 = select i1 %cmp6, float %sel5, float 6.0
204 %sel7 = select i1 %cmp7, float %sel6, float 7.0
205 %sel8 = select i1 %cmp8, float %sel7, float 8.0
206 %sel9 = select i1 %cmp9, float %sel8, float 9.0
207 %sel10 = select i1 %cmp10, float %sel9, float 10.0
; NOTE(review): extraction-truncated -- the fcmps operand/predicate lines,
; the terminating 'ret' and the closing '}' are not visible here.
; f8: compare against +0.0. A signaling compare must not be folded to
; LOAD AND TEST (which is a quiet compare), so the test expects an
; explicit zero materialization (lzer) followed by KEBR.
212 ; Check comparison with zero - cannot use LOAD AND TEST.
213 define i64 @f8(i64 %a, i64 %b, float %f) #0 {
215 ; CHECK: lzer [[REG:%f[0-9]+]]
216 ; CHECK-NEXT: kebr %f0, [[REG]]
217 ; CHECK-SCALAR-NEXT: ber %r14
218 ; CHECK-SCALAR: lgr %r2, %r3
219 ; CHECK-VECTOR-NEXT: locgrne %r2, %r3
221 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
224 metadata !"fpexcept.strict") #0
225 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the comment below ends mid-sentence,
; the fcmps predicate metadata line, the terminating 'ret' and the closing
; '}' are not visible here.
; f9: the memory operand is the FIRST fcmps operand, so the comparison
; must be commuted for KEB (reg, mem) to be usable; scalar still expects
; a branch-on-equal (ber), which is symmetric under commutation.
229 ; Check the comparison can be reversed if that allows KEB to be used,
231 define i64 @f9(i64 %a, i64 %b, float %f2, float *%ptr) #0 {
233 ; CHECK: keb %f0, 0(%r4)
234 ; CHECK-SCALAR-NEXT: ber %r14
235 ; CHECK-SCALAR: lgr %r2, %r3
236 ; CHECK-VECTOR-NEXT: locgrne %r2, %r3
238 %f1 = load float, float *%ptr
239 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
240 float %f1, float %f2,
242 metadata !"fpexcept.strict") #0
243 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the descriptive comment and the
; fcmps predicate metadata line are missing, as are 'ret' and '}'.
; f10: reversed-compare variant; scalar expects a branch on low-or-high
; (blhr), i.e. the condition-code test for "unequal but ordered".
248 define i64 @f10(i64 %a, i64 %b, float %f2, float *%ptr) #0 {
250 ; CHECK: keb %f0, 0(%r4)
251 ; CHECK-SCALAR-NEXT: blhr %r14
252 ; CHECK-SCALAR: lgr %r2, %r3
253 ; CHECK-VECTOR-NEXT: locgrnlh %r2, %r3
255 %f1 = load float, float *%ptr
256 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
257 float %f1, float %f2,
259 metadata !"fpexcept.strict") #0
260 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the descriptive comment and the
; fcmps predicate metadata line are missing, as are 'ret' and '}'.
; f11: reversed-compare variant; scalar expects a branch-on-high (bhr).
265 define i64 @f11(i64 %a, i64 %b, float %f2, float *%ptr) #0 {
267 ; CHECK: keb %f0, 0(%r4)
268 ; CHECK-SCALAR-NEXT: bhr %r14
269 ; CHECK-SCALAR: lgr %r2, %r3
270 ; CHECK-VECTOR-NEXT: locgrnh %r2, %r3
272 %f1 = load float, float *%ptr
273 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
274 float %f1, float %f2,
276 metadata !"fpexcept.strict") #0
277 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the descriptive comment and the
; fcmps predicate metadata line are missing, as are 'ret' and '}'.
; f12: reversed-compare variant; scalar expects branch on high-or-equal
; (bher).
282 define i64 @f12(i64 %a, i64 %b, float %f2, float *%ptr) #0 {
284 ; CHECK: keb %f0, 0(%r4)
285 ; CHECK-SCALAR-NEXT: bher %r14
286 ; CHECK-SCALAR: lgr %r2, %r3
287 ; CHECK-VECTOR-NEXT: locgrnhe %r2, %r3
289 %f1 = load float, float *%ptr
290 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
291 float %f1, float %f2,
293 metadata !"fpexcept.strict") #0
294 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the descriptive comment and the
; fcmps predicate metadata line are missing, as are 'ret' and '}'.
; f13: reversed-compare variant; scalar expects branch on low-or-equal
; (bler).
299 define i64 @f13(i64 %a, i64 %b, float %f2, float *%ptr) #0 {
301 ; CHECK: keb %f0, 0(%r4)
302 ; CHECK-SCALAR-NEXT: bler %r14
303 ; CHECK-SCALAR: lgr %r2, %r3
304 ; CHECK-VECTOR-NEXT: locgrnle %r2, %r3
306 %f1 = load float, float *%ptr
307 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
308 float %f1, float %f2,
310 metadata !"fpexcept.strict") #0
311 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the descriptive comment and the
; fcmps predicate metadata line are missing, as are 'ret' and '}'.
; f14: reversed-compare variant; scalar expects a branch-on-low (blr).
316 define i64 @f14(i64 %a, i64 %b, float %f2, float *%ptr) #0 {
318 ; CHECK: keb %f0, 0(%r4)
319 ; CHECK-SCALAR-NEXT: blr %r14
320 ; CHECK-SCALAR: lgr %r2, %r3
321 ; CHECK-VECTOR-NEXT: locgrnl %r2, %r3
323 %f1 = load float, float *%ptr
324 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
325 float %f1, float %f2,
327 metadata !"fpexcept.strict") #0
328 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the descriptive comment and the
; fcmps predicate metadata line are missing, as are 'ret' and '}'.
; f15: reversed-compare variant; scalar expects branch on NOT low-or-high
; (bnlhr), i.e. the negated form of f10's condition.
333 define i64 @f15(i64 %a, i64 %b, float %f2, float *%ptr) #0 {
335 ; CHECK: keb %f0, 0(%r4)
336 ; CHECK-SCALAR-NEXT: bnlhr %r14
337 ; CHECK-SCALAR: lgr %r2, %r3
338 ; CHECK-VECTOR-NEXT: locgrlh %r2, %r3
340 %f1 = load float, float *%ptr
341 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
342 float %f1, float %f2,
344 metadata !"fpexcept.strict") #0
345 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the descriptive comment and the
; fcmps predicate metadata line are missing, as are 'ret' and '}'.
; f16: reversed-compare variant; scalar expects branch on not-equal (bner).
350 define i64 @f16(i64 %a, i64 %b, float %f2, float *%ptr) #0 {
352 ; CHECK: keb %f0, 0(%r4)
353 ; CHECK-SCALAR-NEXT: bner %r14
354 ; CHECK-SCALAR: lgr %r2, %r3
355 ; CHECK-VECTOR-NEXT: locgre %r2, %r3
357 %f1 = load float, float *%ptr
358 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
359 float %f1, float %f2,
361 metadata !"fpexcept.strict") #0
362 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the descriptive comment and the
; fcmps predicate metadata line are missing, as are 'ret' and '}'.
; f17: reversed-compare variant; scalar expects branch on NOT low-or-equal
; (bnler).
367 define i64 @f17(i64 %a, i64 %b, float %f2, float *%ptr) #0 {
369 ; CHECK: keb %f0, 0(%r4)
370 ; CHECK-SCALAR-NEXT: bnler %r14
371 ; CHECK-SCALAR: lgr %r2, %r3
372 ; CHECK-VECTOR-NEXT: locgrle %r2, %r3
374 %f1 = load float, float *%ptr
375 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
376 float %f1, float %f2,
378 metadata !"fpexcept.strict") #0
379 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the descriptive comment and the
; fcmps predicate metadata line are missing, as are 'ret' and '}'.
; f18: reversed-compare variant; scalar expects branch on not-low (bnlr).
384 define i64 @f18(i64 %a, i64 %b, float %f2, float *%ptr) #0 {
386 ; CHECK: keb %f0, 0(%r4)
387 ; CHECK-SCALAR-NEXT: bnlr %r14
388 ; CHECK-SCALAR: lgr %r2, %r3
389 ; CHECK-VECTOR-NEXT: locgrl %r2, %r3
391 %f1 = load float, float *%ptr
392 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
393 float %f1, float %f2,
395 metadata !"fpexcept.strict") #0
396 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the descriptive comment and the
; fcmps predicate metadata line are missing, as are 'ret' and '}'.
; f19: reversed-compare variant; scalar expects branch on not-high (bnhr).
401 define i64 @f19(i64 %a, i64 %b, float %f2, float *%ptr) #0 {
403 ; CHECK: keb %f0, 0(%r4)
404 ; CHECK-SCALAR-NEXT: bnhr %r14
405 ; CHECK-SCALAR: lgr %r2, %r3
406 ; CHECK-VECTOR-NEXT: locgrh %r2, %r3
408 %f1 = load float, float *%ptr
409 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
410 float %f1, float %f2,
412 metadata !"fpexcept.strict") #0
413 %res = select i1 %cond, i64 %a, i64 %b
; NOTE(review): extraction-truncated -- the descriptive comment and the
; fcmps predicate metadata line are missing, as are 'ret' and '}'.
; f20: reversed-compare variant; scalar expects branch on NOT
; high-or-equal (bnher).
418 define i64 @f20(i64 %a, i64 %b, float %f2, float *%ptr) #0 {
420 ; CHECK: keb %f0, 0(%r4)
421 ; CHECK-SCALAR-NEXT: bnher %r14
422 ; CHECK-SCALAR: lgr %r2, %r3
423 ; CHECK-VECTOR-NEXT: locgrhe %r2, %r3
425 %f1 = load float, float *%ptr
426 %cond = call i1 @llvm.experimental.constrained.fcmps.f32(
427 float %f1, float %f2,
429 metadata !"fpexcept.strict") #0
430 %res = select i1 %cond, i64 %a, i64 %b
; Attribute group #0 marks every function and fcmps call site as strictfp,
; which is required for the constrained FP intrinsics used above.
434 attributes #0 = { strictfp }
; Declaration of the signaling f32 compare intrinsic used throughout.
; NOTE(review): a 'declare float @foo()' for f7's call is not visible in
; this chunk -- confirm it survives in the full file.
436 declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)