; Test that floating-point strict compares are omitted if CC already has the
; right value.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
; RUN:   -enable-misched=0 -no-integrated-as | FileCheck %s
;
; We need -enable-misched=0 to make sure f12 and following routines really
; test the compare elimination pass.

declare float @llvm.fabs.f32(float %f)

; Test addition followed by EQ, which can use the CC result of the addition.
define float @f1(float %a, float %b, float *%dest) #0 {
; CHECK-LABEL: f1:
; CHECK: aebr %f0, %f2
; CHECK-NEXT: ber %r14
entry:
  %res = call float @llvm.experimental.constrained.fadd.f32(
                        float %a, float %b,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %res, float 0.0,
                        metadata !"oeq",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  store float %b, float *%dest
  br label %exit

exit:
  ret float %res
}

; ...and again with LT.
define float @f2(float %a, float %b, float *%dest) #0 {
; CHECK-LABEL: f2:
; CHECK: aebr %f0, %f2
; CHECK-NEXT: blr %r14
entry:
  %res = call float @llvm.experimental.constrained.fadd.f32(
                        float %a, float %b,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %res, float 0.0,
                        metadata !"olt",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  store float %b, float *%dest
  br label %exit

exit:
  ret float %res
}

; ...and again with GT.
define float @f3(float %a, float %b, float *%dest) #0 {
; CHECK-LABEL: f3:
; CHECK: aebr %f0, %f2
; CHECK-NEXT: bhr %r14
entry:
  %res = call float @llvm.experimental.constrained.fadd.f32(
                        float %a, float %b,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %res, float 0.0,
                        metadata !"ogt",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  store float %b, float *%dest
  br label %exit

exit:
  ret float %res
}

; ...and again with UEQ.
define float @f4(float %a, float %b, float *%dest) #0 {
; CHECK-LABEL: f4:
; CHECK: aebr %f0, %f2
; CHECK-NEXT: bnlhr %r14
entry:
  %res = call float @llvm.experimental.constrained.fadd.f32(
                        float %a, float %b,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %res, float 0.0,
                        metadata !"ueq",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  store float %b, float *%dest
  br label %exit

exit:
  ret float %res
}

; Subtraction also provides a zero-based CC value.
define float @f5(float %a, float %b, float *%dest) #0 {
; CHECK-LABEL: f5:
; CHECK: seb %f0, 0(%r2)
; CHECK-NEXT: bnher %r14
entry:
  %cur = load float, float *%dest
  %res = call float @llvm.experimental.constrained.fsub.f32(
                        float %a, float %cur,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %res, float 0.0,
                        metadata !"ult",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  store float %b, float *%dest
  br label %exit

exit:
  ret float %res
}

; Test the result of LOAD POSITIVE. We cannot omit the LTEBR.
define float @f6(float %dummy, float %a, float *%dest) #0 {
; CHECK-LABEL: f6:
; CHECK: lpdfr %f0, %f2
; CHECK-NEXT: ltebr %f0, %f0
; CHECK-NEXT: bhr %r14
entry:
  %res = call float @llvm.fabs.f32(float %a) #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %res, float 0.0,
                        metadata !"ogt",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  store float %res, float *%dest
  br label %exit

exit:
  ret float %res
}

; Test the result of LOAD NEGATIVE. We cannot omit the LTEBR.
define float @f7(float %dummy, float %a, float *%dest) #0 {
; CHECK-LABEL: f7:
; CHECK: lndfr %f0, %f2
; CHECK-NEXT: ltebr %f0, %f0
; CHECK-NEXT: blr %r14
entry:
  %abs = call float @llvm.fabs.f32(float %a) #0
  %res = fneg float %abs
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %res, float 0.0,
                        metadata !"olt",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  store float %res, float *%dest
  br label %exit

exit:
  ret float %res
}

; Test the result of LOAD COMPLEMENT. We cannot omit the LTEBR.
define float @f8(float %dummy, float %a, float *%dest) #0 {
; CHECK-LABEL: f8:
; CHECK: lcdfr %f0, %f2
; CHECK-NEXT: ltebr %f0, %f0
; CHECK-NEXT: bler %r14
entry:
  %res = fneg float %a
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %res, float 0.0,
                        metadata !"ole",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  store float %res, float *%dest
  br label %exit

exit:
  ret float %res
}

; Multiplication (for example) does not modify CC.
define float @f9(float %a, float %b, float *%dest) #0 {
; CHECK-LABEL: f9:
; CHECK: meebr %f0, %f2
; CHECK-NEXT: ltebr %f0, %f0
; CHECK-NEXT: blhr %r14
entry:
  %res = call float @llvm.experimental.constrained.fmul.f32(
                        float %a, float %b,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %res, float 0.0,
                        metadata !"one",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  store float %b, float *%dest
  br label %exit

exit:
  ret float %res
}

; Test a combination involving a CC-setting instruction followed by
; a non-CC-setting instruction.
define float @f10(float %a, float %b, float %c, float *%dest) #0 {
; CHECK-LABEL: f10:
; CHECK: aebr %f0, %f2
; CHECK-NEXT: debr %f0, %f4
; CHECK-NEXT: ltebr %f0, %f0
; CHECK-NEXT: bner %r14
entry:
  %add = call float @llvm.experimental.constrained.fadd.f32(
                        float %a, float %b,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %res = call float @llvm.experimental.constrained.fdiv.f32(
                        float %add, float %c,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %res, float 0.0,
                        metadata !"une",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  store float %b, float *%dest
  br label %exit

exit:
  ret float %res
}

; Test a case where CC is set based on a different register from the
; compare input.
define float @f11(float %a, float %b, float %c, float *%dest1, float *%dest2) #0 {
; CHECK-LABEL: f11:
; CHECK: aebr %f0, %f2
; CHECK-NEXT: sebr %f4, %f0
; CHECK-DAG: ste %f4, 0(%r2)
; CHECK-DAG: ltebr %f0, %f0
; CHECK-NEXT: ber %r14
entry:
  %add = call float @llvm.experimental.constrained.fadd.f32(
                        float %a, float %b,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %sub = call float @llvm.experimental.constrained.fsub.f32(
                        float %c, float %add,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  store float %sub, float *%dest1
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %add, float 0.0,
                        metadata !"oeq",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  store float %sub, float *%dest2
  br label %exit

exit:
  ret float %add
}

; Test that LER gets converted to LTEBR where useful.
define float @f12(float %dummy, float %val) #0 {
; CHECK-LABEL: f12:
; CHECK: ltebr %f0, %f2
; CHECK-NEXT: #APP
; CHECK-NEXT: blah %f0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: blr %r14
entry:
  %ret = call float asm "blah $1", "=f,{f0}"(float %val) #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %val, float 0.0,
                        metadata !"olt",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  call void asm sideeffect "blah", ""() #0
  br label %exit

exit:
  ret float %ret
}

; Test that LDR gets converted to LTDBR where useful.
define double @f13(double %dummy, double %val) #0 {
; CHECK-LABEL: f13:
; CHECK: ltdbr %f0, %f2
; CHECK-NEXT: #APP
; CHECK-NEXT: blah %f0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: blr %r14
entry:
  %ret = call double asm "blah $1", "=f,{f0}"(double %val) #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(
                        double %val, double 0.0,
                        metadata !"olt",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  call void asm sideeffect "blah", ""() #0
  br label %exit

exit:
  ret double %ret
}

; Test that LXR gets converted to LTXBR where useful.
define void @f14(fp128 *%ptr1, fp128 *%ptr2) #0 {
; CHECK-NEXT: blr %r14
entry:
  %val1 = load fp128, fp128 *%ptr1
  %val2 = load fp128, fp128 *%ptr2
  %div = fdiv fp128 %val1, %val2
  store fp128 %div, fp128 *%ptr1
  %mul = fmul fp128 %val1, %val2
  store fp128 %mul, fp128 *%ptr2
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(
                        fp128 %val1, fp128 0xL00000000000000000000000000000000,
                        metadata !"olt",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  call void asm sideeffect "blah", ""() #0
  br label %exit

exit:
  ret void
}

; Test a case where it is the source rather than destination of LER that
; we need.
define float @f15(float %val, float %dummy) #0 {
; CHECK-LABEL: f15:
; CHECK: ltebr %f2, %f0
; CHECK-NEXT: #APP
; CHECK-NEXT: blah %f2
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: blr %r14
entry:
  %ret = call float asm "blah $1", "=f,{f2}"(float %val) #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %val, float 0.0,
                        metadata !"olt",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  call void asm sideeffect "blah", ""() #0
  br label %exit

exit:
  ret float %ret
}

; Test a case where it is the source rather than destination of LDR that
; we need.
define double @f16(double %val, double %dummy) #0 {
; CHECK-LABEL: f16:
; CHECK: ltdbr %f2, %f0
; CHECK-NEXT: #APP
; CHECK-NEXT: blah %f2
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: blr %r14
entry:
  %ret = call double asm "blah $1", "=f,{f2}"(double %val) #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(
                        double %val, double 0.0,
                        metadata !"olt",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  call void asm sideeffect "blah", ""() #0
  br label %exit

exit:
  ret double %ret
}

; Repeat f2 with a comparison against -0.
define float @f17(float %a, float %b, float *%dest) #0 {
; CHECK-LABEL: f17:
; CHECK: aebr %f0, %f2
; CHECK-NEXT: blr %r14
entry:
  %res = call float @llvm.experimental.constrained.fadd.f32(
                        float %a, float %b,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %res, float -0.0,
                        metadata !"olt",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  store float %b, float *%dest
  br label %exit

exit:
  ret float %res
}

; Verify that we cannot omit the compare if there may be an intervening
; change to the exception flags.
define float @f18(float %a, float %b, float *%dest) #0 {
; CHECK-LABEL: f18:
; CHECK: aebr %f0, %f2
; CHECK: ltebr %f0, %f0
; CHECK-NEXT: ber %r14
entry:
  %res = call float @llvm.experimental.constrained.fadd.f32(
                        float %a, float %b,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  call void asm sideeffect "blah", ""() #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %res, float 0.0,
                        metadata !"oeq",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  store float %b, float *%dest
  br label %exit

exit:
  ret float %res
}

; Verify that we cannot convert LER to LTEBR and omit the compare if
; there may be an intervening change to the exception flags.
define float @f19(float %dummy, float %val) #0 {
; CHECK-LABEL: f19:
; CHECK: ler %f0, %f2
; CHECK-NEXT: #APP
; CHECK-NEXT: blah %f0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: ltebr %f2, %f2
; CHECK-NEXT: blr %r14
entry:
  %ret = call float asm sideeffect "blah $1", "=f,{f0}"(float %val) #0
  %cmp = call i1 @llvm.experimental.constrained.fcmp.f32(
                        float %val, float 0.0,
                        metadata !"olt",
                        metadata !"fpexcept.strict") #0
  br i1 %cmp, label %exit, label %store

store:
  call void asm sideeffect "blah", ""() #0
  br label %exit

exit:
  ret float %ret
}

attributes #0 = { strictfp }

declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata)
declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata)
declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)
declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
declare i1 @llvm.experimental.constrained.fcmp.f128(fp128, fp128, metadata, metadata)