; Test 32-bit floating-point strict addition.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
; RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
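;
; The z10 run applies both the CHECK and CHECK-SCALAR prefixes; the z14 run
; applies only the shared CHECK prefix.
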
declare float @foo()
declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)

; Check register addition.
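; Both inputs arrive in FPRs (%f0 and %f2 under the ELF ABI), so the
; register-register form AEBR should be used rather than a memory form.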
define float @f1(float %f1, float %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: aebr %f0, %f2
; CHECK: br %r14
  %res = call float @llvm.experimental.constrained.fadd.f32(
                        float %f1, float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret float %res
}

; Check the low end of the AEB range.
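; AEB addresses memory with a base register, an optional index register and a
; 12-bit unsigned displacement, so displacement 0 is the low end of its range.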
define float @f2(float %f1, float *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: aeb %f0, 0(%r2)
; CHECK: br %r14
  %f2 = load float, float *%ptr
  %res = call float @llvm.experimental.constrained.fadd.f32(
                        float %f1, float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret float %res
}

; Check the high end of the aligned AEB range.
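; The 12-bit displacement tops out at 4095; 4092 (element index 1023) is the
; highest 4-byte-aligned offset that still fits in the instruction.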
define float @f3(float %f1, float *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: aeb %f0, 4092(%r2)
; CHECK: br %r14
  %ptr = getelementptr float, float *%base, i64 1023
  %f2 = load float, float *%ptr
  %res = call float @llvm.experimental.constrained.fadd.f32(
                        float %f1, float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret float %res
}

; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
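; Element index 1024 is byte offset 4096, which no longer fits in the 12-bit
; displacement, so the base register has to be adjusted first.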
define float @f4(float %f1, float *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: aeb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr float, float *%base, i64 1024
  %f2 = load float, float *%ptr
  %res = call float @llvm.experimental.constrained.fadd.f32(
                        float %f1, float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret float %res
}

; Check negative displacements, which also need separate address logic.
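; The displacement field is unsigned and AEB has no long-displacement form, so
; byte offset -4 must be folded into the base register before the addition.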
define float @f5(float %f1, float *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: aeb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr float, float *%base, i64 -1
  %f2 = load float, float *%ptr
  %res = call float @llvm.experimental.constrained.fadd.f32(
                        float %f1, float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret float %res
}

; Check that AEB allows indices.
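; %index is scaled by 4 (the SLLG by 2) and used as the index register, while
; the constant part of the address (100 * 4 = 400) becomes the displacement.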
define float @f6(float %f1, float *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: aeb %f0, 400(%r1,%r2)
; CHECK: br %r14
  %ptr1 = getelementptr float, float *%base, i64 %index
  %ptr2 = getelementptr float, float *%ptr1, i64 100
  %f2 = load float, float *%ptr2
  %res = call float @llvm.experimental.constrained.fadd.f32(
                        float %f1, float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  ret float %res
}

; Check that additions of spilled values can use AEB rather than AEBR.
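; The call clobbers most FPRs, so some of the loaded values are spilled; the
; reload should then be folded into the AEB memory operand rather than needing
; a separate load followed by AEBR.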
define float @f7(float *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: aeb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr float, float *%ptr0, i64 2
  %ptr2 = getelementptr float, float *%ptr0, i64 4
  %ptr3 = getelementptr float, float *%ptr0, i64 6
  %ptr4 = getelementptr float, float *%ptr0, i64 8
  %ptr5 = getelementptr float, float *%ptr0, i64 10
  %ptr6 = getelementptr float, float *%ptr0, i64 12
  %ptr7 = getelementptr float, float *%ptr0, i64 14
  %ptr8 = getelementptr float, float *%ptr0, i64 16
  %ptr9 = getelementptr float, float *%ptr0, i64 18
  %ptr10 = getelementptr float, float *%ptr0, i64 20

  %val0 = load float, float *%ptr0
  %val1 = load float, float *%ptr1
  %val2 = load float, float *%ptr2
  %val3 = load float, float *%ptr3
  %val4 = load float, float *%ptr4
  %val5 = load float, float *%ptr5
  %val6 = load float, float *%ptr6
  %val7 = load float, float *%ptr7
  %val8 = load float, float *%ptr8
  %val9 = load float, float *%ptr9
  %val10 = load float, float *%ptr10

  %ret = call float @foo() #0

  %add0 = call float @llvm.experimental.constrained.fadd.f32(
                        float %ret, float %val0,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %add1 = call float @llvm.experimental.constrained.fadd.f32(
                        float %add0, float %val1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %add2 = call float @llvm.experimental.constrained.fadd.f32(
                        float %add1, float %val2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %add3 = call float @llvm.experimental.constrained.fadd.f32(
                        float %add2, float %val3,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %add4 = call float @llvm.experimental.constrained.fadd.f32(
                        float %add3, float %val4,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %add5 = call float @llvm.experimental.constrained.fadd.f32(
                        float %add4, float %val5,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %add6 = call float @llvm.experimental.constrained.fadd.f32(
                        float %add5, float %val6,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %add7 = call float @llvm.experimental.constrained.fadd.f32(
                        float %add6, float %val7,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %add8 = call float @llvm.experimental.constrained.fadd.f32(
                        float %add7, float %val8,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %add9 = call float @llvm.experimental.constrained.fadd.f32(
                        float %add8, float %val9,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0
  %add10 = call float @llvm.experimental.constrained.fadd.f32(
                        float %add9, float %val10,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict") #0

  ret float %add10
}

attributes #0 = { strictfp }