1 ; Verify that strict FP operations are not rescheduled
3 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
5 declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
6 declare float @llvm.sqrt.f32(float)
7 declare void @llvm.s390.sfpc(i32)
9 ; The basic assumption of all following tests is that on z13, we never
10 ; want to see two square root instructions directly in a row, so the
; post-RA scheduler will always schedule something else in between
; whenever possible.
14 ; We can move any FP operation across a (normal) store.
; f1: two non-strict sqrt calls followed by two plain stores.  Nothing
; constrains scheduling here, so the stores may be interleaved with the
; square roots.
; NOTE(review): the FileCheck CHECK lines for this function appear to
; have been lost in this chunk — restore them before running the test.
define void @f1(float %f1, float %f2, float *%ptr1, float *%ptr2) {
  %sqrt1 = call float @llvm.sqrt.f32(float %f1)
  %sqrt2 = call float @llvm.sqrt.f32(float %f2)

  store float %sqrt1, float *%ptr1
  store float %sqrt2, float *%ptr2

  ret void                              ; restored: terminator/brace were truncated
}
; f2: constrained sqrts with fpexcept.ignore across plain stores — still
; freely reschedulable, like the non-strict case in @f1.
; NOTE(review): the FileCheck CHECK lines for this function appear to
; have been lost in this chunk — restore them before running the test.
define void @f2(float %f1, float %f2, float *%ptr1, float *%ptr2) {
  ; restored: the "float %fN," operand lines were truncated; the declare
  ; requires (float, metadata, metadata).
  %sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.ignore")
  %sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.ignore")

  store float %sqrt1, float *%ptr1
  store float %sqrt2, float *%ptr2

  ret void                              ; restored: terminator/brace were truncated
}
; f3: constrained sqrts with fpexcept.strict — per the file header these
; must not be rescheduled across each other, even past plain stores.
; NOTE(review): the FileCheck CHECK lines for this function appear to
; have been lost in this chunk — restore them before running the test.
define void @f3(float %f1, float %f2, float *%ptr1, float *%ptr2) {
  ; restored: the "float %fN," operand lines were truncated; the declare
  ; requires (float, metadata, metadata).
  %sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")

  store float %sqrt1, float *%ptr1
  store float %sqrt2, float *%ptr2

  ret void                              ; restored: terminator/brace were truncated
}
80 ; We can move a non-strict FP operation or a fpexcept.ignore
; operation even across a volatile store, but not a fpexcept.strict
; operation.
; f4: non-strict sqrts with volatile stores — still reschedulable, since
; only fpexcept.strict operations are pinned by volatile accesses.
; NOTE(review): the FileCheck CHECK lines for this function appear to
; have been lost in this chunk — restore them before running the test.
define void @f4(float %f1, float %f2, float *%ptr1, float *%ptr2) {
  %sqrt1 = call float @llvm.sqrt.f32(float %f1)
  %sqrt2 = call float @llvm.sqrt.f32(float %f2)

  store volatile float %sqrt1, float *%ptr1
  store volatile float %sqrt2, float *%ptr2

  ret void                              ; restored: terminator/brace were truncated
}
; f5: fpexcept.ignore constrained sqrts with volatile stores — may still
; be moved across the volatile stores.
; NOTE(review): the FileCheck CHECK lines for this function appear to
; have been lost in this chunk — restore them before running the test.
define void @f5(float %f1, float %f2, float *%ptr1, float *%ptr2) {
  ; restored: the "float %fN," operand lines were truncated; the declare
  ; requires (float, metadata, metadata).
  %sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.ignore")
  %sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.ignore")

  store volatile float %sqrt1, float *%ptr1
  store volatile float %sqrt2, float *%ptr2

  ret void                              ; restored: terminator/brace were truncated
}
; f6: fpexcept.strict constrained sqrts with volatile stores — must not
; be moved across the volatile stores.
; NOTE(review): the FileCheck CHECK lines for this function appear to
; have been lost in this chunk — restore them before running the test.
define void @f6(float %f1, float %f2, float *%ptr1, float *%ptr2) {
  ; restored: the "float %fN," operand lines were truncated; the declare
  ; requires (float, metadata, metadata).
  %sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")

  store volatile float %sqrt1, float *%ptr1
  store volatile float %sqrt2, float *%ptr2

  ret void                              ; restored: terminator/brace were truncated
}
; No variant of FP operations can be scheduled across an SFPC.
; f7: non-strict sqrts around an SFPC call (@llvm.s390.sfpc changes the
; FP control register) — nothing may move across the SFPC.
; NOTE(review): the FileCheck CHECK lines for this function appear to
; have been lost in this chunk — restore them before running the test.
define void @f7(float %f1, float %f2, float *%ptr1, float *%ptr2) {
  %sqrt1 = call float @llvm.sqrt.f32(float %f1)
  %sqrt2 = call float @llvm.sqrt.f32(float %f2)

  call void @llvm.s390.sfpc(i32 0)

  store float %sqrt1, float *%ptr1
  store float %sqrt2, float *%ptr2

  ret void                              ; restored: terminator/brace were truncated
}
; f8: fpexcept.ignore constrained sqrts around an SFPC — still must not
; move across the SFPC.
; NOTE(review): the FileCheck CHECK lines for this function appear to
; have been lost in this chunk — restore them before running the test.
define void @f8(float %f1, float %f2, float *%ptr1, float *%ptr2) {
  ; restored: the "float %fN," operand lines were truncated; the declare
  ; requires (float, metadata, metadata).
  %sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.ignore")
  %sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.ignore")

  call void @llvm.s390.sfpc(i32 0)

  store float %sqrt1, float *%ptr1
  store float %sqrt2, float *%ptr2

  ret void                              ; restored: terminator/brace were truncated
}
; f9: fpexcept.strict constrained sqrts around an SFPC — must not move
; across the SFPC.
; NOTE(review): the FileCheck CHECK lines for this function appear to
; have been lost in this chunk — restore them before running the test.
; NOTE(review): this function's "ret void" and closing brace lie beyond
; the end of this chunk, so they are intentionally not re-added here.
define void @f9(float %f1, float %f2, float *%ptr1, float *%ptr2) {
  ; restored: the "float %fN," operand lines were truncated; the declare
  ; requires (float, metadata, metadata).
  %sqrt1 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f1,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  %sqrt2 = call float @llvm.experimental.constrained.sqrt.f32(
                        float %f2,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")

  call void @llvm.s390.sfpc(i32 0)

  store float %sqrt1, float *%ptr1
  store float %sqrt2, float *%ptr2