; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
; RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 \
; RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK-VECTOR %s
; Constrained (strict-FP) fused multiply-add intrinsic used by every test
; below; each caller negates the accumulator so codegen can fold the FMA
; into a fused multiply-and-SUBTRACT (msdb/msdbr/wfmsdb).
; NOTE(review): recent LLVM verifiers require the strictfp attribute on
; functions that use constrained intrinsics — confirm against the LLVM
; version this test targets.
declare double @llvm.experimental.constrained.fma.f64(double %f1, double %f2, double %f3, metadata, metadata)
; Register-register case: FMA with a negated accumulator becomes a fused
; multiply-and-subtract (msdbr on scalar z10, wfmsdb on vector-capable z13).
define double @f1(double %f1, double %f2, double %acc) {
; CHECK-SCALAR: msdbr %f4, %f0, %f2
; CHECK-SCALAR: ldr %f0, %f4
; CHECK-VECTOR: wfmsdb %f0, %f0, %f2, %f4
  %negacc = fsub double -0.0, %acc
  %res = call double @llvm.experimental.constrained.fma.f64 (
                        double %f1, double %f2, double %negacc,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}
; Memory operand with zero displacement: the multiplicand load folds into msdb.
define double @f2(double %f1, double *%ptr, double %acc) {
; CHECK: msdb %f2, %f0, 0(%r2)
  %f2 = load double, double *%ptr
  %negacc = fsub double -0.0, %acc
  %res = call double @llvm.experimental.constrained.fma.f64 (
                        double %f1, double %f2, double %negacc,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}
; Maximum in-range displacement: element 511 * 8 bytes = 4088, the largest
; unsigned 12-bit displacement msdb can encode.
define double @f3(double %f1, double *%base, double %acc) {
; CHECK: msdb %f2, %f0, 4088(%r2)
  %ptr = getelementptr double, double *%base, i64 511
  %f2 = load double, double *%ptr
  %negacc = fsub double -0.0, %acc
  %res = call double @llvm.experimental.constrained.fma.f64 (
                        double %f1, double %f2, double %negacc,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}
; First out-of-range displacement (512 * 8 = 4096): the base must be
; adjusted separately rather than encoded in the instruction.
define double @f4(double %f1, double *%base, double %acc) {
; The important thing here is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
; CHECK: aghi %r2, 4096
; CHECK: msdb %f2, %f0, 0(%r2)
  %ptr = getelementptr double, double *%base, i64 512
  %f2 = load double, double *%ptr
  %negacc = fsub double -0.0, %acc
  %res = call double @llvm.experimental.constrained.fma.f64 (
                        double %f1, double %f2, double %negacc,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}
; Negative offset (element -1): msdb displacements are unsigned, so the
; base must be adjusted before the fused multiply-and-subtract.
define double @f5(double %f1, double *%base, double %acc) {
; Here too the important thing is that we don't generate an out-of-range
; displacement. Other sequences besides this one would be OK.
; CHECK: msdb %f2, %f0, 0(%r2)
  %ptr = getelementptr double, double *%base, i64 -1
  %f2 = load double, double *%ptr
  %negacc = fsub double -0.0, %acc
  %res = call double @llvm.experimental.constrained.fma.f64 (
                        double %f1, double %f2, double %negacc,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}
; Indexed addressing: the scaled index (index * 8 via sllg) is used as the
; index register of the folded msdb memory operand.
define double @f6(double %f1, double *%base, i64 %index, double %acc) {
; CHECK: sllg %r1, %r3, 3
; CHECK: msdb %f2, %f0, 0(%r1,%r2)
  %ptr = getelementptr double, double *%base, i64 %index
  %f2 = load double, double *%ptr
  %negacc = fsub double -0.0, %acc
  %res = call double @llvm.experimental.constrained.fma.f64 (
                        double %f1, double %f2, double %negacc,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}
; Indexed addressing plus the maximum in-range constant displacement
; (511 * 8 = 4088) folded into the same memory operand.
define double @f7(double %f1, double *%base, i64 %index, double %acc) {
; CHECK: sllg %r1, %r3, 3
; CHECK: msdb %f2, %f0, 4088({{%r1,%r2|%r2,%r1}})
; CHECK: ldr %f0, %f2
  %index2 = add i64 %index, 511
  %ptr = getelementptr double, double *%base, i64 %index2
  %f2 = load double, double *%ptr
  %negacc = fsub double -0.0, %acc
  %res = call double @llvm.experimental.constrained.fma.f64 (
                        double %f1, double %f2, double %negacc,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}
; Indexed addressing with a displacement just out of range (512 * 8 = 4096):
; the address is formed with lay so msdb keeps a zero displacement.
define double @f8(double %f1, double *%base, i64 %index, double %acc) {
; CHECK: sllg %r1, %r3, 3
; CHECK: lay %r1, 4096({{%r1,%r2|%r2,%r1}})
; CHECK: msdb %f2, %f0, 0(%r1)
; CHECK: ldr %f0, %f2
  %index2 = add i64 %index, 512
  %ptr = getelementptr double, double *%base, i64 %index2
  %f2 = load double, double *%ptr
  %negacc = fsub double -0.0, %acc
  %res = call double @llvm.experimental.constrained.fma.f64 (
                        double %f1, double %f2, double %negacc,
                        metadata !"round.dynamic",
                        metadata !"fpexcept.strict")
  ret double %res
}