1 ; Test 64-bit square root.
3 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
4 ; RUN: | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
5 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
7 declare double @llvm.sqrt.f64(double %f)
8 declare double @sqrt(double)
; Check register square root.
define double @f1(double %val) {
; CHECK-LABEL: f1:
; CHECK: sqdbr %f0, %f0
; CHECK: br %r14
  %res = call double @llvm.sqrt.f64(double %val)
  ret double %res
}
; Check the low end of the SQDB range.
define double @f2(ptr %ptr) {
; CHECK-LABEL: f2:
; CHECK: sqdb %f0, 0(%r2)
; CHECK: br %r14
  %val = load double, ptr %ptr
  %res = call double @llvm.sqrt.f64(double %val)
  ret double %res
}
; Check the high end of the aligned SQDB range.
define double @f3(ptr %base) {
; CHECK-LABEL: f3:
; CHECK: sqdb %f0, 4088(%r2)
; CHECK: br %r14
  %ptr = getelementptr double, ptr %base, i64 511
  %val = load double, ptr %ptr
  %res = call double @llvm.sqrt.f64(double %val)
  ret double %res
}
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define double @f4(ptr %base) {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: sqdb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr double, ptr %base, i64 512
  %val = load double, ptr %ptr
  %res = call double @llvm.sqrt.f64(double %val)
  ret double %res
}
; Check negative displacements, which also need separate address logic.
define double @f5(ptr %base) {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -8
; CHECK: sqdb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr double, ptr %base, i64 -1
  %val = load double, ptr %ptr
  %res = call double @llvm.sqrt.f64(double %val)
  ret double %res
}
; Check that SQDB allows indices.
define double @f6(ptr %base, i64 %index) {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 3
; CHECK: sqdb %f0, 800(%r1,%r2)
; CHECK: br %r14
  %ptr1 = getelementptr double, ptr %base, i64 %index
  %ptr2 = getelementptr double, ptr %ptr1, i64 100
  %val = load double, ptr %ptr2
  %res = call double @llvm.sqrt.f64(double %val)
  ret double %res
}
; Test a case where we spill the source of at least one SQDBR. We want
; to use SQDB if possible.
define void @f7(ptr %ptr) {
; CHECK-LABEL: f7:
; CHECK-SCALAR: sqdb {{%f[0-9]+}}, 160(%r15)
; CHECK: br %r14
  %val0 = load volatile double, ptr %ptr
  %val1 = load volatile double, ptr %ptr
  %val2 = load volatile double, ptr %ptr
  %val3 = load volatile double, ptr %ptr
  %val4 = load volatile double, ptr %ptr
  %val5 = load volatile double, ptr %ptr
  %val6 = load volatile double, ptr %ptr
  %val7 = load volatile double, ptr %ptr
  %val8 = load volatile double, ptr %ptr
  %val9 = load volatile double, ptr %ptr
  %val10 = load volatile double, ptr %ptr
  %val11 = load volatile double, ptr %ptr
  %val12 = load volatile double, ptr %ptr
  %val13 = load volatile double, ptr %ptr
  %val14 = load volatile double, ptr %ptr
  %val15 = load volatile double, ptr %ptr
  %val16 = load volatile double, ptr %ptr

  %sqrt0 = call double @llvm.sqrt.f64(double %val0)
  %sqrt1 = call double @llvm.sqrt.f64(double %val1)
  %sqrt2 = call double @llvm.sqrt.f64(double %val2)
  %sqrt3 = call double @llvm.sqrt.f64(double %val3)
  %sqrt4 = call double @llvm.sqrt.f64(double %val4)
  %sqrt5 = call double @llvm.sqrt.f64(double %val5)
  %sqrt6 = call double @llvm.sqrt.f64(double %val6)
  %sqrt7 = call double @llvm.sqrt.f64(double %val7)
  %sqrt8 = call double @llvm.sqrt.f64(double %val8)
  %sqrt9 = call double @llvm.sqrt.f64(double %val9)
  %sqrt10 = call double @llvm.sqrt.f64(double %val10)
  %sqrt11 = call double @llvm.sqrt.f64(double %val11)
  %sqrt12 = call double @llvm.sqrt.f64(double %val12)
  %sqrt13 = call double @llvm.sqrt.f64(double %val13)
  %sqrt14 = call double @llvm.sqrt.f64(double %val14)
  %sqrt15 = call double @llvm.sqrt.f64(double %val15)
  %sqrt16 = call double @llvm.sqrt.f64(double %val16)

  store volatile double %val0, ptr %ptr
  store volatile double %val1, ptr %ptr
  store volatile double %val2, ptr %ptr
  store volatile double %val3, ptr %ptr
  store volatile double %val4, ptr %ptr
  store volatile double %val5, ptr %ptr
  store volatile double %val6, ptr %ptr
  store volatile double %val7, ptr %ptr
  store volatile double %val8, ptr %ptr
  store volatile double %val9, ptr %ptr
  store volatile double %val10, ptr %ptr
  store volatile double %val11, ptr %ptr
  store volatile double %val12, ptr %ptr
  store volatile double %val13, ptr %ptr
  store volatile double %val14, ptr %ptr
  store volatile double %val15, ptr %ptr
  store volatile double %val16, ptr %ptr

  store volatile double %sqrt0, ptr %ptr
  store volatile double %sqrt1, ptr %ptr
  store volatile double %sqrt2, ptr %ptr
  store volatile double %sqrt3, ptr %ptr
  store volatile double %sqrt4, ptr %ptr
  store volatile double %sqrt5, ptr %ptr
  store volatile double %sqrt6, ptr %ptr
  store volatile double %sqrt7, ptr %ptr
  store volatile double %sqrt8, ptr %ptr
  store volatile double %sqrt9, ptr %ptr
  store volatile double %sqrt10, ptr %ptr
  store volatile double %sqrt11, ptr %ptr
  store volatile double %sqrt12, ptr %ptr
  store volatile double %sqrt13, ptr %ptr
  store volatile double %sqrt14, ptr %ptr
  store volatile double %sqrt15, ptr %ptr
  store volatile double %sqrt16, ptr %ptr

  ret void
}
; Check that a call to the normal sqrt function is lowered.
define double @f8(double %dummy, double %val) {
; CHECK-LABEL: f8:
; CHECK: sqdbr %f0, %f2
; CHECK: cdbr %f0, %f0
; CHECK: bnor %r14
; CHECK: ldr %f0, %f2
; CHECK: jg sqrt@PLT
  %res = tail call double @sqrt(double %val)
  ret double %res
}