; Test 64-bit signed division and remainder.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -asm-verbose=0 | FileCheck %s

declare i64 @foo()

; Test register division. The result is in the second of the two registers.
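; (DSGR writes the remainder to the first register of the pair, %r2 here, and
; the quotient to the second, %r3, hence the stg of %r3 below.)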
define void @f1(i64 %dummy, i64 %a, i64 %b, i64 *%dest) {
; CHECK-LABEL: f1:
; CHECK-NOT: {{%r[234]}}
; CHECK: dsgr %r2, %r4
; CHECK: stg %r3, 0(%r5)
  %div = sdiv i64 %a, %b
  store i64 %div, i64 *%dest
  ret void
}

; Test register remainder. The result is in the first of the two registers.
define void @f2(i64 %dummy, i64 %a, i64 %b, i64 *%dest) {
; CHECK-LABEL: f2:
; CHECK-NOT: {{%r[234]}}
; CHECK: dsgr %r2, %r4
; CHECK: stg %r2, 0(%r5)
  %rem = srem i64 %a, %b
  store i64 %rem, i64 *%dest
  ret void
}

; Test that division and remainder use a single instruction.
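; (A single DSGR leaves both the quotient and the remainder in the register
; pair, so the OR below can consume them without a second division.)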
define i64 @f3(i64 %dummy1, i64 %a, i64 %b) {
; CHECK-LABEL: f3:
; CHECK-NOT: {{%r[234]}}
; CHECK: dsgr %r2, %r4
  %div = sdiv i64 %a, %b
  %rem = srem i64 %a, %b
  %or = or i64 %rem, %div
  ret i64 %or
}

; Test memory division with no displacement.
define void @f4(i64 %dummy, i64 %a, i64 *%src, i64 *%dest) {
; CHECK-LABEL: f4:
; CHECK-NOT: {{%r[234]}}
; CHECK: dsg %r2, 0(%r4)
; CHECK: stg %r3, 0(%r5)
  %b = load i64, i64 *%src
  %div = sdiv i64 %a, %b
  store i64 %div, i64 *%dest
  ret void
}

; Test memory remainder with no displacement.
define void @f5(i64 %dummy, i64 %a, i64 *%src, i64 *%dest) {
; CHECK-LABEL: f5:
; CHECK-NOT: {{%r[234]}}
; CHECK: dsg %r2, 0(%r4)
; CHECK: stg %r2, 0(%r5)
  %b = load i64, i64 *%src
  %rem = srem i64 %a, %b
  store i64 %rem, i64 *%dest
  ret void
}

; Test both memory division and memory remainder.
define i64 @f6(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f6:
; CHECK-NOT: {{%r[234]}}
; CHECK: dsg %r2, 0(%r4)
; CHECK-NOT: {{dsg|dsgr}}
  %b = load i64, i64 *%src
  %div = sdiv i64 %a, %b
  %rem = srem i64 %a, %b
  %or = or i64 %rem, %div
  ret i64 %or
}

; Check the high end of the DSG range.
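; (DSG takes a signed 20-bit displacement, so 65535 * 8 = 524280 is the largest
; doubleword-aligned offset that can be encoded directly.)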
define i64 @f7(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f7:
; CHECK: dsg %r2, 524280(%r4)
  %ptr = getelementptr i64, i64 *%src, i64 65535
  %b = load i64, i64 *%ptr
  %rem = srem i64 %a, %b
  ret i64 %rem
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
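; (65536 * 8 = 524288 no longer fits in the signed 20-bit displacement field,
; so the offset is folded into the base register with AGFI first.)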
define i64 @f8(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f8:
; CHECK: agfi %r4, 524288
; CHECK: dsg %r2, 0(%r4)
  %ptr = getelementptr i64, i64 *%src, i64 65536
  %b = load i64, i64 *%ptr
  %rem = srem i64 %a, %b
  ret i64 %rem
}

; Check the high end of the negative aligned DSG range.
define i64 @f9(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f9:
; CHECK: dsg %r2, -8(%r4)
  %ptr = getelementptr i64, i64 *%src, i64 -1
  %b = load i64, i64 *%ptr
  %rem = srem i64 %a, %b
  ret i64 %rem
}

; Check the low end of the DSG range.
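; (-65536 * 8 = -524288 is the most negative displacement that DSG can encode.)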
define i64 @f10(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f10:
; CHECK: dsg %r2, -524288(%r4)
  %ptr = getelementptr i64, i64 *%src, i64 -65536
  %b = load i64, i64 *%ptr
  %rem = srem i64 %a, %b
  ret i64 %rem
}

; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
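; (-65537 * 8 = -524296 is below the encodable range, so AGFI adjusts the base
; register first.)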
define i64 @f11(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f11:
; CHECK: agfi %r4, -524296
; CHECK: dsg %r2, 0(%r4)
  %ptr = getelementptr i64, i64 *%src, i64 -65537
  %b = load i64, i64 *%ptr
  %rem = srem i64 %a, %b
  ret i64 %rem
}

; Check that DSG allows an index.
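; (524287 is the largest encodable displacement, and DSG can combine it with
; both a base register and an index register.)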
define i64 @f12(i64 %dummy, i64 %a, i64 %src, i64 %index) {
; CHECK-LABEL: f12:
; CHECK: dsg %r2, 524287(%r5,%r4)
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524287
  %ptr = inttoptr i64 %add2 to i64 *
  %b = load i64, i64 *%ptr
  %rem = srem i64 %a, %b
  ret i64 %rem
}

; Check that divisions of spilled values can use DSG rather than DSGR.
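; (The call to @foo clobbers the call-clobbered registers, so the loaded
; divisors are spilled; DSG can then read a divisor directly from its stack
; slot instead of reloading it into a register for DSGR.)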
define i64 @f13(i64 *%ptr0) {
; CHECK-LABEL: f13:
; CHECK: brasl %r14, foo@PLT
; CHECK: dsg {{%r[0-9]+}}, 160(%r15)
  %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
  %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
  %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
  %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
  %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
  %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
  %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
  %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
  %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
  %ptr10 = getelementptr i64, i64 *%ptr0, i64 20

  %val0 = load i64, i64 *%ptr0
  %val1 = load i64, i64 *%ptr1
  %val2 = load i64, i64 *%ptr2
  %val3 = load i64, i64 *%ptr3
  %val4 = load i64, i64 *%ptr4
  %val5 = load i64, i64 *%ptr5
  %val6 = load i64, i64 *%ptr6
  %val7 = load i64, i64 *%ptr7
  %val8 = load i64, i64 *%ptr8
  %val9 = load i64, i64 *%ptr9
  %val10 = load i64, i64 *%ptr10

  %ret = call i64 @foo()

  %div0 = sdiv i64 %ret, %val0
  %div1 = sdiv i64 %div0, %val1
  %div2 = sdiv i64 %div1, %val2
  %div3 = sdiv i64 %div2, %val3
  %div4 = sdiv i64 %div3, %val4
  %div5 = sdiv i64 %div4, %val5
  %div6 = sdiv i64 %div5, %val6
  %div7 = sdiv i64 %div6, %val7
  %div8 = sdiv i64 %div7, %val8
  %div9 = sdiv i64 %div8, %val9
  %div10 = sdiv i64 %div9, %val10

  ret i64 %div10
}