; Test high-part i64->i128 multiplications.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -asm-verbose=0 | FileCheck %s

declare i64 @foo()

; Check zero-extended multiplication in which only the high part is used.
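; MLGR multiplies the odd register of an even/odd pair (%r3 here) by the
; second operand and leaves the 128-bit product in the pair, high part in
; the even register, so the lshr/trunc should fold away and the result
; should already be in %r2.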
define i64 @f1(i64 %dummy, i64 %a, i64 %b) {
; CHECK-LABEL: f1:
; CHECK-NOT: {{%r[234]}}
; CHECK: mlgr %r2, %r4
; CHECK: br %r14
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check sign-extended multiplication in which only the high part is used.
; This needs a rather convoluted sequence.
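; There is no signed multiply-high instruction available here, so the
; expected expansion computes the unsigned high part with MLGR and then
; corrects it using the identity:
;   smulh(a, b) = umulh(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
; The SRAG/NGR pairs below build the two correction terms.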
define i64 @f2(i64 %dummy, i64 %a, i64 %b) {
; CHECK-LABEL: f2:
; CHECK-DAG: srag [[RES1:%r[0-5]]], %r3, 63
; CHECK-DAG: srag [[RES2:%r[0-5]]], %r4, 63
; CHECK-DAG: ngr [[RES1]], %r4
; CHECK-DAG: ngr [[RES2]], %r3
; CHECK-DAG: agr [[RES2]], [[RES1]]
; CHECK-DAG: mlgr %r2, %r4
; CHECK: sgr %r2, [[RES2]]
; CHECK: br %r14
  %ax = sext i64 %a to i128
  %bx = sext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check zero-extended multiplication in which only part of the high half
; is used.
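; A shift of 67 extracts the high 64 bits and then shifts them right by a
; further 3, so a single SRLG by 3 after the MLGR should suffice.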
define i64 @f3(i64 %dummy, i64 %a, i64 %b) {
; CHECK-LABEL: f3:
; CHECK-NOT: {{%r[234]}}
; CHECK: mlgr %r2, %r4
; CHECK: srlg %r2, %r2, 3
; CHECK: br %r14
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 67
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check zero-extended multiplication in which the result is split into
; high and low halves.
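; The low half of the product is left in the odd register (%r3) by MLGR,
; so combining the two halves should need only a single OGR.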
define i64 @f4(i64 %dummy, i64 %a, i64 %b) {
; CHECK-LABEL: f4:
; CHECK-NOT: {{%r[234]}}
; CHECK: mlgr %r2, %r4
; CHECK: ogr %r2, %r3
; CHECK: br %r14
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  %low = trunc i128 %mulx to i64
  %or = or i64 %high, %low
  ret i64 %or
}

; Check division by a constant, which should use multiplication instead.
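; Unsigned division by a constant is expected to be lowered to a
; multiply-high by a precomputed "magic" reciprocal followed by a logical
; right shift, rather than an actual divide instruction.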
define i64 @f5(i64 %dummy, i64 %a) {
; CHECK-LABEL: f5:
; CHECK: mlgr %r2,
; CHECK: srlg %r2, %r2,
; CHECK: br %r14
  %res = udiv i64 %a, 1234
  ret i64 %res
}

; Check MLG with no displacement.
define i64 @f6(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f6:
; CHECK-NOT: {{%r[234]}}
; CHECK: mlg %r2, 0(%r4)
; CHECK: br %r14
  %b = load i64, i64 *%src
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the high end of the aligned MLG range.
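; MLG has a 20-bit signed displacement, so the largest doubleword-aligned
; offset that can be encoded directly is 524280 (8 * 65535).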
define i64 @f7(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f7:
; CHECK: mlg %r2, 524280(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 65535
  %b = load i64, i64 *%ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the next doubleword up, which requires separate address logic.
; Other sequences besides this one would be OK.
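; 524288 is one past the maximum 20-bit signed displacement of 524287,
; so the base register has to be adjusted first.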
define i64 @f8(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f8:
; CHECK: agfi %r4, 524288
; CHECK: mlg %r2, 0(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 65536
  %b = load i64, i64 *%ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the high end of the negative aligned MLG range.
define i64 @f9(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f9:
; CHECK: mlg %r2, -8(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -1
  %b = load i64, i64 *%ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the low end of the MLG range.
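; -524288 is the minimum 20-bit signed displacement, so it should still
; be encodable directly.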
define i64 @f10(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f10:
; CHECK: mlg %r2, -524288(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -65536
  %b = load i64, i64 *%ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
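; -524296 lies below the -524288 minimum displacement, so once again the
; base register has to be adjusted first.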
define i64 @f11(i64 *%dest, i64 %a, i64 *%src) {
; CHECK-LABEL: f11:
; CHECK: agfi %r4, -524296
; CHECK: mlg %r2, 0(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -65537
  %b = load i64, i64 *%ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check that MLG allows an index.
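; MLG is an RXY-format instruction, so its address can combine a base
; register, an index register and a displacement, as in 524287(%r5,%r4).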
define i64 @f12(i64 *%dest, i64 %a, i64 %src, i64 %index) {
; CHECK-LABEL: f12:
; CHECK: mlg %r2, 524287(%r5,%r4)
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524287
  %ptr = inttoptr i64 %add2 to i64 *
  %b = load i64, i64 *%ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check that multiplications of spilled values can use MLG rather than MLGR.
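; Enough values are live across the call that some spill to the stack,
; and the multiplications should then read those operands directly from
; memory. 160(%r15) is the first spill slot above the 160-byte register
; save area of the s390x ELF ABI stack frame.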
define i64 @f13(i64 *%ptr0) {
; CHECK-LABEL: f13:
; CHECK: brasl %r14, foo@PLT
; CHECK: mlg {{%r[0-9]+}}, 160(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
  %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
  %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
  %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
  %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
  %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
  %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
  %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
  %ptr9 = getelementptr i64, i64 *%ptr0, i64 18

  %val0 = load i64, i64 *%ptr0
  %val1 = load i64, i64 *%ptr1
  %val2 = load i64, i64 *%ptr2
  %val3 = load i64, i64 *%ptr3
  %val4 = load i64, i64 *%ptr4
  %val5 = load i64, i64 *%ptr5
  %val6 = load i64, i64 *%ptr6
  %val7 = load i64, i64 *%ptr7
  %val8 = load i64, i64 *%ptr8
  %val9 = load i64, i64 *%ptr9

  %ret = call i64 @foo()

  %retx = zext i64 %ret to i128
  %val0x = zext i64 %val0 to i128
  %mul0d = mul i128 %retx, %val0x
  %mul0x = lshr i128 %mul0d, 64

  %val1x = zext i64 %val1 to i128
  %mul1d = mul i128 %mul0x, %val1x
  %mul1x = lshr i128 %mul1d, 64

  %val2x = zext i64 %val2 to i128
  %mul2d = mul i128 %mul1x, %val2x
  %mul2x = lshr i128 %mul2d, 64

  %val3x = zext i64 %val3 to i128
  %mul3d = mul i128 %mul2x, %val3x
  %mul3x = lshr i128 %mul3d, 64

  %val4x = zext i64 %val4 to i128
  %mul4d = mul i128 %mul3x, %val4x
  %mul4x = lshr i128 %mul4d, 64

  %val5x = zext i64 %val5 to i128
  %mul5d = mul i128 %mul4x, %val5x
  %mul5x = lshr i128 %mul5d, 64

  %val6x = zext i64 %val6 to i128
  %mul6d = mul i128 %mul5x, %val6x
  %mul6x = lshr i128 %mul6d, 64

  %val7x = zext i64 %val7 to i128
  %mul7d = mul i128 %mul6x, %val7x
  %mul7x = lshr i128 %mul7d, 64

  %val8x = zext i64 %val8 to i128
  %mul8d = mul i128 %mul7x, %val8x
  %mul8x = lshr i128 %mul8d, 64

  %val9x = zext i64 %val9 to i128
  %mul9d = mul i128 %mul8x, %val9x
  %mul9x = lshr i128 %mul9d, 64

  %mul9 = trunc i128 %mul9x to i64
  ret i64 %mul9
}