; Test loop tuning.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 -disable-block-placement | FileCheck %s
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -disable-block-placement \
; RUN:   | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-Z13

; Test that strength reduction is applied to addresses with a scale factor,
; but that indexed addressing can still be used.
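; Without strength reduction, each iteration would have to scale the index
; (e.g. with an SLLG) before the store; with it, the scaled offset lives in a
; register that is simply incremented.  The ST check below also verifies that
; a register+register (indexed) address is still formed.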
define void @f1(i32 *%dest, i32 %a) {
; CHECK-LABEL: f1:
; CHECK-NOT: sllg
; CHECK: st %r3, 400({{%r[1-5],%r[1-5]}})
; CHECK: br %r14
entry:
  br label %loop

loop:
  %index = phi i64 [ 0, %entry ], [ %next, %loop ]
  %ptr = getelementptr i32, i32 *%dest, i64 %index
  store i32 %a, i32 *%ptr
  %next = add i64 %index, 1
  %cmp = icmp ne i64 %next, 100
  br i1 %cmp, label %loop, label %exit

exit:
  ret void
}

; Test a loop that should be converted into decrement-and-branch (dbr) form
; and then use BRCT.
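; BRCT (BRANCH RELATIVE ON COUNT) subtracts 1 from its 32-bit count register
; and branches while the result is nonzero, so the LHI/BRCT pair checked
; below covers both the induction update and the back edge.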
define void @f2(i32 *%src, i32 *%dest) {
; CHECK-LABEL: f2:
; CHECK: lhi [[REG:%r[0-5]]], 100
; CHECK: [[LABEL:\.[^:]*]]:{{.*}} %loop
; CHECK: brct [[REG]], [[LABEL]]
; CHECK: br %r14
entry:
  br label %loop

loop:
  %count = phi i32 [ 0, %entry ], [ %next, %loop.next ]
  %next = add i32 %count, 1
  %val = load volatile i32, i32 *%src
  %cmp = icmp eq i32 %val, 0
  br i1 %cmp, label %loop.next, label %loop.store

loop.store:
  %add = add i32 %val, 1
  store volatile i32 %add, i32 *%dest
  br label %loop.next

loop.next:
  %cont = icmp ne i32 %next, 100
  br i1 %cont, label %loop, label %exit

exit:
  ret void
}

; Like f2, but for BRCTG.
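; BRCTG is the 64-bit form of BRCT, so here the count is set up with LGHI
; instead of LHI.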
define void @f3(i64 *%src, i64 *%dest) {
; CHECK-LABEL: f3:
; CHECK: lghi [[REG:%r[0-5]]], 100
; CHECK: [[LABEL:\.[^:]*]]:{{.*}} %loop
; CHECK: brctg [[REG]], [[LABEL]]
; CHECK: br %r14
entry:
  br label %loop

loop:
  %count = phi i64 [ 0, %entry ], [ %next, %loop.next ]
  %next = add i64 %count, 1
  %val = load volatile i64, i64 *%src
  %cmp = icmp eq i64 %val, 0
  br i1 %cmp, label %loop.next, label %loop.store

loop.store:
  %add = add i64 %val, 1
  store volatile i64 %add, i64 *%dest
  br label %loop.next

loop.next:
  %cont = icmp ne i64 %next, 100
  br i1 %cont, label %loop, label %exit

exit:
  ret void
}

; Test a loop with a 64-bit decremented counter in which the 32-bit
; low part of the counter is used after the decrement. This is an example
; of a subregister use being the only thing that blocks a conversion to BRCTG.
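; Because the decremented value's low word feeds the OR and store below, the
; decrement must stay a plain AGHI with a separate LR copy of the subregister
; rather than being folded into a BRCTG, which is what the checks look for.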
define void @f4(i32 *%src, i32 *%dest, i64 *%dest2, i64 %count) {
; CHECK-LABEL: f4:
; CHECK: aghi [[REG:%r[0-5]]], -1
; CHECK: lr [[REG2:%r[0-5]]], [[REG]]
; CHECK: stg [[REG2]],
; CHECK: br %r14
entry:
  br label %loop

loop:
  %left = phi i64 [ %count, %entry ], [ %next, %loop.next ]
  store volatile i64 %left, i64 *%dest2
  %val = load volatile i32, i32 *%src
  %cmp = icmp eq i32 %val, 0
  br i1 %cmp, label %loop.next, label %loop.store

loop.store:
  %add = add i32 %val, 1
  store volatile i32 %add, i32 *%dest
  br label %loop.next

loop.next:
  %next = add i64 %left, -1
  %ext = zext i32 %val to i64
  %shl = shl i64 %ext, 32
  %and = and i64 %next, 4294967295
  %or = or i64 %shl, %and
  store volatile i64 %or, i64 *%dest2
  %cont = icmp ne i64 %next, 0
  br i1 %cont, label %loop, label %exit

exit:
  ret void
}

; Test that negative offsets are avoided for loads of floating point.
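; On z13, float loads can go through vector-facility instructions, which only
; encode an unsigned 12-bit displacement; a negative offset would force a
; separate address computation (e.g. a LAY) inside the loop.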
%s.float = type { float, float, float }
define void @f5(%s.float* nocapture %a,
                %s.float* nocapture readonly %b,
                i32 zeroext %S) {
; CHECK-Z13-LABEL: f5:
; CHECK-Z13-NOT: -{{[0-9]+}}(%r
entry:
  %cmp9 = icmp eq i32 %S, 0
  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %for.body
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %a1 = getelementptr inbounds %s.float, %s.float* %b, i64 %indvars.iv, i32 0
  %tmp = load float, float* %a1, align 4
  %b4 = getelementptr inbounds %s.float, %s.float* %b, i64 %indvars.iv, i32 1
  %tmp1 = load float, float* %b4, align 4
  %add = fadd float %tmp, %tmp1
  %c = getelementptr inbounds %s.float, %s.float* %b, i64 %indvars.iv, i32 2
  %tmp2 = load float, float* %c, align 4
  %add7 = fadd float %add, %tmp2
  %a10 = getelementptr inbounds %s.float, %s.float* %a, i64 %indvars.iv, i32 0
  store float %add7, float* %a10, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %S
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
}

; Test that negative offsets are avoided for loads of double.
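; Same constraint as in f5, but for double: these loads may also be done with
; vector-facility instructions, so only unsigned 12-bit displacements are
; directly encodable.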
%s.double = type { double, double, double }
define void @f6(%s.double* nocapture %a,
                %s.double* nocapture readonly %b,
                i32 zeroext %S) {
; CHECK-Z13-LABEL: f6:
; CHECK-Z13-NOT: -{{[0-9]+}}(%r
entry:
  %cmp9 = icmp eq i32 %S, 0
  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %for.body
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %a1 = getelementptr inbounds %s.double, %s.double* %b, i64 %indvars.iv, i32 0
  %tmp = load double, double* %a1, align 4
  %b4 = getelementptr inbounds %s.double, %s.double* %b, i64 %indvars.iv, i32 1
  %tmp1 = load double, double* %b4, align 4
  %add = fadd double %tmp, %tmp1
  %c = getelementptr inbounds %s.double, %s.double* %b, i64 %indvars.iv, i32 2
  %tmp2 = load double, double* %c, align 4
  %add7 = fadd double %add, %tmp2
  %a10 = getelementptr inbounds %s.double, %s.double* %a, i64 %indvars.iv, i32 0
  store double %add7, double* %a10, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %S
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
}

; Test that negative offsets are avoided for memory accesses of vector type.
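; The vector load/store instructions themselves (e.g. VL/VST) have only an
; unsigned 12-bit displacement, so the restriction applies directly to the
; <4 x i32> accesses here.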
%s.vec = type { <4 x i32>, <4 x i32>, <4 x i32> }
define void @f7(%s.vec* nocapture %a,
                %s.vec* nocapture readonly %b,
                i32 zeroext %S) {
; CHECK-Z13-LABEL: f7:
; CHECK-Z13-NOT: -{{[0-9]+}}(%r
entry:
  %cmp9 = icmp eq i32 %S, 0
  br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %for.body
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader, %for.body
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
  %a1 = getelementptr inbounds %s.vec, %s.vec* %b, i64 %indvars.iv, i32 0
  %tmp = load <4 x i32>, <4 x i32>* %a1, align 4
  %b4 = getelementptr inbounds %s.vec, %s.vec* %b, i64 %indvars.iv, i32 1
  %tmp1 = load <4 x i32>, <4 x i32>* %b4, align 4
  %add = add <4 x i32> %tmp1, %tmp
  %c = getelementptr inbounds %s.vec, %s.vec* %b, i64 %indvars.iv, i32 2
  %tmp2 = load <4 x i32>, <4 x i32>* %c, align 4
  %add7 = add <4 x i32> %add, %tmp2
  %a10 = getelementptr inbounds %s.vec, %s.vec* %a, i64 %indvars.iv, i32 0
  store <4 x i32> %add7, <4 x i32>* %a10, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %S
  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
}

; Test that a memcpy loop does not get a lot of LAYs before each MVC
; (MVC has only a 12-bit unsigned displacement (D12) and no index register).
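; If LSR chose an addressing mode that MVC cannot encode, a LAY would be
; needed to materialize each operand address first; the checks instead
; require four back-to-back MVCs.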
%0 = type { %1, %2* }
%1 = type { %2*, %2* }
%2 = type <{ %3, i32, [4 x i8] }>
%3 = type { i16*, i16*, i16* }

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #0

define void @f8() {
; CHECK-Z13-LABEL: f8:
; CHECK-Z13: mvc
; CHECK-Z13-NEXT: mvc
; CHECK-Z13-NEXT: mvc
; CHECK-Z13-NEXT: mvc
bb:
  %tmp = load %0*, %0** undef, align 8
  br i1 undef, label %bb2, label %bb1

bb1:                                              ; preds = %bb
  br label %bb2

bb2:                                              ; preds = %bb1, %bb
  %tmp3 = phi %0* [ %tmp, %bb ], [ undef, %bb1 ]
  %tmp4 = phi %0* [ undef, %bb ], [ undef, %bb1 ]
  br label %bb5

bb5:                                              ; preds = %bb5, %bb2
  %tmp6 = phi %0* [ %tmp21, %bb5 ], [ %tmp3, %bb2 ]
  %tmp7 = phi %0* [ %tmp20, %bb5 ], [ %tmp4, %bb2 ]
  %tmp8 = getelementptr inbounds %0, %0* %tmp7, i64 -1
  %tmp9 = getelementptr inbounds %0, %0* %tmp6, i64 -1
  %tmp10 = bitcast %0* %tmp9 to i8*
  %tmp11 = bitcast %0* %tmp8 to i8*
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %tmp10, i8* align 8 %tmp11, i64 24, i1 false)
  %tmp12 = getelementptr inbounds %0, %0* %tmp7, i64 -2
  %tmp13 = getelementptr inbounds %0, %0* %tmp6, i64 -2
  %tmp14 = bitcast %0* %tmp13 to i8*
  %tmp15 = bitcast %0* %tmp12 to i8*
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %tmp14, i8* align 8 %tmp15, i64 24, i1 false)
  %tmp16 = getelementptr inbounds %0, %0* %tmp7, i64 -3
  %tmp17 = getelementptr inbounds %0, %0* %tmp6, i64 -3
  %tmp18 = bitcast %0* %tmp17 to i8*
  %tmp19 = bitcast %0* %tmp16 to i8*
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %tmp18, i8* align 8 %tmp19, i64 24, i1 false)
  %tmp20 = getelementptr inbounds %0, %0* %tmp7, i64 -4
  %tmp21 = getelementptr inbounds %0, %0* %tmp6, i64 -4
  %tmp22 = bitcast %0* %tmp21 to i8*
  %tmp23 = bitcast %0* %tmp20 to i8*
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %tmp22, i8* align 8 %tmp23, i64 24, i1 false)
  br label %bb5
}

; Test that a chsi does not need an aghik inside the loop
; (chsi has no index register).
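; CHSI compares a word in storage directly against an immediate, but it can
; only address the operand as base + 12-bit unsigned displacement; if the
; loop's addressing mode needed an index register, an AGHIK would have to
; form each address first.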
define void @f9() {
; CHECK-Z13-LABEL: f9:
; CHECK-Z13: # =>This Inner Loop Header: Depth=1
; CHECK-Z13-NOT: aghik
entry:
  br label %for.body.i63

for.body.i63:                                     ; preds = %for.inc.i, %entry
  %indvars.iv155.i = phi i64 [ 0, %entry ], [ %indvars.iv.next156.i.3, %for.inc.i ]
  %arrayidx.i62 = getelementptr inbounds i32, i32* undef, i64 %indvars.iv155.i
  %tmp = load i32, i32* %arrayidx.i62, align 4
  %cmp9.i = icmp eq i32 %tmp, 0
  br i1 %cmp9.i, label %for.inc.i, label %if.then10.i

if.then10.i:                                      ; preds = %for.body.i63
  unreachable

for.inc.i:                                        ; preds = %for.body.i63
  %indvars.iv.next156.i = or i64 %indvars.iv155.i, 1
  %arrayidx.i62.1 = getelementptr inbounds i32, i32* undef, i64 %indvars.iv.next156.i
  %tmp1 = load i32, i32* %arrayidx.i62.1, align 4
  %indvars.iv.next156.i.3 = add nsw i64 %indvars.iv155.i, 4
  br label %for.body.i63
}

attributes #0 = { argmemonly nounwind }