1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt -passes="function(loop(indvars,loop-idiom,loop-deletion),simplifycfg)" -S < %s | FileCheck %s
4 ; $ clang -m64 -fno-discard-value-names -O0 -S -emit-llvm -Xclang -disable-O0-optnone Code.c
5 ; $ bin/opt -S -basic-aa -mem2reg -loop-simplify -lcssa -loop-rotate \
6 ; -licm -simple-loop-unswitch -enable-nontrivial-unswitch -loop-simplify \
7 ; -loop-deletion -simplifycfg -indvars Code.ll > CodeOpt.ll
8 target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
9 ; void PositiveFor64(int *ar, long long n, long long m)
12 ; for (i=0; i<n; ++i) {
13 ; int *arr = ar + i * m;
14 ; memset(arr, 0, m * sizeof(int));
; PositiveFor64: up-counting i64 loop; each iteration zeroes m*4 bytes at
; ar + i*m, so the loop as a whole is equivalent to one memset of n*m*4
; bytes. The autogenerated assertions verify that loop-idiom + loop-deletion
; perform that promotion and that simplifycfg flattens the leftover CFG.
17 define dso_local void @PositiveFor64(i32* %ar, i64 %n, i64 %m) {
18 ; CHECK-LABEL: @PositiveFor64(
20 ; CHECK-NEXT: [[AR1:%.*]] = bitcast i32* [[AR:%.*]] to i8*
21 ; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i64 0, [[N:%.*]]
22 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
23 ; CHECK: for.body.lr.ph:
24 ; CHECK-NEXT: [[MUL1:%.*]] = mul i64 [[M:%.*]], 4
25 ; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[M]], [[N]]
26 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
27 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[AR1]], i8 0, i64 [[TMP1]], i1 false)
28 ; CHECK-NEXT: br label [[FOR_END]]
30 ; CHECK-NEXT: ret void
33 %cmp1 = icmp slt i64 0, %n
34 br i1 %cmp1, label %for.body.lr.ph, label %for.end
36 for.body.lr.ph: ; preds = %entry
; NOTE(review): %mul1 (m * 4, the per-iteration memset size in bytes) is
; defined in this block on a line elided from this view — see the MUL1
; assertion above.
40 for.body: ; preds = %for.body.lr.ph, %for.body
41 %i.02 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
42 %mul = mul nsw i64 %i.02, %m
43 %add.ptr = getelementptr inbounds i32, i32* %ar, i64 %mul
44 %0 = bitcast i32* %add.ptr to i8*
45 call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %mul1, i1 false)
46 %inc = add nsw i64 %i.02, 1
47 %cmp = icmp slt i64 %inc, %n
48 br i1 %cmp, label %for.body, label %for.end
50 for.end: ; preds = %for.body, %entry
53 ; void NegativeFor64(int *ar, long long n, long long m)
56 ; for (i=n-1; i>=0; --i) {
57 ; int *arr = ar + i * m;
58 ; memset(arr, 0, m * sizeof(int));
; NegativeFor64: same memory footprint as PositiveFor64 but iterating from
; n-1 down to 0. Iteration direction must not matter — the assertions verify
; the loop is still promoted to a single memset of n*m*4 bytes.
61 define dso_local void @NegativeFor64(i32* %ar, i64 %n, i64 %m) {
62 ; CHECK-LABEL: @NegativeFor64(
64 ; CHECK-NEXT: [[AR1:%.*]] = bitcast i32* [[AR:%.*]] to i8*
65 ; CHECK-NEXT: [[SUB:%.*]] = sub nsw i64 [[N:%.*]], 1
66 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sge i64 [[SUB]], 0
67 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
68 ; CHECK: for.body.lr.ph:
69 ; CHECK-NEXT: [[MUL1:%.*]] = mul i64 [[M:%.*]], 4
70 ; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[M]], [[N]]
71 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
72 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[AR1]], i8 0, i64 [[TMP1]], i1 false)
73 ; CHECK-NEXT: br label [[FOR_END]]
75 ; CHECK-NEXT: ret void
78 %sub = sub nsw i64 %n, 1
79 %cmp1 = icmp sge i64 %sub, 0
80 br i1 %cmp1, label %for.body.lr.ph, label %for.end
82 for.body.lr.ph: ; preds = %entry
; NOTE(review): %mul1 (m * 4) is defined here on a line elided from this view.
86 for.body: ; preds = %for.body.lr.ph, %for.body
87 %i.02 = phi i64 [ %sub, %for.body.lr.ph ], [ %dec, %for.body ]
88 %mul = mul nsw i64 %i.02, %m
89 %add.ptr = getelementptr inbounds i32, i32* %ar, i64 %mul
90 %0 = bitcast i32* %add.ptr to i8*
91 call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %mul1, i1 false)
92 %dec = add nsw i64 %i.02, -1
93 %cmp = icmp sge i64 %dec, 0
94 br i1 %cmp, label %for.body, label %for.end
96 for.end: ; preds = %for.body, %entry
99 ; void NestedFor64(int *ar, long long n, long long m, long long o)
102 ; for (i=0; i<n; ++i) {
103 ; for (j=0; j<m; j++) {
104 ; int *arr = ar + i * m * o + j * o;
105 ; memset(arr, 0, o * sizeof(int));
; NestedFor64: doubly nested loop. The inner loop memsets o*4 bytes per j,
; rows are contiguous (row stride i*m*o matches m iterations of stride o),
; so both loops fold into a single memset of n*m*o*4 bytes, as the
; assertions on for.body.us.preheader verify.
109 define void @NestedFor64(i32* %ar, i64 %n, i64 %m, i64 %o) {
110 ; CHECK-LABEL: @NestedFor64(
112 ; CHECK-NEXT: [[AR2:%.*]] = bitcast i32* [[AR:%.*]] to i8*
113 ; CHECK-NEXT: [[CMP3:%.*]] = icmp slt i64 0, [[N:%.*]]
114 ; CHECK-NEXT: [[CMP21:%.*]] = icmp slt i64 0, [[M:%.*]]
115 ; CHECK-NEXT: [[MUL7:%.*]] = mul i64 [[O:%.*]], 4
116 ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[CMP3]], i1 [[CMP21]], i1 false
117 ; CHECK-NEXT: br i1 [[OR_COND]], label [[FOR_BODY_US_PREHEADER:%.*]], label [[FOR_END10:%.*]]
118 ; CHECK: for.body.us.preheader:
119 ; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[O]], [[M]]
120 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
121 ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP0]], [[N]]
122 ; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 2
123 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[AR2]], i8 0, i64 [[TMP3]], i1 false)
124 ; CHECK-NEXT: br label [[FOR_END10]]
126 ; CHECK-NEXT: ret void
129 %cmp3 = icmp slt i64 0, %n
130 br i1 %cmp3, label %for.body.lr.ph, label %for.end10
132 for.body.lr.ph: ; preds = %entry
133 %cmp21 = icmp slt i64 0, %m
134 %mul7 = mul i64 %o, 4
135 br i1 %cmp21, label %for.body.us.preheader, label %for.end10
137 for.body.us.preheader: ; preds = %for.body.lr.ph
138 br label %for.body.us
; Outer loop over i: computes the row base ar + i*m*o.
140 for.body.us: ; preds = %for.body.us.preheader, %for.cond1.for.end_crit_edge.us
141 %i.04.us = phi i64 [ %inc9.us, %for.cond1.for.end_crit_edge.us ], [ 0, %for.body.us.preheader ]
142 %mul.us = mul nsw i64 %i.04.us, %m
143 %mul4.us = mul nsw i64 %mul.us, %o
144 %add.ptr.us = getelementptr inbounds i32, i32* %ar, i64 %mul4.us
145 br label %for.body3.us
; Inner loop over j: memsets one chunk of o ints (%mul7 = o*4 bytes).
147 for.body3.us: ; preds = %for.body.us, %for.body3.us
148 %j.02.us = phi i64 [ 0, %for.body.us ], [ %inc.us, %for.body3.us ]
149 %mul5.us = mul nsw i64 %j.02.us, %o
150 %add.ptr6.us = getelementptr inbounds i32, i32* %add.ptr.us, i64 %mul5.us
151 %0 = bitcast i32* %add.ptr6.us to i8*
152 call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %mul7, i1 false)
153 %inc.us = add nuw nsw i64 %j.02.us, 1
154 %exitcond = icmp ne i64 %inc.us, %m
155 br i1 %exitcond, label %for.body3.us, label %for.cond1.for.end_crit_edge.us
157 for.cond1.for.end_crit_edge.us: ; preds = %for.body3.us
158 %inc9.us = add nuw nsw i64 %i.04.us, 1
159 %exitcond5 = icmp ne i64 %inc9.us, %n
160 br i1 %exitcond5, label %for.body.us, label %for.end10.loopexit
162 for.end10.loopexit: ; preds = %for.cond1.for.end_crit_edge.us
165 for.end10: ; preds = %for.end10.loopexit, %for.body.lr.ph, %entry
168 ; void PositiveFor32(int *ar, int n, int m)
171 ; for (i=0; i<n; ++i) {
172 ; int *arr = ar + i * m;
173 ; memset(arr, 0, m * sizeof(int));
; PositiveFor32: same shape as PositiveFor64 but with i32 bounds that are
; sign-extended to i64 (%conv, %conv1, %conv2). The promotion must still
; fire, producing one memset of (sext n)*(sext m)*4 bytes.
176 define void @PositiveFor32(i32* %ar, i32 %n, i32 %m) {
177 ; CHECK-LABEL: @PositiveFor32(
179 ; CHECK-NEXT: [[AR1:%.*]] = bitcast i32* [[AR:%.*]] to i8*
180 ; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[N:%.*]] to i64
181 ; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i64 0, [[CONV]]
182 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
183 ; CHECK: for.body.lr.ph:
184 ; CHECK-NEXT: [[CONV1:%.*]] = sext i32 [[M:%.*]] to i64
185 ; CHECK-NEXT: [[CONV2:%.*]] = sext i32 [[M]] to i64
186 ; CHECK-NEXT: [[MUL3:%.*]] = mul i64 [[CONV2]], 4
187 ; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[CONV1]], [[CONV]]
188 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
189 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[AR1]], i8 0, i64 [[TMP1]], i1 false)
190 ; CHECK-NEXT: br label [[FOR_END]]
192 ; CHECK-NEXT: ret void
195 %conv = sext i32 %n to i64
196 %cmp1 = icmp slt i64 0, %conv
197 br i1 %cmp1, label %for.body.lr.ph, label %for.end
199 for.body.lr.ph: ; preds = %entry
200 %conv1 = sext i32 %m to i64
201 %conv2 = sext i32 %m to i64
202 %mul3 = mul i64 %conv2, 4
205 for.body: ; preds = %for.body.lr.ph, %for.body
206 %i.02 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
207 %mul = mul nsw i64 %i.02, %conv1
208 %add.ptr = getelementptr inbounds i32, i32* %ar, i64 %mul
209 %0 = bitcast i32* %add.ptr to i8*
210 call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %mul3, i1 false)
211 %inc = add nsw i64 %i.02, 1
212 %cmp = icmp slt i64 %inc, %conv
213 br i1 %cmp, label %for.body, label %for.end
215 for.end: ; preds = %for.body, %entry
218 ; void Negative32(int *ar, int n, int m)
221 ; for (i=n-1; i>=0; i--) {
222 ; int *arr = ar + i * m;
223 ; memset(arr, 0, m * sizeof(int));
; Negative32: down-counting loop with 32-bit start n-1 sign-extended to i64.
; The trip count is recovered as [[CONV]] - (-1) = (n-1)+1 iterations, and
; the loop is still promoted to one memset of n*m*4 bytes.
226 define void @Negative32(i32* %ar, i32 %n, i32 %m) {
227 ; CHECK-LABEL: @Negative32(
229 ; CHECK-NEXT: [[AR1:%.*]] = bitcast i32* [[AR:%.*]] to i8*
230 ; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[N:%.*]], 1
231 ; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[SUB]] to i64
232 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sge i64 [[CONV]], 0
233 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
234 ; CHECK: for.body.lr.ph:
235 ; CHECK-NEXT: [[CONV1:%.*]] = sext i32 [[M:%.*]] to i64
236 ; CHECK-NEXT: [[CONV2:%.*]] = sext i32 [[M]] to i64
237 ; CHECK-NEXT: [[MUL3:%.*]] = mul i64 [[CONV2]], 4
238 ; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[CONV]], -1
239 ; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[CONV1]], [[TMP0]]
240 ; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP1]], 2
241 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[AR1]], i8 0, i64 [[TMP2]], i1 false)
242 ; CHECK-NEXT: br label [[FOR_END]]
244 ; CHECK-NEXT: ret void
247 %sub = sub nsw i32 %n, 1
248 %conv = sext i32 %sub to i64
249 %cmp1 = icmp sge i64 %conv, 0
250 br i1 %cmp1, label %for.body.lr.ph, label %for.end
252 for.body.lr.ph: ; preds = %entry
253 %conv1 = sext i32 %m to i64
254 %conv2 = sext i32 %m to i64
255 %mul3 = mul i64 %conv2, 4
258 for.body: ; preds = %for.body.lr.ph, %for.body
259 %i.02 = phi i64 [ %conv, %for.body.lr.ph ], [ %dec, %for.body ]
260 %mul = mul nsw i64 %i.02, %conv1
261 %add.ptr = getelementptr inbounds i32, i32* %ar, i64 %mul
262 %0 = bitcast i32* %add.ptr to i8*
263 call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 %mul3, i1 false)
264 %dec = add nsw i64 %i.02, -1
265 %cmp = icmp sge i64 %dec, 0
266 br i1 %cmp, label %for.body, label %for.end
268 for.end: ; preds = %for.body, %entry
271 ; This case requires SCEVFolder in LoopIdiomRecognize.cpp to fold SCEV prior to comparison.
272 ; For the inner-loop, SCEVFolder is not needed, however the promoted memset size would be based
273 ; on the trip count of inner-loop (which is an unsigned integer).
274 ; Then in the outer loop, the pointer stride SCEV for memset needs to be converted based on the
275 ; loop guard for it to be equal to the memset size SCEV. The loop guard guarantees that m >= 0
276 ; inside the loop, so m can be converted from sext to zext, making the two SCEV-s equal.
277 ; void NestedFor32(int *ar, int n, int m, int o)
280 ; for (i=0; i<n; ++i) {
281 ; for (j=0; j<m; j++) {
282 ; int *arr = ar + i * m * o + j * o;
283 ; memset(arr, 0, o * sizeof(int));
; NestedFor32: nested loops with mixed sext/zext widenings of the 32-bit
; bounds (see the file comment above this function: the loop guard lets the
; sext of m be rewritten as zext so the stride and size SCEVs match). The
; assertions verify both loops collapse to one memset of n*m*o*4 bytes.
287 define void @NestedFor32(i32* %ar, i32 %n, i32 %m, i32 %o) {
288 ; CHECK-LABEL: @NestedFor32(
290 ; CHECK-NEXT: [[AR2:%.*]] = bitcast i32* [[AR:%.*]] to i8*
291 ; CHECK-NEXT: [[CMP3:%.*]] = icmp slt i32 0, [[N:%.*]]
292 ; CHECK-NEXT: br i1 [[CMP3]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END11:%.*]]
293 ; CHECK: for.body.lr.ph:
294 ; CHECK-NEXT: [[CMP21:%.*]] = icmp slt i32 0, [[M:%.*]]
295 ; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[O:%.*]] to i64
296 ; CHECK-NEXT: [[MUL8:%.*]] = mul i64 [[CONV]], 4
297 ; CHECK-NEXT: br i1 [[CMP21]], label [[FOR_BODY_US_PREHEADER:%.*]], label [[FOR_END11]]
298 ; CHECK: for.body.us.preheader:
299 ; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[O]] to i64
300 ; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[M]] to i64
301 ; CHECK-NEXT: [[WIDE_TRIP_COUNT10:%.*]] = zext i32 [[N]] to i64
302 ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP0]], [[TMP1]]
303 ; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[M]] to i64
304 ; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP0]], [[TMP3]]
305 ; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 2
306 ; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP4]], [[WIDE_TRIP_COUNT10]]
307 ; CHECK-NEXT: [[TMP7:%.*]] = shl i64 [[TMP6]], 2
308 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[AR2]], i8 0, i64 [[TMP7]], i1 false)
309 ; CHECK-NEXT: br label [[FOR_END11]]
311 ; CHECK-NEXT: ret void
314 %cmp3 = icmp slt i32 0, %n
315 br i1 %cmp3, label %for.body.lr.ph, label %for.end11
317 for.body.lr.ph: ; preds = %entry
318 %cmp21 = icmp slt i32 0, %m
319 %conv = sext i32 %o to i64
320 %mul8 = mul i64 %conv, 4
321 br i1 %cmp21, label %for.body.us.preheader, label %for.end11
; Widened (indvars) copies of the bounds: %0/%2 = sext o, %1 = sext m,
; %wide.trip.count10 = zext n.
323 for.body.us.preheader: ; preds = %for.body.lr.ph
324 %0 = sext i32 %o to i64
325 %1 = sext i32 %m to i64
326 %2 = sext i32 %o to i64
327 %wide.trip.count10 = zext i32 %n to i64
328 br label %for.body.us
; Outer loop over i: row base ar + i*m*o.
330 for.body.us: ; preds = %for.body.us.preheader, %for.cond1.for.end_crit_edge.us
331 %indvars.iv6 = phi i64 [ 0, %for.body.us.preheader ], [ %indvars.iv.next7, %for.cond1.for.end_crit_edge.us ]
332 %3 = mul nsw i64 %indvars.iv6, %1
333 %4 = mul nsw i64 %3, %2
334 %add.ptr.us = getelementptr inbounds i32, i32* %ar, i64 %4
335 %wide.trip.count = zext i32 %m to i64
336 br label %for.body3.us
; Inner loop over j: memsets o ints (%mul8 = o*4 bytes) per iteration.
338 for.body3.us: ; preds = %for.body.us, %for.body3.us
339 %indvars.iv = phi i64 [ 0, %for.body.us ], [ %indvars.iv.next, %for.body3.us ]
340 %5 = mul nsw i64 %indvars.iv, %0
341 %add.ptr7.us = getelementptr inbounds i32, i32* %add.ptr.us, i64 %5
342 %6 = bitcast i32* %add.ptr7.us to i8*
343 call void @llvm.memset.p0i8.i64(i8* align 4 %6, i8 0, i64 %mul8, i1 false)
344 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
345 %exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count
346 br i1 %exitcond, label %for.body3.us, label %for.cond1.for.end_crit_edge.us
348 for.cond1.for.end_crit_edge.us: ; preds = %for.body3.us
349 %indvars.iv.next7 = add nuw nsw i64 %indvars.iv6, 1
350 %exitcond11 = icmp ne i64 %indvars.iv.next7, %wide.trip.count10
351 br i1 %exitcond11, label %for.body.us, label %for.end11.loopexit
353 for.end11.loopexit: ; preds = %for.cond1.for.end_crit_edge.us
356 for.end11: ; preds = %for.end11.loopexit, %for.body.lr.ph, %entry
360 ; void NegStart(int n, int m, int *ar) {
361 ; for (int i = -100; i < n; i++) {
362 ; int *arr = ar + (i + 100) * m;
363 ; memset(arr, 0, m * sizeof(int));
; NegStart: induction variable starts at -100, but the store offset (i+100)*m
; starts at the buffer base, so the loop still zeroes a contiguous region.
; The assertions verify the memset size uses the shifted trip count
; [[WIDE_TRIP_COUNT]] + 100 (i.e. n + 100 iterations).
366 define void @NegStart(i32 %n, i32 %m, i32* %ar) {
367 ; CHECK-LABEL: @NegStart(
369 ; CHECK-NEXT: [[AR1:%.*]] = bitcast i32* [[AR:%.*]] to i8*
370 ; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 -100, [[N:%.*]]
371 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
372 ; CHECK: for.body.lr.ph:
373 ; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[M:%.*]] to i64
374 ; CHECK-NEXT: [[MUL1:%.*]] = mul i64 [[CONV]], 4
375 ; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[M]] to i64
376 ; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = sext i32 [[N]] to i64
377 ; CHECK-NEXT: [[TMP1:%.*]] = add nsw i64 [[WIDE_TRIP_COUNT]], 100
378 ; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], [[TMP0]]
379 ; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 2
380 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[AR1]], i8 0, i64 [[TMP3]], i1 false)
381 ; CHECK-NEXT: br label [[FOR_END]]
383 ; CHECK-NEXT: ret void
386 %cmp1 = icmp slt i32 -100, %n
387 br i1 %cmp1, label %for.body.lr.ph, label %for.end
389 for.body.lr.ph: ; preds = %entry
390 %conv = sext i32 %m to i64
391 %mul1 = mul i64 %conv, 4
392 %0 = sext i32 %m to i64
393 %wide.trip.count = sext i32 %n to i64
396 for.body: ; preds = %for.body.lr.ph, %for.body
397 %indvars.iv = phi i64 [ -100, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
398 %1 = add nsw i64 %indvars.iv, 100
399 %2 = mul nsw i64 %1, %0
400 %add.ptr = getelementptr inbounds i32, i32* %ar, i64 %2
401 %3 = bitcast i32* %add.ptr to i8*
402 call void @llvm.memset.p0i8.i64(i8* align 4 %3, i8 0, i64 %mul1, i1 false)
403 %indvars.iv.next = add nsw i64 %indvars.iv, 1
404 %exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count
405 br i1 %exitcond, label %for.body, label %for.end.loopexit
407 for.end.loopexit: ; preds = %for.body
410 for.end: ; preds = %for.end.loopexit, %entry
; Declaration of the memset intrinsic every test above stores through.
414 declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)