; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes="function(loop(indvars,loop-idiom,loop-deletion),simplifycfg)" -S < %s | FileCheck %s
; The C code to generate this testcase:
; $ clang -m64 -fno-discard-value-names -O0 -S -emit-llvm -Xclang -disable-O0-optnone Code.c
; $ bin/opt -S -passes='mem2reg,loop-simplify,lcssa,loop-rotate,licm,simple-loop-unswitch,loop-simplify,loop-deletion,simplifycfg,indvars' \
;   -enable-nontrivial-unswitch Code.ll > CodeOpt.ll
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
; void PositiveFor64(int *ar, long long n, long long m)
; {
;   long long i;
;   for (i=0; i<n; ++i) {
;     int *arr = ar + i * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
define dso_local void @PositiveFor64(ptr %ar, i64 %n, i64 %m) {
; CHECK-LABEL: @PositiveFor64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i64 0, [[N:%.*]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[MUL1:%.*]] = mul i64 [[M:%.*]], 4
; CHECK-NEXT:    [[TMP0:%.*]] = mul i64 [[M]], [[N]]
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[AR:%.*]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %cmp1 = icmp slt i64 0, %n
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %mul1 = mul i64 %m, 4
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.02 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %mul = mul nsw i64 %i.02, %m
  %add.ptr = getelementptr inbounds i32, ptr %ar, i64 %mul
  call void @llvm.memset.p0.i64(ptr align 4 %add.ptr, i8 0, i64 %mul1, i1 false)
  %inc = add nsw i64 %i.02, 1
  %cmp = icmp slt i64 %inc, %n
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}
; void NegativeFor64(int *ar, long long n, long long m)
; {
;   long long i;
;   for (i=n-1; i>=0; --i) {
;     int *arr = ar + i * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
define dso_local void @NegativeFor64(ptr %ar, i64 %n, i64 %m) {
; CHECK-LABEL: @NegativeFor64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[N:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sge i64 [[SUB]], 0
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[MUL1:%.*]] = mul i64 [[M:%.*]], 4
; CHECK-NEXT:    [[TMP0:%.*]] = mul i64 [[M]], [[N]]
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[AR:%.*]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %sub = sub nsw i64 %n, 1
  %cmp1 = icmp sge i64 %sub, 0
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %mul1 = mul i64 %m, 4
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.02 = phi i64 [ %sub, %for.body.lr.ph ], [ %dec, %for.body ]
  %mul = mul nsw i64 %i.02, %m
  %add.ptr = getelementptr inbounds i32, ptr %ar, i64 %mul
  call void @llvm.memset.p0.i64(ptr align 4 %add.ptr, i8 0, i64 %mul1, i1 false)
  %dec = add nsw i64 %i.02, -1
  %cmp = icmp sge i64 %dec, 0
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}
; void NestedFor64(int *ar, long long n, long long m, long long o)
; {
;   long long i, j;
;   for (i=0; i<n; ++i) {
;     for (j=0; j<m; j++) {
;       int *arr = ar + i * m * o + j * o;
;       memset(arr, 0, o * sizeof(int));
;     }
;   }
; }
define void @NestedFor64(ptr %ar, i64 %n, i64 %m, i64 %o) {
; CHECK-LABEL: @NestedFor64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP3:%.*]] = icmp slt i64 0, [[N:%.*]]
; CHECK-NEXT:    [[CMP21:%.*]] = icmp slt i64 0, [[M:%.*]]
; CHECK-NEXT:    [[MUL7:%.*]] = mul i64 [[O:%.*]], 4
; CHECK-NEXT:    [[OR_COND:%.*]] = select i1 [[CMP3]], i1 [[CMP21]], i1 false
; CHECK-NEXT:    br i1 [[OR_COND]], label [[FOR_BODY_US_PREHEADER:%.*]], label [[FOR_END10:%.*]]
; CHECK:       for.body.us.preheader:
; CHECK-NEXT:    [[TMP0:%.*]] = mul i64 [[O]], [[M]]
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP0]], [[N]]
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 2
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[AR:%.*]], i8 0, i64 [[TMP3]], i1 false)
; CHECK-NEXT:    br label [[FOR_END10]]
; CHECK:       for.end10:
; CHECK-NEXT:    ret void
;
entry:
  %cmp3 = icmp slt i64 0, %n
  br i1 %cmp3, label %for.body.lr.ph, label %for.end10

for.body.lr.ph:                                   ; preds = %entry
  %cmp21 = icmp slt i64 0, %m
  %mul7 = mul i64 %o, 4
  br i1 %cmp21, label %for.body.us.preheader, label %for.end10

for.body.us.preheader:                            ; preds = %for.body.lr.ph
  br label %for.body.us

for.body.us:                                      ; preds = %for.body.us.preheader, %for.cond1.for.end_crit_edge.us
  %i.04.us = phi i64 [ %inc9.us, %for.cond1.for.end_crit_edge.us ], [ 0, %for.body.us.preheader ]
  %mul.us = mul nsw i64 %i.04.us, %m
  %mul4.us = mul nsw i64 %mul.us, %o
  %add.ptr.us = getelementptr inbounds i32, ptr %ar, i64 %mul4.us
  br label %for.body3.us

for.body3.us:                                     ; preds = %for.body.us, %for.body3.us
  %j.02.us = phi i64 [ 0, %for.body.us ], [ %inc.us, %for.body3.us ]
  %mul5.us = mul nsw i64 %j.02.us, %o
  %add.ptr6.us = getelementptr inbounds i32, ptr %add.ptr.us, i64 %mul5.us
  call void @llvm.memset.p0.i64(ptr align 4 %add.ptr6.us, i8 0, i64 %mul7, i1 false)
  %inc.us = add nuw nsw i64 %j.02.us, 1
  %exitcond = icmp ne i64 %inc.us, %m
  br i1 %exitcond, label %for.body3.us, label %for.cond1.for.end_crit_edge.us

for.cond1.for.end_crit_edge.us:                   ; preds = %for.body3.us
  %inc9.us = add nuw nsw i64 %i.04.us, 1
  %exitcond5 = icmp ne i64 %inc9.us, %n
  br i1 %exitcond5, label %for.body.us, label %for.end10.loopexit

for.end10.loopexit:                               ; preds = %for.cond1.for.end_crit_edge.us
  br label %for.end10

for.end10:                                        ; preds = %for.end10.loopexit, %for.body.lr.ph, %entry
  ret void
}
; void PositiveFor32(int *ar, int n, int m)
; {
;   long long i;
;   for (i=0; i<n; ++i) {
;     int *arr = ar + i * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
define void @PositiveFor32(ptr %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @PositiveFor32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[N:%.*]] to i64
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i64 0, [[CONV]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CONV1:%.*]] = sext i32 [[M:%.*]] to i64
; CHECK-NEXT:    [[CONV2:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[MUL3:%.*]] = mul i64 [[CONV2]], 4
; CHECK-NEXT:    [[TMP0:%.*]] = mul i64 [[CONV1]], [[CONV]]
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[AR:%.*]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %conv = sext i32 %n to i64
  %cmp1 = icmp slt i64 0, %conv
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %conv1 = sext i32 %m to i64
  %conv2 = sext i32 %m to i64
  %mul3 = mul i64 %conv2, 4
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.02 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %mul = mul nsw i64 %i.02, %conv1
  %add.ptr = getelementptr inbounds i32, ptr %ar, i64 %mul
  call void @llvm.memset.p0.i64(ptr align 4 %add.ptr, i8 0, i64 %mul3, i1 false)
  %inc = add nsw i64 %i.02, 1
  %cmp = icmp slt i64 %inc, %conv
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}
; void Negative32(int *ar, int n, int m)
; {
;   long long i;
;   for (i=n-1; i>=0; i--) {
;     int *arr = ar + i * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
define void @Negative32(ptr %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @Negative32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[N:%.*]], 1
; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[SUB]] to i64
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sge i64 [[CONV]], 0
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CONV1:%.*]] = sext i32 [[M:%.*]] to i64
; CHECK-NEXT:    [[CONV2:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[MUL3:%.*]] = mul i64 [[CONV2]], 4
; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[N]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[CONV1]], [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[TMP1]], 2
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[AR:%.*]], i8 0, i64 [[TMP2]], i1 false)
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %sub = sub nsw i32 %n, 1
  %conv = sext i32 %sub to i64
  %cmp1 = icmp sge i64 %conv, 0
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %conv1 = sext i32 %m to i64
  %conv2 = sext i32 %m to i64
  %mul3 = mul i64 %conv2, 4
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.02 = phi i64 [ %conv, %for.body.lr.ph ], [ %dec, %for.body ]
  %mul = mul nsw i64 %i.02, %conv1
  %add.ptr = getelementptr inbounds i32, ptr %ar, i64 %mul
  call void @llvm.memset.p0.i64(ptr align 4 %add.ptr, i8 0, i64 %mul3, i1 false)
  %dec = add nsw i64 %i.02, -1
  %cmp = icmp sge i64 %dec, 0
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}
; This case requires SCEVFolder in LoopIdiomRecognize.cpp to fold the SCEVs prior to comparison.
; For the inner loop, SCEVFolder is not needed; however, the promoted memset size is based
; on the trip count of the inner loop (which is an unsigned integer).
; Then, in the outer loop, the pointer-stride SCEV for the memset needs to be converted based on
; the loop guard for it to equal the memset size SCEV. The loop guard guarantees that m >= 0
; inside the loop, so m can be converted from sext to zext, making the two SCEVs equal.
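;
; A sketch of the two expressions being compared for the outer loop (illustrative; not the
; exact SCEV dump). Per outer iteration, the memset size promoted out of the inner loop is
;   4 * sext(o) * zext(m)        ; inner memset length times the unsigned inner trip count
; while the pointer stride of the outer loop is
;   4 * sext(o) * sext(m)        ; from arr = ar + i * m * o
; Because the guard establishes 0 < m on this path, sext(m) == zext(m); folding the sext to a
; zext makes the two expressions equal, so the whole nest is rewritten as a single memset of
;   4 * sext(o) * zext(m) * zext(n)  bytes.
;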
; void NestedFor32(int *ar, int n, int m, int o)
; {
;   int i, j;
;   for (i=0; i<n; ++i) {
;     for (j=0; j<m; j++) {
;       int *arr = ar + i * m * o + j * o;
;       memset(arr, 0, o * sizeof(int));
;     }
;   }
; }
define void @NestedFor32(ptr %ar, i32 %n, i32 %m, i32 %o) {
; CHECK-LABEL: @NestedFor32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP3:%.*]] = icmp slt i32 0, [[N:%.*]]
; CHECK-NEXT:    br i1 [[CMP3]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END11:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CMP21:%.*]] = icmp slt i32 0, [[M:%.*]]
; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[O:%.*]] to i64
; CHECK-NEXT:    [[MUL8:%.*]] = mul i64 [[CONV]], 4
; CHECK-NEXT:    br i1 [[CMP21]], label [[FOR_BODY_US_PREHEADER:%.*]], label [[FOR_END11]]
; CHECK:       for.body.us.preheader:
; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[O]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[WIDE_TRIP_COUNT10:%.*]] = zext i32 [[N]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP0]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 2
; CHECK-NEXT:    [[TMP4:%.*]] = zext i32 [[M]] to i64
; CHECK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP0]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = shl i64 [[TMP5]], 2
; CHECK-NEXT:    [[TMP7:%.*]] = mul i64 [[TMP5]], [[WIDE_TRIP_COUNT10]]
; CHECK-NEXT:    [[TMP8:%.*]] = shl i64 [[TMP7]], 2
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[AR:%.*]], i8 0, i64 [[TMP8]], i1 false)
; CHECK-NEXT:    br label [[FOR_END11]]
; CHECK:       for.end11:
; CHECK-NEXT:    ret void
;
entry:
  %cmp3 = icmp slt i32 0, %n
  br i1 %cmp3, label %for.body.lr.ph, label %for.end11

for.body.lr.ph:                                   ; preds = %entry
  %cmp21 = icmp slt i32 0, %m
  %conv = sext i32 %o to i64
  %mul8 = mul i64 %conv, 4
  br i1 %cmp21, label %for.body.us.preheader, label %for.end11

for.body.us.preheader:                            ; preds = %for.body.lr.ph
  %0 = sext i32 %o to i64
  %1 = sext i32 %m to i64
  %2 = sext i32 %o to i64
  %wide.trip.count10 = zext i32 %n to i64
  br label %for.body.us

for.body.us:                                      ; preds = %for.body.us.preheader, %for.cond1.for.end_crit_edge.us
  %indvars.iv6 = phi i64 [ 0, %for.body.us.preheader ], [ %indvars.iv.next7, %for.cond1.for.end_crit_edge.us ]
  %3 = mul nsw i64 %indvars.iv6, %1
  %4 = mul nsw i64 %3, %2
  %add.ptr.us = getelementptr inbounds i32, ptr %ar, i64 %4
  %wide.trip.count = zext i32 %m to i64
  br label %for.body3.us

for.body3.us:                                     ; preds = %for.body.us, %for.body3.us
  %indvars.iv = phi i64 [ 0, %for.body.us ], [ %indvars.iv.next, %for.body3.us ]
  %5 = mul nsw i64 %indvars.iv, %0
  %add.ptr7.us = getelementptr inbounds i32, ptr %add.ptr.us, i64 %5
  call void @llvm.memset.p0.i64(ptr align 4 %add.ptr7.us, i8 0, i64 %mul8, i1 false)
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count
  br i1 %exitcond, label %for.body3.us, label %for.cond1.for.end_crit_edge.us

for.cond1.for.end_crit_edge.us:                   ; preds = %for.body3.us
  %indvars.iv.next7 = add nuw nsw i64 %indvars.iv6, 1
  %exitcond11 = icmp ne i64 %indvars.iv.next7, %wide.trip.count10
  br i1 %exitcond11, label %for.body.us, label %for.end11.loopexit

for.end11.loopexit:                               ; preds = %for.cond1.for.end_crit_edge.us
  br label %for.end11

for.end11:                                        ; preds = %for.end11.loopexit, %for.body.lr.ph, %entry
  ret void
}

; void NegStart(int n, int m, int *ar) {
;   for (int i = -100; i < n; i++) {
;     int *arr = ar + (i + 100) * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
define void @NegStart(i32 %n, i32 %m, ptr %ar) {
; CHECK-LABEL: @NegStart(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 -100, [[N:%.*]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[M:%.*]] to i64
; CHECK-NEXT:    [[MUL1:%.*]] = mul i64 [[CONV]], 4
; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = sext i32 [[N]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = add nsw i64 [[WIDE_TRIP_COUNT]], 100
; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[TMP1]], [[TMP0]]
; CHECK-NEXT:    [[TMP3:%.*]] = shl i64 [[TMP2]], 2
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[AR:%.*]], i8 0, i64 [[TMP3]], i1 false)
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %cmp1 = icmp slt i32 -100, %n
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %conv = sext i32 %m to i64
  %mul1 = mul i64 %conv, 4
  %0 = sext i32 %m to i64
  %wide.trip.count = sext i32 %n to i64
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %indvars.iv = phi i64 [ -100, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
  %1 = add nsw i64 %indvars.iv, 100
  %2 = mul nsw i64 %1, %0
  %add.ptr = getelementptr inbounds i32, ptr %ar, i64 %2
  call void @llvm.memset.p0.i64(ptr align 4 %add.ptr, i8 0, i64 %mul1, i1 false)
  %indvars.iv.next = add nsw i64 %indvars.iv, 1
  %exitcond = icmp ne i64 %indvars.iv.next, %wide.trip.count
  br i1 %exitcond, label %for.body, label %for.end.loopexit

for.end.loopexit:                                 ; preds = %for.body
  br label %for.end

for.end:                                          ; preds = %for.end.loopexit, %entry
  ret void
}

declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)