; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes="function(loop(indvars,loop-idiom,loop-deletion),simplifycfg)" -S < %s | FileCheck %s
; $ clang -m32 -fno-discard-value-names -O0 -S -emit-llvm -Xclang -disable-O0-optnone Code.c
; $ bin/opt -S -basic-aa -mem2reg -loop-simplify -lcssa -loop-rotate \
;   -licm -simple-loop-unswitch -enable-nontrivial-unswitch -loop-simplify \
;   -loop-deletion -simplifycfg -indvars Code.ll > CodeOpt.ll
target datalayout = "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-f80:32-n8:16:32-S128"
; void PositiveFor32(int *ar, int n, int m)
; {
;   int i;
;   for (i=0; i<n; ++i) {
;     int *arr = ar + i * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
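; The per-iteration memset of m*4 bytes is expected to be widened into one memset of n*m*4 bytes and the loop deleted (see the CHECK lines below).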
define dso_local void @PositiveFor32(i32* %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @PositiveFor32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[AR1:%.*]] = bitcast i32* [[AR:%.*]] to i8*
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 0, [[N:%.*]]
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.lr.ph:
; CHECK-NEXT: [[MUL1:%.*]] = mul i32 [[M:%.*]], 4
; CHECK-NEXT: [[TMP0:%.*]] = mul i32 [[M]], [[N]]
; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[TMP0]], 2
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 4 [[AR1]], i8 0, i32 [[TMP1]], i1 false)
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  %cmp1 = icmp slt i32 0, %n
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph: ; preds = %entry
  %mul1 = mul i32 %m, 4
  br label %for.body

for.body: ; preds = %for.body.lr.ph, %for.body
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %mul = mul nsw i32 %i.02, %m
  %add.ptr = getelementptr inbounds i32, i32* %ar, i32 %mul
  %0 = bitcast i32* %add.ptr to i8*
  call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 %mul1, i1 false)
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.body, label %for.end

for.end: ; preds = %for.body, %entry
  ret void
}
; void NegativeFor32(int *ar, int n, int m)
; {
;   int i;
;   for (i=n-1; i>=0; i--) {
;     int *arr = ar + i * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
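; Same expectation as @PositiveFor32, but with a loop counting down from n-1: the whole loop should still become one memset of n*m*4 bytes.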
define void @NegativeFor32(i32* %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @NegativeFor32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[AR1:%.*]] = bitcast i32* [[AR:%.*]] to i8*
; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[N:%.*]], 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp sge i32 [[SUB]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.lr.ph:
; CHECK-NEXT: [[MUL1:%.*]] = mul i32 [[M:%.*]], 4
; CHECK-NEXT: [[TMP0:%.*]] = mul i32 [[M]], [[N]]
; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[TMP0]], 2
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 4 [[AR1]], i8 0, i32 [[TMP1]], i1 false)
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  %sub = sub nsw i32 %n, 1
  %cmp1 = icmp sge i32 %sub, 0
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph: ; preds = %entry
  %mul1 = mul i32 %m, 4
  br label %for.body

for.body: ; preds = %for.body.lr.ph, %for.body
  %i.02 = phi i32 [ %sub, %for.body.lr.ph ], [ %dec, %for.body ]
  %mul = mul nsw i32 %i.02, %m
  %add.ptr = getelementptr inbounds i32, i32* %ar, i32 %mul
  %0 = bitcast i32* %add.ptr to i8*
  call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 %mul1, i1 false)
  %dec = add nsw i32 %i.02, -1
  %cmp = icmp sge i32 %dec, 0
  br i1 %cmp, label %for.body, label %for.end

for.end: ; preds = %for.body, %entry
  ret void
}
; void NestedFor32(int *ar, int n, int m, int o)
; {
;   int i, j;
;   for (i=0; i<n; ++i) {
;     for (j=0; j<m; ++j) {
;       int *arr = ar + i * m * o + j * o;
;       memset(arr, 0, o * sizeof(int));
;     }
;   }
; }
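; Both loops should be removed: the nested memsets of o*4 bytes cover one contiguous n*m*o*4-byte region, so they fold into a single memset.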
define void @NestedFor32(i32* %ar, i32 %n, i32 %m, i32 %o) {
; CHECK-LABEL: @NestedFor32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[AR2:%.*]] = bitcast i32* [[AR:%.*]] to i8*
; CHECK-NEXT: [[CMP3:%.*]] = icmp slt i32 0, [[N:%.*]]
; CHECK-NEXT: [[CMP21:%.*]] = icmp slt i32 0, [[M:%.*]]
; CHECK-NEXT: [[MUL7:%.*]] = mul i32 [[O:%.*]], 4
; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[CMP3]], i1 [[CMP21]], i1 false
; CHECK-NEXT: br i1 [[OR_COND]], label [[FOR_BODY_US_PREHEADER:%.*]], label [[FOR_END10:%.*]]
; CHECK: for.body.us.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = mul i32 [[O]], [[M]]
; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[TMP0]], 2
; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[TMP0]], [[N]]
; CHECK-NEXT: [[TMP3:%.*]] = shl i32 [[TMP2]], 2
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 4 [[AR2]], i8 0, i32 [[TMP3]], i1 false)
; CHECK-NEXT: br label [[FOR_END10]]
; CHECK: for.end10:
; CHECK-NEXT: ret void
;
entry:
  %cmp3 = icmp slt i32 0, %n
  br i1 %cmp3, label %for.body.lr.ph, label %for.end10

for.body.lr.ph: ; preds = %entry
  %cmp21 = icmp slt i32 0, %m
  %mul7 = mul i32 %o, 4
  br i1 %cmp21, label %for.body.us.preheader, label %for.end10

for.body.us.preheader: ; preds = %for.body.lr.ph
  br label %for.body.us

for.body.us: ; preds = %for.body.us.preheader, %for.cond1.for.end_crit_edge.us
  %i.04.us = phi i32 [ %inc9.us, %for.cond1.for.end_crit_edge.us ], [ 0, %for.body.us.preheader ]
  %mul.us = mul nsw i32 %i.04.us, %m
  %mul4.us = mul nsw i32 %mul.us, %o
  %add.ptr.us = getelementptr inbounds i32, i32* %ar, i32 %mul4.us
  br label %for.body3.us

for.body3.us: ; preds = %for.body.us, %for.body3.us
  %j.02.us = phi i32 [ 0, %for.body.us ], [ %inc.us, %for.body3.us ]
  %mul5.us = mul nsw i32 %j.02.us, %o
  %add.ptr6.us = getelementptr inbounds i32, i32* %add.ptr.us, i32 %mul5.us
  %0 = bitcast i32* %add.ptr6.us to i8*
  call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 %mul7, i1 false)
  %inc.us = add nuw nsw i32 %j.02.us, 1
  %exitcond = icmp ne i32 %inc.us, %m
  br i1 %exitcond, label %for.body3.us, label %for.cond1.for.end_crit_edge.us

for.cond1.for.end_crit_edge.us: ; preds = %for.body3.us
  %inc9.us = add nuw nsw i32 %i.04.us, 1
  %exitcond5 = icmp ne i32 %inc9.us, %n
  br i1 %exitcond5, label %for.body.us, label %for.end10.loopexit

for.end10.loopexit: ; preds = %for.cond1.for.end_crit_edge.us
  br label %for.end10

for.end10: ; preds = %for.end10.loopexit, %for.body.lr.ph, %entry
  ret void
}
; void PositiveFor64(int *ar, long long n, long long m)
; {
;   int i;
;   for (i=0; i<n; ++i) {
;     int *arr = ar + i * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
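; Same pattern as @PositiveFor32 but with 64-bit n and m; the size operands are truncated to i32 and the loop should still collapse into one memset of n*m*4 bytes.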
define dso_local void @PositiveFor64(i32* %ar, i64 %n, i64 %m) #0 {
; CHECK-LABEL: @PositiveFor64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[AR1:%.*]] = bitcast i32* [[AR:%.*]] to i8*
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i64 0, [[N:%.*]]
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.lr.ph:
; CHECK-NEXT: [[MUL3:%.*]] = mul nsw i64 [[M:%.*]], 4
; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[MUL3]] to i32
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[M]] to i32
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[N]] to i32
; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = shl i32 [[TMP2]], 2
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 4 [[AR1]], i8 0, i32 [[TMP3]], i1 false)
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  %cmp1 = icmp slt i64 0, %n
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph: ; preds = %entry
  %mul3 = mul nsw i64 %m, 4
  %conv4 = trunc i64 %mul3 to i32
  br label %for.body

for.body: ; preds = %for.body.lr.ph, %for.body
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %conv2 = sext i32 %i.02 to i64
  %mul = mul nsw i64 %conv2, %m
  %idx.ext = trunc i64 %mul to i32
  %add.ptr = getelementptr inbounds i32, i32* %ar, i32 %idx.ext
  %0 = bitcast i32* %add.ptr to i8*
  call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 %conv4, i1 false)
  %inc = add nuw nsw i32 %i.02, 1
  %conv = sext i32 %inc to i64
  %cmp = icmp slt i64 %conv, %n
  br i1 %cmp, label %for.body, label %for.end.loopexit

for.end.loopexit: ; preds = %for.body
  br label %for.end

for.end: ; preds = %for.end.loopexit, %entry
  ret void
}
; void NegativeFor64(int *ar, long long n, long long m)
; {
;   int i;
;   for (i=n-1; i>=0; --i) {
;     int *arr = ar + i * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
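; 64-bit variant of @NegativeFor32: the down-counting loop should still be replaced by a single memset of n*m*4 bytes.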
define dso_local void @NegativeFor64(i32* %ar, i64 %n, i64 %m) #0 {
; CHECK-LABEL: @NegativeFor64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[AR1:%.*]] = bitcast i32* [[AR:%.*]] to i8*
; CHECK-NEXT: [[SUB:%.*]] = sub nsw i64 [[N:%.*]], 1
; CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[SUB]] to i32
; CHECK-NEXT: [[CMP1:%.*]] = icmp sge i32 [[CONV]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.lr.ph:
; CHECK-NEXT: [[MUL3:%.*]] = mul nsw i64 [[M:%.*]], 4
; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[MUL3]] to i32
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[M]] to i32
; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[CONV]], -1
; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[TMP0]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = shl i32 [[TMP2]], 2
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 4 [[AR1]], i8 0, i32 [[TMP3]], i1 false)
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  %sub = sub nsw i64 %n, 1
  %conv = trunc i64 %sub to i32
  %cmp1 = icmp sge i32 %conv, 0
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph: ; preds = %entry
  %mul3 = mul nsw i64 %m, 4
  %conv4 = trunc i64 %mul3 to i32
  br label %for.body

for.body: ; preds = %for.body.lr.ph, %for.body
  %i.02 = phi i32 [ %conv, %for.body.lr.ph ], [ %dec, %for.body ]
  %conv2 = sext i32 %i.02 to i64
  %mul = mul nsw i64 %conv2, %m
  %idx.ext = trunc i64 %mul to i32
  %add.ptr = getelementptr inbounds i32, i32* %ar, i32 %idx.ext
  %0 = bitcast i32* %add.ptr to i8*
  call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 %conv4, i1 false)
  %dec = add nsw i32 %i.02, -1
  %cmp = icmp sge i32 %dec, 0
  br i1 %cmp, label %for.body, label %for.end.loopexit

for.end.loopexit: ; preds = %for.body
  br label %for.end

for.end: ; preds = %for.end.loopexit, %entry
  ret void
}
; void NestedFor64(int *ar, long long n, long long m, long long o)
; {
;   int i, j;
;   for (i=0; i<n; ++i) {
;     for (j=0; j<m; j++) {
;       int *arr = ar + i * m * o + j * o;
;       memset(arr, 0, o * sizeof(int));
;     }
;   }
; }
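; Unlike @NestedFor32, only the inner loop is folded here: the CHECK lines expect the outer loop over i to remain, performing one memset of m*o*4 bytes per iteration.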
define dso_local void @NestedFor64(i32* %ar, i64 %n, i64 %m, i64 %o) #0 {
; CHECK-LABEL: @NestedFor64(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP3:%.*]] = icmp slt i64 0, [[N:%.*]]
; CHECK-NEXT: [[CMP41:%.*]] = icmp slt i64 0, [[M:%.*]]
; CHECK-NEXT: [[MUL13:%.*]] = mul nsw i64 [[O:%.*]], 4
; CHECK-NEXT: [[CONV14:%.*]] = trunc i64 [[MUL13]] to i32
; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[CMP3]], i1 [[CMP41]], i1 false
; CHECK-NEXT: br i1 [[OR_COND]], label [[FOR_BODY_US_PREHEADER:%.*]], label [[FOR_END17:%.*]]
; CHECK: for.body.us.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[O]], [[M]]
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[O]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[M]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = mul i32 [[TMP2]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = shl i32 [[TMP4]], 2
; CHECK-NEXT: br label [[FOR_BODY_US:%.*]]
; CHECK: for.body.us:
; CHECK-NEXT: [[I_04_US:%.*]] = phi i32 [ [[INC16_US:%.*]], [[FOR_BODY_US]] ], [ 0, [[FOR_BODY_US_PREHEADER]] ]
; CHECK-NEXT: [[TMP6:%.*]] = mul i32 [[TMP1]], [[I_04_US]]
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[AR:%.*]], i32 [[TMP6]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
; CHECK-NEXT: [[CONV7_US:%.*]] = sext i32 [[I_04_US]] to i64
; CHECK-NEXT: [[MUL_US:%.*]] = mul nsw i64 [[CONV7_US]], [[M]]
; CHECK-NEXT: [[MUL8_US:%.*]] = mul nsw i64 [[MUL_US]], [[O]]
; CHECK-NEXT: [[IDX_EXT_US:%.*]] = trunc i64 [[MUL8_US]] to i32
; CHECK-NEXT: [[ADD_PTR_US:%.*]] = getelementptr inbounds i32, i32* [[AR]], i32 [[IDX_EXT_US]]
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 4 [[SCEVGEP1]], i8 0, i32 [[TMP5]], i1 false)
; CHECK-NEXT: [[INC16_US]] = add nuw nsw i32 [[I_04_US]], 1
; CHECK-NEXT: [[CONV_US:%.*]] = sext i32 [[INC16_US]] to i64
; CHECK-NEXT: [[CMP_US:%.*]] = icmp slt i64 [[CONV_US]], [[N]]
; CHECK-NEXT: br i1 [[CMP_US]], label [[FOR_BODY_US]], label [[FOR_END17]]
; CHECK: for.end17:
; CHECK-NEXT: ret void
;
entry:
  %cmp3 = icmp slt i64 0, %n
  br i1 %cmp3, label %for.body.lr.ph, label %for.end17

for.body.lr.ph: ; preds = %entry
  %cmp41 = icmp slt i64 0, %m
  %mul13 = mul nsw i64 %o, 4
  %conv14 = trunc i64 %mul13 to i32
  br i1 %cmp41, label %for.body.us.preheader, label %for.end17

for.body.us.preheader: ; preds = %for.body.lr.ph
  br label %for.body.us

for.body.us: ; preds = %for.body.us.preheader, %for.cond2.for.end_crit_edge.us
  %i.04.us = phi i32 [ %inc16.us, %for.cond2.for.end_crit_edge.us ], [ 0, %for.body.us.preheader ]
  %conv7.us = sext i32 %i.04.us to i64
  %mul.us = mul nsw i64 %conv7.us, %m
  %mul8.us = mul nsw i64 %mul.us, %o
  %idx.ext.us = trunc i64 %mul8.us to i32
  %add.ptr.us = getelementptr inbounds i32, i32* %ar, i32 %idx.ext.us
  br label %for.body6.us

for.body6.us: ; preds = %for.body.us, %for.body6.us
  %j.02.us = phi i32 [ 0, %for.body.us ], [ %inc.us, %for.body6.us ]
  %conv9.us = sext i32 %j.02.us to i64
  %mul10.us = mul nsw i64 %conv9.us, %o
  %idx.ext11.us = trunc i64 %mul10.us to i32
  %add.ptr12.us = getelementptr inbounds i32, i32* %add.ptr.us, i32 %idx.ext11.us
  %0 = bitcast i32* %add.ptr12.us to i8*
  call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 %conv14, i1 false)
  %inc.us = add nuw nsw i32 %j.02.us, 1
  %conv3.us = sext i32 %inc.us to i64
  %cmp4.us = icmp slt i64 %conv3.us, %m
  br i1 %cmp4.us, label %for.body6.us, label %for.cond2.for.end_crit_edge.us

for.cond2.for.end_crit_edge.us: ; preds = %for.body6.us
  %inc16.us = add nuw nsw i32 %i.04.us, 1
  %conv.us = sext i32 %inc16.us to i64
  %cmp.us = icmp slt i64 %conv.us, %n
  br i1 %cmp.us, label %for.body.us, label %for.end17.loopexit

for.end17.loopexit: ; preds = %for.cond2.for.end_crit_edge.us
  br label %for.end17

for.end17: ; preds = %for.end17.loopexit, %for.body.lr.ph, %entry
  ret void
}
; void NegStart(int n, int m, int *ar) {
;   for (int i = -100; i < n; i++) {
;     int *arr = ar + (i + 100) * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
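; The induction variable starts at -100 and the store offset is (i + 100) * m, so the loop should still collapse into one memset of (n + 100) * m * 4 bytes.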
define void @NegStart(i32 %n, i32 %m, i32* %ar) {
; CHECK-LABEL: @NegStart(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[AR1:%.*]] = bitcast i32* [[AR:%.*]] to i8*
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 -100, [[N:%.*]]
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.lr.ph:
; CHECK-NEXT: [[MUL1:%.*]] = mul i32 [[M:%.*]], 4
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], 100
; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[M]], [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[TMP1]], 2
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 4 [[AR1]], i8 0, i32 [[TMP2]], i1 false)
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  %cmp1 = icmp slt i32 -100, %n
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph: ; preds = %entry
  %mul1 = mul i32 %m, 4
  br label %for.body

for.body: ; preds = %for.body.lr.ph, %for.body
  %i.02 = phi i32 [ -100, %for.body.lr.ph ], [ %inc, %for.body ]
  %add = add nsw i32 %i.02, 100
  %mul = mul nsw i32 %add, %m
  %add.ptr = getelementptr inbounds i32, i32* %ar, i32 %mul
  %0 = bitcast i32* %add.ptr to i8*
  call void @llvm.memset.p0i8.i32(i8* align 4 %0, i8 0, i32 %mul1, i1 false)
  %inc = add nsw i32 %i.02, 1
  %exitcond = icmp ne i32 %inc, %n
  br i1 %exitcond, label %for.body, label %for.end.loopexit

for.end.loopexit: ; preds = %for.body
  br label %for.end

for.end: ; preds = %for.end.loopexit, %entry
  ret void
}

declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1 immarg)