1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt -basic-aa -loop-idiom < %s -S | FileCheck %s
3 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
6 ; CHECK: @.memset_pattern = private unnamed_addr constant [4 x i32] [i32 1, i32 1, i32 1, i32 1]
9 ; CHECK: @.memset_pattern.1 = private unnamed_addr constant [2 x i32*] [i32* @G, i32* @G]
11 target triple = "x86_64-apple-darwin10.0.0"
13 define void @test1(i8* %Base, i64 %Size) nounwind ssp {
14 ; CHECK-LABEL: @test1(
16 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
17 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
19 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
20 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
21 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
22 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
23 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
25 ; CHECK-NEXT: ret void
27 bb.nph: ; preds = %entry
30 for.body: ; preds = %bb.nph, %for.body
31 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
32 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
33 store i8 0, i8* %I.0.014, align 1
34 %indvar.next = add i64 %indvar, 1
35 %exitcond = icmp eq i64 %indvar.next, %Size
36 br i1 %exitcond, label %for.end, label %for.body
38 for.end: ; preds = %for.body, %entry
42 ; Make sure memset is formed for stores larger than 1 byte, and that the
43 ; alignment of the store is preserved.
44 define void @test1_i16(i16* align 2 %Base, i64 %Size) nounwind ssp {
45 ; CHECK-LABEL: @test1_i16(
47 ; CHECK-NEXT: [[BASE1:%.*]] = bitcast i16* [[BASE:%.*]] to i8*
48 ; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE:%.*]], 1
49 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 2 [[BASE1]], i8 0, i64 [[TMP0]], i1 false)
50 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
52 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
53 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i16, i16* [[BASE]], i64 [[INDVAR]]
54 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
55 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
56 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
58 ; CHECK-NEXT: ret void
60 bb.nph: ; preds = %entry
63 for.body: ; preds = %bb.nph, %for.body
64 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
65 %I.0.014 = getelementptr i16, i16* %Base, i64 %indvar
66 store i16 0, i16* %I.0.014, align 2
67 %indvar.next = add i64 %indvar, 1
68 %exitcond = icmp eq i64 %indvar.next, %Size
69 br i1 %exitcond, label %for.end, label %for.body
71 for.end: ; preds = %for.body, %entry
75 ; This is a loop that was rotated but where the blocks weren't merged. This
76 ; shouldn't perturb us.
77 define void @test1a(i8* %Base, i64 %Size) nounwind ssp {
78 ; CHECK-LABEL: @test1a(
80 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
81 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
83 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY_CONT:%.*]] ]
84 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
85 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
86 ; CHECK-NEXT: br label [[FOR_BODY_CONT]]
87 ; CHECK: for.body.cont:
88 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
89 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
91 ; CHECK-NEXT: ret void
93 bb.nph: ; preds = %entry
96 for.body: ; preds = %bb.nph, %for.body
97 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body.cont ]
98 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
99 store i8 0, i8* %I.0.014, align 1
100 %indvar.next = add i64 %indvar, 1
101 br label %for.body.cont
103 %exitcond = icmp eq i64 %indvar.next, %Size
104 br i1 %exitcond, label %for.end, label %for.body
106 for.end: ; preds = %for.body, %entry
111 define void @test2(i32* %Base, i64 %Size) nounwind ssp {
112 ; CHECK-LABEL: @test2(
114 ; CHECK-NEXT: [[BASE1:%.*]] = bitcast i32* [[BASE:%.*]] to i8*
115 ; CHECK-NEXT: [[CMP10:%.*]] = icmp eq i64 [[SIZE:%.*]], 0
116 ; CHECK-NEXT: br i1 [[CMP10]], label [[FOR_END:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
117 ; CHECK: for.body.preheader:
118 ; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE]], 2
119 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[BASE1]], i8 1, i64 [[TMP0]], i1 false)
120 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
122 ; CHECK-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
123 ; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[I_011]]
124 ; CHECK-NEXT: [[INC]] = add nsw i64 [[I_011]], 1
125 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[SIZE]]
126 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
127 ; CHECK: for.end.loopexit:
128 ; CHECK-NEXT: br label [[FOR_END]]
130 ; CHECK-NEXT: ret void
133 %cmp10 = icmp eq i64 %Size, 0
134 br i1 %cmp10, label %for.end, label %for.body
136 for.body: ; preds = %entry, %for.body
137 %i.011 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
138 %add.ptr.i = getelementptr i32, i32* %Base, i64 %i.011
139 store i32 16843009, i32* %add.ptr.i, align 4
140 %inc = add nsw i64 %i.011, 1
141 %exitcond = icmp eq i64 %inc, %Size
142 br i1 %exitcond, label %for.end, label %for.body
144 for.end: ; preds = %for.body, %entry
148 ; This is a case where there is an extra may-aliased store in the loop, so we
149 ; can't promote the memset.
150 define void @test3(i32* %Base, i64 %Size, i8 *%MayAlias) nounwind ssp {
151 ; CHECK-LABEL: @test3(
153 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
155 ; CHECK-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
156 ; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr i32, i32* [[BASE:%.*]], i64 [[I_011]]
157 ; CHECK-NEXT: store i32 16843009, i32* [[ADD_PTR_I]], align 4
158 ; CHECK-NEXT: store i8 42, i8* [[MAYALIAS:%.*]], align 1
159 ; CHECK-NEXT: [[INC]] = add nsw i64 [[I_011]], 1
160 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[SIZE:%.*]]
161 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
163 ; CHECK-NEXT: ret void
168 for.body: ; preds = %entry, %for.body
169 %i.011 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
170 %add.ptr.i = getelementptr i32, i32* %Base, i64 %i.011
171 store i32 16843009, i32* %add.ptr.i, align 4
173 store i8 42, i8* %MayAlias
174 %inc = add nsw i64 %i.011, 1
175 %exitcond = icmp eq i64 %inc, %Size
176 br i1 %exitcond, label %for.end, label %for.body
178 for.end: ; preds = %entry
182 ; Make sure the first store in the loop is turned into a memset.
183 define void @test4(i8* %Base) nounwind ssp {
184 ; CHECK-LABEL: @test4(
185 ; CHECK-NEXT: bb.nph:
186 ; CHECK-NEXT: [[BASE100:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i64 1000
187 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE]], i8 0, i64 100, i1 false)
188 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
190 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
191 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
192 ; CHECK-NEXT: store i8 42, i8* [[BASE100]], align 1
193 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
194 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 100
195 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
197 ; CHECK-NEXT: ret void
199 bb.nph: ; preds = %entry
200 %Base100 = getelementptr i8, i8* %Base, i64 1000
203 for.body: ; preds = %bb.nph, %for.body
204 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
205 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
206 store i8 0, i8* %I.0.014, align 1
208 ;; Store beyond the range memset, should be safe to promote.
209 store i8 42, i8* %Base100
211 %indvar.next = add i64 %indvar, 1
212 %exitcond = icmp eq i64 %indvar.next, 100
213 br i1 %exitcond, label %for.end, label %for.body
215 for.end: ; preds = %for.body, %entry
219 ; This can't be promoted: the memset is a store of a loop variant value.
220 define void @test5(i8* %Base, i64 %Size) nounwind ssp {
221 ; CHECK-LABEL: @test5(
222 ; CHECK-NEXT: bb.nph:
223 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
225 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
226 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i64 [[INDVAR]]
227 ; CHECK-NEXT: [[V:%.*]] = trunc i64 [[INDVAR]] to i8
228 ; CHECK-NEXT: store i8 [[V]], i8* [[I_0_014]], align 1
229 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
230 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
231 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
233 ; CHECK-NEXT: ret void
235 bb.nph: ; preds = %entry
238 for.body: ; preds = %bb.nph, %for.body
239 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
240 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
242 %V = trunc i64 %indvar to i8
243 store i8 %V, i8* %I.0.014, align 1
244 %indvar.next = add i64 %indvar, 1
245 %exitcond = icmp eq i64 %indvar.next, %Size
246 br i1 %exitcond, label %for.end, label %for.body
248 for.end: ; preds = %for.body, %entry
254 define void @test6(i64 %Size) nounwind ssp {
255 ; CHECK-LABEL: @test6(
256 ; CHECK-NEXT: bb.nph:
257 ; CHECK-NEXT: [[BASE:%.*]] = alloca i8, i32 10000, align 1
258 ; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i32 10000, align 1
259 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DEST]], i8* align 1 [[BASE]], i64 [[SIZE:%.*]], i1 false)
260 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
262 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
263 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
264 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
265 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[I_0_014]], align 1
266 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
267 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
268 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
270 ; CHECK-NEXT: ret void
273 %Base = alloca i8, i32 10000
274 %Dest = alloca i8, i32 10000
277 for.body: ; preds = %bb.nph, %for.body
278 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
279 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
280 %DestI = getelementptr i8, i8* %Dest, i64 %indvar
281 %V = load i8, i8* %I.0.014, align 1
282 store i8 %V, i8* %DestI, align 1
283 %indvar.next = add i64 %indvar, 1
284 %exitcond = icmp eq i64 %indvar.next, %Size
285 br i1 %exitcond, label %for.end, label %for.body
287 for.end: ; preds = %for.body, %entry
291 ;; memcpy formation, check alignment
292 define void @test6_dest_align(i32* noalias align 1 %Base, i32* noalias align 4 %Dest, i64 %Size) nounwind ssp {
293 ; CHECK-LABEL: @test6_dest_align(
294 ; CHECK-NEXT: bb.nph:
295 ; CHECK-NEXT: [[DEST1:%.*]] = bitcast i32* [[DEST:%.*]] to i8*
296 ; CHECK-NEXT: [[BASE2:%.*]] = bitcast i32* [[BASE:%.*]] to i8*
297 ; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE:%.*]], 2
298 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[DEST1]], i8* align 1 [[BASE2]], i64 [[TMP0]], i1 false)
299 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
301 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
302 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[INDVAR]]
303 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, i32* [[DEST]], i64 [[INDVAR]]
304 ; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[I_0_014]], align 1
305 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
306 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
307 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
309 ; CHECK-NEXT: ret void
314 for.body: ; preds = %bb.nph, %for.body
315 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
316 %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
317 %DestI = getelementptr i32, i32* %Dest, i64 %indvar
318 %V = load i32, i32* %I.0.014, align 1
319 store i32 %V, i32* %DestI, align 4
320 %indvar.next = add i64 %indvar, 1
321 %exitcond = icmp eq i64 %indvar.next, %Size
322 br i1 %exitcond, label %for.end, label %for.body
324 for.end: ; preds = %for.body, %entry
328 ;; memcpy formation, check alignment
329 define void @test6_src_align(i32* noalias align 4 %Base, i32* noalias align 1 %Dest, i64 %Size) nounwind ssp {
330 ; CHECK-LABEL: @test6_src_align(
331 ; CHECK-NEXT: bb.nph:
332 ; CHECK-NEXT: [[DEST1:%.*]] = bitcast i32* [[DEST:%.*]] to i8*
333 ; CHECK-NEXT: [[BASE2:%.*]] = bitcast i32* [[BASE:%.*]] to i8*
334 ; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE:%.*]], 2
335 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DEST1]], i8* align 4 [[BASE2]], i64 [[TMP0]], i1 false)
336 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
338 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
339 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[INDVAR]]
340 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, i32* [[DEST]], i64 [[INDVAR]]
341 ; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[I_0_014]], align 4
342 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
343 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
344 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
346 ; CHECK-NEXT: ret void
351 for.body: ; preds = %bb.nph, %for.body
352 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
353 %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
354 %DestI = getelementptr i32, i32* %Dest, i64 %indvar
355 %V = load i32, i32* %I.0.014, align 4
356 store i32 %V, i32* %DestI, align 1
357 %indvar.next = add i64 %indvar, 1
358 %exitcond = icmp eq i64 %indvar.next, %Size
359 br i1 %exitcond, label %for.end, label %for.body
361 for.end: ; preds = %for.body, %entry
366 ; This is a loop that was rotated but where the blocks weren't merged. This
367 ; shouldn't perturb us.
368 define void @test7(i8* %Base, i64 %Size) nounwind ssp {
369 ; CHECK-LABEL: @test7(
370 ; CHECK-NEXT: bb.nph:
371 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
372 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
374 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY_CONT:%.*]] ]
375 ; CHECK-NEXT: br label [[FOR_BODY_CONT]]
376 ; CHECK: for.body.cont:
377 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
378 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
379 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
380 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
382 ; CHECK-NEXT: ret void
384 bb.nph: ; preds = %entry
387 for.body: ; preds = %bb.nph, %for.body
388 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body.cont ]
389 br label %for.body.cont
391 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
392 store i8 0, i8* %I.0.014, align 1
393 %indvar.next = add i64 %indvar, 1
394 %exitcond = icmp eq i64 %indvar.next, %Size
395 br i1 %exitcond, label %for.end, label %for.body
397 for.end: ; preds = %for.body, %entry
401 ; This is a loop that should not be transformed; it only executes one iteration.
402 define void @test8(i64* %Ptr, i64 %Size) nounwind ssp {
403 ; CHECK-LABEL: @test8(
404 ; CHECK-NEXT: bb.nph:
405 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
407 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
408 ; CHECK-NEXT: [[PI:%.*]] = getelementptr i64, i64* [[PTR:%.*]], i64 [[INDVAR]]
409 ; CHECK-NEXT: store i64 0, i64* [[PI]], align 8
410 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
411 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 1
412 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
414 ; CHECK-NEXT: ret void
416 bb.nph: ; preds = %entry
419 for.body: ; preds = %bb.nph, %for.body
420 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
421 %PI = getelementptr i64, i64* %Ptr, i64 %indvar
422 store i64 0, i64 *%PI
423 %indvar.next = add i64 %indvar, 1
424 %exitcond = icmp eq i64 %indvar.next, 1
425 br i1 %exitcond, label %for.end, label %for.body
427 for.end: ; preds = %for.body, %entry
431 declare i8* @external(i8*)
433 ;; This cannot be transformed into a memcpy, because the read-from location is
434 ;; mutated by the loop.
435 define void @test9(i64 %Size) nounwind ssp {
436 ; CHECK-LABEL: @test9(
437 ; CHECK-NEXT: bb.nph:
438 ; CHECK-NEXT: [[BASE:%.*]] = alloca i8, i32 10000, align 1
439 ; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i32 10000, align 1
440 ; CHECK-NEXT: [[BASEALIAS:%.*]] = call i8* @external(i8* [[BASE]])
441 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
443 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
444 ; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
445 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
446 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[I_0_014]], align 1
447 ; CHECK-NEXT: store i8 [[V]], i8* [[DESTI]], align 1
448 ; CHECK-NEXT: store i8 4, i8* [[BASEALIAS]], align 1
449 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
450 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
451 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
453 ; CHECK-NEXT: ret void
456 %Base = alloca i8, i32 10000
457 %Dest = alloca i8, i32 10000
459 %BaseAlias = call i8* @external(i8* %Base)
462 for.body: ; preds = %bb.nph, %for.body
463 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
464 %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
465 %DestI = getelementptr i8, i8* %Dest, i64 %indvar
466 %V = load i8, i8* %I.0.014, align 1
467 store i8 %V, i8* %DestI, align 1
469 ;; This store can clobber the input.
470 store i8 4, i8* %BaseAlias
472 %indvar.next = add i64 %indvar, 1
473 %exitcond = icmp eq i64 %indvar.next, %Size
474 br i1 %exitcond, label %for.end, label %for.body
476 for.end: ; preds = %for.body, %entry
480 ; Two dimensional nested loop should be promoted to one big memset.
481 define void @test10(i8* %X) nounwind ssp {
482 ; CHECK-LABEL: @test10(
484 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[X:%.*]], i8 0, i64 10000, i1 false)
485 ; CHECK-NEXT: br label [[BB_NPH:%.*]]
487 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[FOR_INC10:%.*]] ], [ 0, [[ENTRY:%.*]] ]
488 ; CHECK-NEXT: [[I_04:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC12:%.*]], [[FOR_INC10]] ]
489 ; CHECK-NEXT: [[TMP0:%.*]] = mul nuw nsw i64 [[INDVAR]], 100
490 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[X]], i64 [[TMP0]]
491 ; CHECK-NEXT: br label [[FOR_BODY5:%.*]]
493 ; CHECK-NEXT: [[J_02:%.*]] = phi i32 [ 0, [[BB_NPH]] ], [ [[INC:%.*]], [[FOR_BODY5]] ]
494 ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_04]], 100
495 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[J_02]], [[MUL]]
496 ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD]] to i64
497 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[X]], i64 [[IDXPROM]]
498 ; CHECK-NEXT: [[INC]] = add nsw i32 [[J_02]], 1
499 ; CHECK-NEXT: [[CMP4:%.*]] = icmp eq i32 [[INC]], 100
500 ; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_INC10]], label [[FOR_BODY5]]
502 ; CHECK-NEXT: [[INC12]] = add nsw i32 [[I_04]], 1
503 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[INC12]], 100
504 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
505 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_END13:%.*]], label [[BB_NPH]]
507 ; CHECK-NEXT: ret void
512 bb.nph: ; preds = %entry, %for.inc10
513 %i.04 = phi i32 [ 0, %entry ], [ %inc12, %for.inc10 ]
516 for.body5: ; preds = %for.body5, %bb.nph
517 %j.02 = phi i32 [ 0, %bb.nph ], [ %inc, %for.body5 ]
518 %mul = mul nsw i32 %i.04, 100
519 %add = add nsw i32 %j.02, %mul
520 %idxprom = sext i32 %add to i64
521 %arrayidx = getelementptr inbounds i8, i8* %X, i64 %idxprom
522 store i8 0, i8* %arrayidx, align 1
523 %inc = add nsw i32 %j.02, 1
524 %cmp4 = icmp eq i32 %inc, 100
525 br i1 %cmp4, label %for.inc10, label %for.body5
527 for.inc10: ; preds = %for.body5
528 %inc12 = add nsw i32 %i.04, 1
529 %cmp = icmp eq i32 %inc12, 100
530 br i1 %cmp, label %for.end13, label %bb.nph
532 for.end13: ; preds = %for.inc10
536 ; On darwin10 (which is the triple in this .ll file) this loop can be turned
537 ; into a memset_pattern call.
539 define void @test11_pattern(i32* nocapture %P) nounwind ssp {
540 ; CHECK-LABEL: @test11_pattern(
542 ; CHECK-NEXT: [[P1:%.*]] = bitcast i32* [[P:%.*]] to i8*
543 ; CHECK-NEXT: call void @memset_pattern16(i8* [[P1]], i8* bitcast ([4 x i32]* @.memset_pattern to i8*), i64 40000)
544 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
546 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
547 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32* [[P]], i64 [[INDVAR]]
548 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
549 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
550 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
552 ; CHECK-NEXT: ret void
557 for.body: ; preds = %entry, %for.body
558 %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
559 %arrayidx = getelementptr i32, i32* %P, i64 %indvar
560 store i32 1, i32* %arrayidx, align 4
561 %indvar.next = add i64 %indvar, 1
562 %exitcond = icmp eq i64 %indvar.next, 10000
563 br i1 %exitcond, label %for.end, label %for.body
565 for.end: ; preds = %for.body
569 ; Store of null should turn into memset of zero.
570 define void @test12(i32** nocapture %P) nounwind ssp {
571 ; CHECK-LABEL: @test12(
573 ; CHECK-NEXT: [[P1:%.*]] = bitcast i32** [[P:%.*]] to i8*
574 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[P1]], i8 0, i64 80000, i1 false)
575 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
577 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
578 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32*, i32** [[P]], i64 [[INDVAR]]
579 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
580 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
581 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
583 ; CHECK-NEXT: ret void
588 for.body: ; preds = %entry, %for.body
589 %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
590 %arrayidx = getelementptr i32*, i32** %P, i64 %indvar
591 store i32* null, i32** %arrayidx, align 4
592 %indvar.next = add i64 %indvar, 1
593 %exitcond = icmp eq i64 %indvar.next, 10000
594 br i1 %exitcond, label %for.end, label %for.body
596 for.end: ; preds = %for.body
602 ; This store-of-address loop can be turned into a memset_pattern call.
604 define void @test13_pattern(i32** nocapture %P) nounwind ssp {
605 ; CHECK-LABEL: @test13_pattern(
607 ; CHECK-NEXT: [[P1:%.*]] = bitcast i32** [[P:%.*]] to i8*
608 ; CHECK-NEXT: call void @memset_pattern16(i8* [[P1]], i8* bitcast ([2 x i32*]* @.memset_pattern.1 to i8*), i64 80000)
609 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
611 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
612 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32*, i32** [[P]], i64 [[INDVAR]]
613 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
614 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
615 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
617 ; CHECK-NEXT: ret void
622 for.body: ; preds = %entry, %for.body
623 %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
624 %arrayidx = getelementptr i32*, i32** %P, i64 %indvar
625 store i32* @G, i32** %arrayidx, align 4
626 %indvar.next = add i64 %indvar, 1
627 %exitcond = icmp eq i64 %indvar.next, 10000
628 br i1 %exitcond, label %for.end, label %for.body
630 for.end: ; preds = %for.body
636 ; PR9815 - This is a partial overlap case that cannot be safely transformed
638 @g_50 = global [7 x i32] [i32 0, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0], align 16
640 define i32 @test14() nounwind {
641 ; CHECK-LABEL: @test14(
643 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
645 ; CHECK-NEXT: [[T5:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
646 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[T5]], 4
647 ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD]] to i64
648 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 [[IDXPROM]]
649 ; CHECK-NEXT: [[T2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
650 ; CHECK-NEXT: [[ADD4:%.*]] = add nsw i32 [[T5]], 5
651 ; CHECK-NEXT: [[IDXPROM5:%.*]] = sext i32 [[ADD4]] to i64
652 ; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 [[IDXPROM5]]
653 ; CHECK-NEXT: store i32 [[T2]], i32* [[ARRAYIDX6]], align 4
654 ; CHECK-NEXT: [[INC]] = add nsw i32 [[T5]], 1
655 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], 2
656 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
658 ; CHECK-NEXT: [[T8:%.*]] = load i32, i32* getelementptr inbounds ([7 x i32], [7 x i32]* @g_50, i32 0, i64 6), align 4
659 ; CHECK-NEXT: ret i32 [[T8]]
664 for.body: ; preds = %for.inc, %for.body.lr.ph
665 %t5 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
666 %add = add nsw i32 %t5, 4
667 %idxprom = sext i32 %add to i64
668 %arrayidx = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 %idxprom
669 %t2 = load i32, i32* %arrayidx, align 4
670 %add4 = add nsw i32 %t5, 5
671 %idxprom5 = sext i32 %add4 to i64
672 %arrayidx6 = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 %idxprom5
673 store i32 %t2, i32* %arrayidx6, align 4
674 %inc = add nsw i32 %t5, 1
675 %cmp = icmp slt i32 %inc, 2
676 br i1 %cmp, label %for.body, label %for.end
678 for.end: ; preds = %for.inc
679 %t8 = load i32, i32* getelementptr inbounds ([7 x i32], [7 x i32]* @g_50, i32 0, i64 6), align 4
684 define void @PR14241(i32* %s, i64 %size) {
685 ; Ensure that we don't form a memcpy for strided loops. Briefly, when we taught
686 ; LoopIdiom about memmove and strided loops, this got miscompiled into a memcpy
687 ; instead of a memmove. If we get the memmove transform back, this will catch
690 ; CHECK-LABEL: @PR14241(
692 ; CHECK-NEXT: [[S1:%.*]] = bitcast i32* [[S:%.*]] to i8*
693 ; CHECK-NEXT: [[END_IDX:%.*]] = add i64 [[SIZE:%.*]], -1
694 ; CHECK-NEXT: [[END_PTR:%.*]] = getelementptr inbounds i32, i32* [[S:%.*]], i64 [[END_IDX]]
695 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[S]], i64 1
696 ; CHECK-NEXT: [[SCEVGEP2:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
697 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[SIZE]], 2
698 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], -8
699 ; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 2
700 ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw i64 [[TMP3]], 2
701 ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], 4
702 ; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 [[S1]], i8* align 4 [[SCEVGEP2]], i64 [[TMP5]], i1 false)
703 ; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
705 ; CHECK-NEXT: [[PHI_PTR:%.*]] = phi i32* [ [[S]], [[ENTRY:%.*]] ], [ [[NEXT_PTR:%.*]], [[WHILE_BODY]] ]
706 ; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds i32, i32* [[PHI_PTR]], i64 1
707 ; CHECK-NEXT: [[VAL:%.*]] = load i32, i32* [[SRC_PTR]], align 4
708 ; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds i32, i32* [[PHI_PTR]], i64 0
709 ; CHECK-NEXT: [[NEXT_PTR]] = getelementptr inbounds i32, i32* [[PHI_PTR]], i64 1
710 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32* [[NEXT_PTR]], [[END_PTR]]
711 ; CHECK-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[WHILE_BODY]]
713 ; CHECK-NEXT: ret void
717 %end.idx = add i64 %size, -1
718 %end.ptr = getelementptr inbounds i32, i32* %s, i64 %end.idx
722 %phi.ptr = phi i32* [ %s, %entry ], [ %next.ptr, %while.body ]
723 %src.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1
724 %val = load i32, i32* %src.ptr, align 4
725 %dst.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 0
726 store i32 %val, i32* %dst.ptr, align 4
727 %next.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1
728 %cmp = icmp eq i32* %next.ptr, %end.ptr
729 br i1 %cmp, label %exit, label %while.body
735 ; Recognize loops with a negative stride.
736 define void @test15(i32* nocapture %f) {
737 ; CHECK-LABEL: @test15(
739 ; CHECK-NEXT: [[F1:%.*]] = bitcast i32* [[F:%.*]] to i8*
740 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[F1]], i8 0, i64 262148, i1 false)
741 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
743 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 65536, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
744 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
745 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
746 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[INDVARS_IV]], 0
747 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
748 ; CHECK: for.cond.cleanup:
749 ; CHECK-NEXT: ret void
755 %indvars.iv = phi i64 [ 65536, %entry ], [ %indvars.iv.next, %for.body ]
756 %arrayidx = getelementptr inbounds i32, i32* %f, i64 %indvars.iv
757 store i32 0, i32* %arrayidx, align 4
758 %indvars.iv.next = add nsw i64 %indvars.iv, -1
759 %cmp = icmp sgt i64 %indvars.iv, 0
760 br i1 %cmp, label %for.body, label %for.cond.cleanup
766 ; Loop with a negative stride. Verify an aliasing write to f[65536] prevents
767 ; the creation of a memset.
768 define void @test16(i32* nocapture %f) {
769 ; CHECK-LABEL: @test16(
771 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[F:%.*]], i64 65536
772 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
774 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 65536, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
775 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
776 ; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
777 ; CHECK-NEXT: store i32 1, i32* [[ARRAYIDX1]], align 4
778 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
779 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[INDVARS_IV]], 0
780 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
781 ; CHECK: for.cond.cleanup:
782 ; CHECK-NEXT: ret void
785 %arrayidx1 = getelementptr inbounds i32, i32* %f, i64 65536
788 for.body: ; preds = %entry, %for.body
789 %indvars.iv = phi i64 [ 65536, %entry ], [ %indvars.iv.next, %for.body ]
790 %arrayidx = getelementptr inbounds i32, i32* %f, i64 %indvars.iv
791 store i32 0, i32* %arrayidx, align 4
792 store i32 1, i32* %arrayidx1, align 4
793 %indvars.iv.next = add nsw i64 %indvars.iv, -1
794 %cmp = icmp sgt i64 %indvars.iv, 0
795 br i1 %cmp, label %for.body, label %for.cond.cleanup
797 for.cond.cleanup: ; preds = %for.body
801 ; Handle memcpy-able loops with negative stride.
; test17: backwards (negative-stride) copy from readonly %a into a freshly
; malloc'd buffer, guarded by a %c == 0 check. The CHECK lines verify that
; loop-idiom emits a single llvm.memcpy in while.body.preheader — with the
; size and start pointers recomputed from %c via SCEV expansion — and that
; the per-iteration store disappears from the rewritten loop body.
802 define noalias i32* @test17(i32* nocapture readonly %a, i32 %c) {
803 ; CHECK-LABEL: @test17(
805 ; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[C:%.*]] to i64
806 ; CHECK-NEXT: [[MUL:%.*]] = shl nsw i64 [[CONV]], 2
807 ; CHECK-NEXT: [[CALL:%.*]] = tail call noalias i8* @malloc(i64 [[MUL]])
808 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[CALL]] to i32*
809 ; CHECK-NEXT: [[TOBOOL_9:%.*]] = icmp eq i32 [[C]], 0
810 ; CHECK-NEXT: br i1 [[TOBOOL_9]], label [[WHILE_END:%.*]], label [[WHILE_BODY_PREHEADER:%.*]]
811 ; CHECK: while.body.preheader:
812 ; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[C]] to i64
813 ; CHECK-NEXT: [[TMP2:%.*]] = shl nsw i64 [[TMP1]], 2
814 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], -4
815 ; CHECK-NEXT: [[TMP4:%.*]] = add nsw i32 [[C]], -1
816 ; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
817 ; CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 2
818 ; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP3]], [[TMP6]]
819 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[CALL]], i64 [[TMP7]]
820 ; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP1]], -1
821 ; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[TMP8]], [[TMP5]]
822 ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 [[TMP9]]
823 ; CHECK-NEXT: [[SCEVGEP12:%.*]] = bitcast i32* [[SCEVGEP1]] to i8*
824 ; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[C]] to i64
825 ; CHECK-NEXT: [[TMP11:%.*]] = shl nuw nsw i64 [[TMP10]], 2
826 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[SCEVGEP]], i8* align 4 [[SCEVGEP12]], i64 [[TMP11]], i1 false)
827 ; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
829 ; CHECK-NEXT: [[DEC10_IN:%.*]] = phi i32 [ [[DEC10:%.*]], [[WHILE_BODY]] ], [ [[C]], [[WHILE_BODY_PREHEADER]] ]
830 ; CHECK-NEXT: [[DEC10]] = add nsw i32 [[DEC10_IN]], -1
831 ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[DEC10]] to i64
832 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IDXPROM]]
833 ; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
834 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 [[IDXPROM]]
835 ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[DEC10]], 0
836 ; CHECK-NEXT: br i1 [[TOBOOL]], label [[WHILE_END_LOOPEXIT:%.*]], label [[WHILE_BODY]]
837 ; CHECK: while.end.loopexit:
838 ; CHECK-NEXT: br label [[WHILE_END]]
840 ; CHECK-NEXT: ret i32* [[TMP0]]
843 %conv = sext i32 %c to i64
844 %mul = shl nsw i64 %conv, 2
845 %call = tail call noalias i8* @malloc(i64 %mul)
846 %0 = bitcast i8* %call to i32*
847 %tobool.9 = icmp eq i32 %c, 0
848 br i1 %tobool.9, label %while.end, label %while.body.preheader
850 while.body.preheader: ; preds = %entry
; Loop counts %dec10 from %c-1 down to 0, copying a[%dec10] into the new
; buffer at the same index.
853 while.body: ; preds = %while.body.preheader, %while.body
854 %dec10.in = phi i32 [ %dec10, %while.body ], [ %c, %while.body.preheader ]
855 %dec10 = add nsw i32 %dec10.in, -1
856 %idxprom = sext i32 %dec10 to i64
857 %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
858 %1 = load i32, i32* %arrayidx, align 4
859 %arrayidx2 = getelementptr inbounds i32, i32* %0, i64 %idxprom
860 store i32 %1, i32* %arrayidx2, align 4
861 %tobool = icmp eq i32 %dec10, 0
862 br i1 %tobool, label %while.end.loopexit, label %while.body
864 while.end.loopexit: ; preds = %while.body
867 while.end: ; preds = %while.end.loopexit, %entry
871 declare noalias i8* @malloc(i64)
873 ; Handle memcpy-able loops with negative stride.
874 ; void test18(unsigned *__restrict__ a, unsigned *__restrict__ b) {
875 ; for (int i = 2047; i >= 0; --i) {
; test18: negative-stride copy b[2047..0] -> a[2047..0] with noalias
; arguments and a constant trip count. The CHECK lines verify a single
; 8192-byte (2048 x i32) llvm.memcpy is emitted before the loop and the
; per-iteration store is removed from the body.
879 define void @test18(i32* noalias nocapture %a, i32* noalias nocapture readonly %b) #0 {
880 ; CHECK-LABEL: @test18(
882 ; CHECK-NEXT: [[A1:%.*]] = bitcast i32* [[A:%.*]] to i8*
883 ; CHECK-NEXT: [[B2:%.*]] = bitcast i32* [[B:%.*]] to i8*
884 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A1]], i8* align 4 [[B2]], i64 8192, i1 false)
885 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
887 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 2047, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
888 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
889 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
890 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
891 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
892 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[INDVARS_IV]], 0
893 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
894 ; CHECK: for.cond.cleanup:
895 ; CHECK-NEXT: ret void
900 for.body: ; preds = %entry, %for.body
901 %indvars.iv = phi i64 [ 2047, %entry ], [ %indvars.iv.next, %for.body ]
902 %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
903 %0 = load i32, i32* %arrayidx, align 4
904 %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
905 store i32 %0, i32* %arrayidx2, align 4
906 %indvars.iv.next = add nsw i64 %indvars.iv, -1
907 %cmp = icmp sgt i64 %indvars.iv, 0
908 br i1 %cmp, label %for.body, label %for.cond.cleanup
910 for.cond.cleanup: ; preds = %for.body
914 ; Two dimensional nested loop with negative stride should be promoted to one big memset.
; test19: two-level nested loop zeroing X[i*100 + j] for i, j counting down
; 99..0 — i.e. all of X[0..9999]. Both levels use a negative stride. The
; CHECK lines verify the whole nest is promoted to a single 10000-byte
; llvm.memset in the entry block, with the inner store removed from the
; (otherwise preserved) loop nest.
915 define void @test19(i8* nocapture %X) {
916 ; CHECK-LABEL: @test19(
918 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[X:%.*]], i8 0, i64 10000, i1 false)
919 ; CHECK-NEXT: br label [[FOR_COND1_PREHEADER:%.*]]
920 ; CHECK: for.cond1.preheader:
921 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[FOR_INC4:%.*]] ], [ 0, [[ENTRY:%.*]] ]
922 ; CHECK-NEXT: [[I_06:%.*]] = phi i32 [ 99, [[ENTRY]] ], [ [[DEC5:%.*]], [[FOR_INC4]] ]
923 ; CHECK-NEXT: [[TMP0:%.*]] = mul nsw i64 [[INDVAR]], -100
924 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 9900
925 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[X]], i64 [[TMP1]]
926 ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_06]], 100
927 ; CHECK-NEXT: br label [[FOR_BODY3:%.*]]
929 ; CHECK-NEXT: [[J_05:%.*]] = phi i32 [ 99, [[FOR_COND1_PREHEADER]] ], [ [[DEC:%.*]], [[FOR_BODY3]] ]
930 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[J_05]], [[MUL]]
931 ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD]] to i64
932 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[X]], i64 [[IDXPROM]]
933 ; CHECK-NEXT: [[DEC]] = add nsw i32 [[J_05]], -1
934 ; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[J_05]], 0
935 ; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY3]], label [[FOR_INC4]]
937 ; CHECK-NEXT: [[DEC5]] = add nsw i32 [[I_06]], -1
938 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[I_06]], 0
939 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
940 ; CHECK-NEXT: br i1 [[CMP]], label [[FOR_COND1_PREHEADER]], label [[FOR_END6:%.*]]
942 ; CHECK-NEXT: ret void
945 br label %for.cond1.preheader
947 for.cond1.preheader: ; preds = %entry, %for.inc4
948 %i.06 = phi i32 [ 99, %entry ], [ %dec5, %for.inc4 ]
949 %mul = mul nsw i32 %i.06, 100
; Inner loop: zero X[i*100 + 99] down to X[i*100 + 0].
952 for.body3: ; preds = %for.cond1.preheader, %for.body3
953 %j.05 = phi i32 [ 99, %for.cond1.preheader ], [ %dec, %for.body3 ]
954 %add = add nsw i32 %j.05, %mul
955 %idxprom = sext i32 %add to i64
956 %arrayidx = getelementptr inbounds i8, i8* %X, i64 %idxprom
957 store i8 0, i8* %arrayidx, align 1
958 %dec = add nsw i32 %j.05, -1
959 %cmp2 = icmp sgt i32 %j.05, 0
960 br i1 %cmp2, label %for.body3, label %for.inc4
962 for.inc4: ; preds = %for.body3
963 %dec5 = add nsw i32 %i.06, -1
964 %cmp = icmp sgt i32 %i.06, 0
965 br i1 %cmp, label %for.cond1.preheader, label %for.end6
967 for.end6: ; preds = %for.inc4
971 ; Handle loops where the trip count is a narrow integer that needs to be
; form_memset_narrow_size: the trip count is a 32-bit value (%size). The
; CHECK lines verify loop-idiom zero-extends it to i64 and scales it by 8
; (i64 element size) before forming the memset in the guarded preheader.
973 define void @form_memset_narrow_size(i64* %ptr, i32 %size) {
974 ; CHECK-LABEL: @form_memset_narrow_size(
976 ; CHECK-NEXT: [[PTR1:%.*]] = bitcast i64* [[PTR:%.*]] to i8*
977 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[SIZE:%.*]], 0
978 ; CHECK-NEXT: br i1 [[CMP1]], label [[LOOP_PH:%.*]], label [[EXIT:%.*]]
980 ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[SIZE]] to i64
981 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
982 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[PTR1]], i8 0, i64 [[TMP1]], i1 false)
983 ; CHECK-NEXT: br label [[LOOP_BODY:%.*]]
985 ; CHECK-NEXT: [[STOREMERGE4:%.*]] = phi i32 [ 0, [[LOOP_PH]] ], [ [[INC:%.*]], [[LOOP_BODY]] ]
986 ; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[STOREMERGE4]] to i64
987 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[PTR]], i64 [[IDXPROM]]
988 ; CHECK-NEXT: [[INC]] = add nsw i32 [[STOREMERGE4]], 1
989 ; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[INC]], [[SIZE]]
990 ; CHECK-NEXT: br i1 [[CMP2]], label [[LOOP_BODY]], label [[LOOP_EXIT:%.*]]
992 ; CHECK-NEXT: br label [[EXIT]]
994 ; CHECK-NEXT: ret void
997 %cmp1 = icmp sgt i32 %size, 0
998 br i1 %cmp1, label %loop.ph, label %exit
; Loop zeroes ptr[0 .. size-1] (i64 elements) with a 32-bit counter.
1004 %storemerge4 = phi i32 [ 0, %loop.ph ], [ %inc, %loop.body ]
1005 %idxprom = sext i32 %storemerge4 to i64
1006 %arrayidx = getelementptr inbounds i64, i64* %ptr, i64 %idxprom
1007 store i64 0, i64* %arrayidx, align 8
1008 %inc = add nsw i32 %storemerge4, 1
1009 %cmp2 = icmp slt i32 %inc, %size
1010 br i1 %cmp2, label %loop.body, label %loop.exit
; form_memcpy_narrow_size: same narrow (i32) trip count as the memset case
; above, but for a noalias copy loop. The CHECK lines verify the size is
; zero-extended to i64, scaled by 8, and used for a single llvm.memcpy in
; the guarded preheader; the per-iteration load/store pair leaves the loop.
1019 define void @form_memcpy_narrow_size(i64* noalias %dst, i64* noalias %src, i32 %size) {
1020 ; CHECK-LABEL: @form_memcpy_narrow_size(
1021 ; CHECK-NEXT: entry:
1022 ; CHECK-NEXT: [[DST1:%.*]] = bitcast i64* [[DST:%.*]] to i8*
1023 ; CHECK-NEXT: [[SRC2:%.*]] = bitcast i64* [[SRC:%.*]] to i8*
1024 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[SIZE:%.*]], 0
1025 ; CHECK-NEXT: br i1 [[CMP1]], label [[LOOP_PH:%.*]], label [[EXIT:%.*]]
1027 ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[SIZE]] to i64
1028 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
1029 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[DST1]], i8* align 8 [[SRC2]], i64 [[TMP1]], i1 false)
1030 ; CHECK-NEXT: br label [[LOOP_BODY:%.*]]
1032 ; CHECK-NEXT: [[STOREMERGE4:%.*]] = phi i32 [ 0, [[LOOP_PH]] ], [ [[INC:%.*]], [[LOOP_BODY]] ]
1033 ; CHECK-NEXT: [[IDXPROM1:%.*]] = sext i32 [[STOREMERGE4]] to i64
1034 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[SRC]], i64 [[IDXPROM1]]
1035 ; CHECK-NEXT: [[V:%.*]] = load i64, i64* [[ARRAYIDX1]], align 8
1036 ; CHECK-NEXT: [[IDXPROM2:%.*]] = sext i32 [[STOREMERGE4]] to i64
1037 ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, i64* [[DST]], i64 [[IDXPROM2]]
1038 ; CHECK-NEXT: [[INC]] = add nsw i32 [[STOREMERGE4]], 1
1039 ; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[INC]], [[SIZE]]
1040 ; CHECK-NEXT: br i1 [[CMP2]], label [[LOOP_BODY]], label [[LOOP_EXIT:%.*]]
1042 ; CHECK-NEXT: br label [[EXIT]]
1044 ; CHECK-NEXT: ret void
1047 %cmp1 = icmp sgt i32 %size, 0
1048 br i1 %cmp1, label %loop.ph, label %exit
; Loop copies src[i] -> dst[i] for i in [0, size) with a 32-bit counter.
1054 %storemerge4 = phi i32 [ 0, %loop.ph ], [ %inc, %loop.body ]
1055 %idxprom1 = sext i32 %storemerge4 to i64
1056 %arrayidx1 = getelementptr inbounds i64, i64* %src, i64 %idxprom1
1057 %v = load i64, i64* %arrayidx1, align 8
1058 %idxprom2 = sext i32 %storemerge4 to i64
1059 %arrayidx2 = getelementptr inbounds i64, i64* %dst, i64 %idxprom2
1060 store i64 %v, i64* %arrayidx2, align 8
1061 %inc = add nsw i32 %storemerge4, 1
1062 %cmp2 = icmp slt i32 %inc, %size
1063 br i1 %cmp2, label %loop.body, label %loop.exit
1072 ;; Memmove formation: overlapping in-place copy with positive stride.
; PR46179_positive_stride: overlapping in-place copy Src[i] = Src[i+1] with
; a positive stride (source one byte ahead of destination within the same
; buffer). The CHECK lines verify an llvm.memmove is formed in bb.nph with
; source Src+1 and destination Src, and that the store vanishes from the loop.
1073 define void @PR46179_positive_stride(i8* %Src, i64 %Size) {
1074 ; CHECK-LABEL: @PR46179_positive_stride(
1075 ; CHECK-NEXT: bb.nph:
1076 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 1
1077 ; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 [[SRC]], i8* align 1 [[SCEVGEP]], i64 [[SIZE:%.*]], i1 false)
1078 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1080 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
1081 ; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
1082 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[STEP]]
1083 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
1084 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
1085 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
1086 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
1087 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
1089 ; CHECK-NEXT: ret void
1094 for.body: ; preds = %bb.nph, %for.body
1095 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
1096 %Step = add nuw nsw i64 %indvar, 1
1097 %SrcI = getelementptr i8, i8* %Src, i64 %Step
1098 %DestI = getelementptr i8, i8* %Src, i64 %indvar
1099 %V = load i8, i8* %SrcI, align 1
1100 store i8 %V, i8* %DestI, align 1
1101 %indvar.next = add i64 %indvar, 1
1102 %exitcond = icmp eq i64 %indvar.next, %Size
1103 br i1 %exitcond, label %for.end, label %for.body
1105 for.end: ; preds = %for.body, %entry
1109 ;; Memmove formation: overlapping in-place copy with negative stride.
; PR46179_negative_stride: overlapping in-place copy Src[i] = Src[i-1]
; walking downward from %Size (destination one byte ahead of source in the
; same buffer). The loop is guarded by Size > 0; the CHECK lines verify an
; llvm.memmove (dest Src+1, source Src) is formed in for.body.preheader and
; the store is removed from the loop.
1110 define void @PR46179_negative_stride(i8* %Src, i64 %Size) {
1111 ; CHECK-LABEL: @PR46179_negative_stride(
1112 ; CHECK-NEXT: bb.nph:
1113 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i64 [[SIZE:%.*]], 0
1114 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
1115 ; CHECK: for.body.preheader:
1116 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 1
1117 ; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 [[SCEVGEP]], i8* align 1 [[SRC]], i64 [[SIZE]], i1 false)
1118 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1120 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[STEP]], [[FOR_BODY]] ], [ [[SIZE]], [[FOR_BODY_PREHEADER]] ]
1121 ; CHECK-NEXT: [[STEP:%.*]] = add nsw i64 [[INDVAR]], -1
1122 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[STEP]]
1123 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
1124 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[INDVAR]]
1125 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[INDVAR]], 1
1126 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END]]
1128 ; CHECK-NEXT: ret void
1131 %cmp1 = icmp sgt i64 %Size, 0
1132 br i1 %cmp1, label %for.body, label %for.end
1134 for.body: ; preds = %bb.nph, %.for.body
1135 %indvar = phi i64 [ %Step, %for.body ], [ %Size, %bb.nph ]
1136 %Step = add nsw i64 %indvar, -1
1137 %SrcI = getelementptr inbounds i8, i8* %Src, i64 %Step
1138 %V = load i8, i8* %SrcI, align 1
1139 %DestI = getelementptr inbounds i8, i8* %Src, i64 %indvar
1140 store i8 %V, i8* %DestI, align 1
1141 %exitcond = icmp sgt i64 %indvar, 1
1142 br i1 %exitcond, label %for.body, label %for.end
1144 for.end: ; preds = %.for.body, %bb.nph
1148 ;; Do not form memmove from previous store when stride is positive.
; do_not_form_memmove1: negative test. With a positive stride the loop does
; Src[i] = Src[i-1] — each iteration reads the byte the PREVIOUS iteration
; stored, so replacing the loop with one bulk move would change the result.
; The CHECK lines verify the load and store remain in the loop body.
1149 define void @do_not_form_memmove1(i8* %Src, i64 %Size) {
1150 ; CHECK-LABEL: @do_not_form_memmove1(
1151 ; CHECK-NEXT: bb.nph:
1152 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1154 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 1, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
1155 ; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], -1
1156 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 [[STEP]]
1157 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
1158 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
1159 ; CHECK-NEXT: store i8 [[V]], i8* [[DESTI]], align 1
1160 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
1161 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
1162 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
1164 ; CHECK-NEXT: ret void
1169 for.body: ; preds = %bb.nph, %for.body
1170 %indvar = phi i64 [ 1, %bb.nph ], [ %indvar.next, %for.body ]
1171 %Step = add nuw nsw i64 %indvar, -1
1172 %SrcI = getelementptr i8, i8* %Src, i64 %Step
1173 %DestI = getelementptr i8, i8* %Src, i64 %indvar
1174 %V = load i8, i8* %SrcI, align 1
1175 store i8 %V, i8* %DestI, align 1
1176 %indvar.next = add i64 %indvar, 1
1177 %exitcond = icmp eq i64 %indvar.next, %Size
1178 br i1 %exitcond, label %for.end, label %for.body
1180 for.end: ; preds = %for.body, %entry
1184 ;; Do not form memmove from next store when stride is negative.
; do_not_form_memmove2: negative test. With a negative stride the loop does
; Src[i] = Src[i+1] counting down — the byte read at i+1 is the one the
; PREVIOUS iteration stored, so a bulk move would change the result. The
; CHECK lines verify the load and store stay in the loop body.
1185 define void @do_not_form_memmove2(i8* %Src, i64 %Size) {
1186 ; CHECK-LABEL: @do_not_form_memmove2(
1187 ; CHECK-NEXT: bb.nph:
1188 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i64 [[SIZE:%.*]], 0
1189 ; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
1190 ; CHECK: for.body.preheader:
1191 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1193 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT]], [[FOR_BODY]] ], [ [[SIZE]], [[FOR_BODY_PREHEADER]] ]
1194 ; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
1195 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[STEP]]
1196 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
1197 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[INDVAR]]
1198 ; CHECK-NEXT: store i8 [[V]], i8* [[DESTI]], align 1
1199 ; CHECK-NEXT: [[INDVAR_NEXT:%.*]] = add nsw i64 [[INDVAR]], -1
1200 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[INDVAR]], 1
1201 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END]]
1203 ; CHECK-NEXT: ret void
1206 %cmp1 = icmp sgt i64 %Size, 0
1207 br i1 %cmp1, label %for.body, label %for.end
1209 for.body: ; preds = %bb.nph, %.for.body
1210 %indvar = phi i64 [ %indvar.next, %for.body ], [ %Size, %bb.nph ]
1211 %Step = add nuw nsw i64 %indvar, 1
1212 %SrcI = getelementptr inbounds i8, i8* %Src, i64 %Step
1213 %V = load i8, i8* %SrcI, align 1
1214 %DestI = getelementptr inbounds i8, i8* %Src, i64 %indvar
1215 store i8 %V, i8* %DestI, align 1
1216 %indvar.next = add nsw i64 %indvar, -1
1217 %exitcond = icmp sgt i64 %indvar, 1
1218 br i1 %exitcond, label %for.body, label %for.end
1220 for.end: ; preds = %.for.body, %bb.nph
1224 ;; Do not form memmove when underaligned load is overlapped with store.
; do_not_form_memmove3: negative test. The i32 load is taken 2 bytes (one
; i16) past the store address with only align 2, so each 4-byte load
; overlaps the 4-byte store of the same iteration. The CHECK lines verify
; the loop is left untouched (load and store remain).
1226 define void @do_not_form_memmove3(i32* %s, i64 %size) {
1227 ; CHECK-LABEL: @do_not_form_memmove3(
1228 ; CHECK-NEXT: entry:
1229 ; CHECK-NEXT: [[END_IDX:%.*]] = add i64 [[SIZE:%.*]], -1
1230 ; CHECK-NEXT: [[END_PTR:%.*]] = getelementptr inbounds i32, i32* [[S:%.*]], i64 [[END_IDX]]
1231 ; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
1232 ; CHECK: while.body:
1233 ; CHECK-NEXT: [[PHI_PTR:%.*]] = phi i32* [ [[S]], [[ENTRY:%.*]] ], [ [[NEXT_PTR:%.*]], [[WHILE_BODY]] ]
1234 ; CHECK-NEXT: [[NEXT:%.*]] = bitcast i32* [[PHI_PTR]] to i16*
1235 ; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr i16, i16* [[NEXT]], i64 1
1236 ; CHECK-NEXT: [[SRC_PTR2:%.*]] = bitcast i16* [[SRC_PTR]] to i32*
1237 ; CHECK-NEXT: [[VAL:%.*]] = load i32, i32* [[SRC_PTR2]], align 2
1238 ; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr i32, i32* [[PHI_PTR]], i64 0
1239 ; CHECK-NEXT: store i32 [[VAL]], i32* [[DST_PTR]], align 4
1240 ; CHECK-NEXT: [[NEXT_PTR]] = getelementptr i32, i32* [[PHI_PTR]], i64 1
1241 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32* [[NEXT_PTR]], [[END_PTR]]
1242 ; CHECK-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[WHILE_BODY]]
1243 ; CHECK-NEXT: ret void
1246 %end.idx = add i64 %size, -1
1247 %end.ptr = getelementptr inbounds i32, i32* %s, i64 %end.idx
1248 br label %while.body
1251 %phi.ptr = phi i32* [ %s, %entry ], [ %next.ptr, %while.body ]
1252 %next = bitcast i32* %phi.ptr to i16*
1253 %src.ptr = getelementptr i16, i16* %next, i64 1
1254 %src.ptr2 = bitcast i16* %src.ptr to i32*
1255 ; below underaligned load is overlapped with store.
1256 %val = load i32, i32* %src.ptr2, align 2
1257 %dst.ptr = getelementptr i32, i32* %phi.ptr, i64 0
1258 store i32 %val, i32* %dst.ptr, align 4
1259 %next.ptr = getelementptr i32, i32* %phi.ptr, i64 1
1260 %cmp = icmp eq i32* %next.ptr, %end.ptr
1261 br i1 %cmp, label %exit, label %while.body
1267 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)
1269 ;; FIXME: Do not form memmove from loop body containing memcpy.
; do_not_form_memmove4: the per-iteration copy is expressed as a 1-byte
; llvm.memcpy intrinsic rather than a load/store pair. The CHECK lines show
; the loop kept as-is with the memcpy call still inside; per the FIXME
; above, a memmove must not be formed from a body containing memcpy.
1271 define void @do_not_form_memmove4(i8* %Src, i64 %Size) {
1272 ; CHECK-LABEL: @do_not_form_memmove4(
1273 ; CHECK-NEXT: bb.nph:
1274 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1276 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
1277 ; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
1278 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[STEP]]
1279 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
1280 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DESTI]], i8* align 1 [[SRCI]], i64 1, i1 false)
1281 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
1282 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
1283 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
1285 ; CHECK-NEXT: ret void
1290 for.body: ; preds = %bb.nph, %for.body
1291 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
1292 %Step = add nuw nsw i64 %indvar, 1
1293 %SrcI = getelementptr i8, i8* %Src, i64 %Step
1294 %DestI = getelementptr i8, i8* %Src, i64 %indvar
1295 call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %DestI, i8* align 1 %SrcI, i64 1, i1 false)
1296 %indvar.next = add i64 %indvar, 1
1297 %exitcond = icmp eq i64 %indvar.next, %Size
1298 br i1 %exitcond, label %for.end, label %for.body
1300 for.end: ; preds = %for.body, %entry
1303 ;; Memcpy formation is still preferred over memmove.
; prefer_memcpy_over_memmove: source and destination are distinct noalias
; pointers (source offset by 42), so no overlap is possible. The CHECK
; lines verify loop-idiom emits llvm.memcpy (not the more conservative
; memmove) from Src+42 into Dest, and drops the store from the loop.
1304 define void @prefer_memcpy_over_memmove(i8* noalias %Src, i8* noalias %Dest, i64 %Size) {
1305 ; CHECK-LABEL: @prefer_memcpy_over_memmove(
1306 ; CHECK-NEXT: bb.nph:
1307 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 42
1308 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DEST:%.*]], i8* align 1 [[SCEVGEP]], i64 [[SIZE:%.*]], i1 false)
1309 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
1311 ; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
1312 ; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 42
1313 ; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[STEP]]
1314 ; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
1315 ; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
1316 ; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
1317 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
1318 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
1320 ; CHECK-NEXT: ret void
1325 for.body: ; preds = %bb.nph, %for.body
1326 %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
1327 %Step = add nuw nsw i64 %indvar, 42
1328 %SrcI = getelementptr i8, i8* %Src, i64 %Step
1329 %DestI = getelementptr i8, i8* %Dest, i64 %indvar
1330 %V = load i8, i8* %SrcI, align 1
1331 store i8 %V, i8* %DestI, align 1
1332 %indvar.next = add i64 %indvar, 1
1333 %exitcond = icmp eq i64 %indvar.next, %Size
1334 br i1 %exitcond, label %for.end, label %for.body
1336 for.end: ; preds = %for.body, %entry
1340 ; Validate that "memset_pattern" has the proper attributes.
1341 ; CHECK: declare void @memset_pattern16(i8* nocapture writeonly, i8* nocapture readonly, i64) [[ATTRS:#[0-9]+]]
1342 ; CHECK: [[ATTRS]] = { argmemonly nofree }