1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -data-layout="e-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-n8:16:32" -basic-aa -gvn -S -dce | FileCheck %s --check-prefixes=CHECK,LE
3 ; RUN: opt < %s -data-layout="E-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-n32" -basic-aa -gvn -S -dce | FileCheck %s --check-prefixes=CHECK,BE
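; The two RUN lines differ only in the data layout: the first is little-endian
; ("e-...") and the second big-endian ("E-..."), so byte offsets extracted from
; wider values differ between them. Output common to both is checked with the
; CHECK prefix; endian-specific output uses the LE and BE prefixes.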
6 define i32 @test0(i32 %V, i32* %P) {
8 ; CHECK-NEXT: store i32 [[V:%.*]], i32* [[P:%.*]], align 4
9 ; CHECK-NEXT: ret i32 [[V]]
13 %A = load i32, i32* %P
18 ;;===----------------------------------------------------------------------===;;
19 ;; Crash cases.
20 ;;===----------------------------------------------------------------------===;;
23 define i8 @crash0({i32, i32} %A, {i32, i32}* %P) {
24 ; CHECK-LABEL: @crash0(
25 ; CHECK-NEXT: store { i32, i32 } [[A:%.*]], { i32, i32 }* [[P:%.*]], align 4
26 ; CHECK-NEXT: [[X:%.*]] = bitcast { i32, i32 }* [[P]] to i8*
27 ; CHECK-NEXT: [[Y:%.*]] = load i8, i8* [[X]], align 1
28 ; CHECK-NEXT: ret i8 [[Y]]
30 store {i32, i32} %A, {i32, i32}* %P
31 %X = bitcast {i32, i32}* %P to i8*
36 ;; No PR filed, crashed in CaptureTracker.
37 declare void @helper()
38 define void @crash1() {
39 ; CHECK-LABEL: @crash1(
40 ; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* undef, i8* undef, i64 undef, i1 false) #[[ATTR3:[0-9]+]]
41 ; CHECK-NEXT: ret void
43 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* undef, i8* undef, i64 undef, i1 false) nounwind
44 %ttmp = load i8, i8* bitcast (void ()* @helper to i8*)
45 %x = icmp eq i8 %ttmp, 15
50 ;;===----------------------------------------------------------------------===;;
51 ;; Store -> Load and Load -> Load forwarding where src and dst are different
52 ;; types, but where the base pointer is a must alias.
53 ;;===----------------------------------------------------------------------===;;
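;; As a minimal sketch of the rewrite the checks below expect: a store of an
;; i32 followed by a reload through a bitcast pointer of the same address is
;; forwarded by reinterpreting the stored bits instead of reloading, i.e.
;;
;;   store i32 %V, i32* %P
;;   %P2 = bitcast i32* %P to float*
;;   %A = load float, float* %P2
;;
;; becomes
;;
;;   store i32 %V, i32* %P
;;   %A = bitcast i32 %V to float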
55 ;; i32 -> f32 forwarding.
56 define float @coerce_mustalias1(i32 %V, i32* %P) {
57 ; CHECK-LABEL: @coerce_mustalias1(
58 ; CHECK-NEXT: store i32 [[V:%.*]], i32* [[P:%.*]], align 4
59 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[V]] to float
60 ; CHECK-NEXT: ret float [[TMP1]]
64 %P2 = bitcast i32* %P to float*
66 %A = load float, float* %P2
70 ;; i32* -> float forwarding.
71 define float @coerce_mustalias2(i32* %V, i32** %P) {
72 ; CHECK-LABEL: @coerce_mustalias2(
73 ; CHECK-NEXT: store i32* [[V:%.*]], i32** [[P:%.*]], align 4
74 ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i32* [[V]] to i32
75 ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[TMP1]] to float
76 ; CHECK-NEXT: ret float [[TMP2]]
78 store i32* %V, i32** %P
80 %P2 = bitcast i32** %P to float*
82 %A = load float, float* %P2
86 ;; float -> i32* forwarding.
87 define i32* @coerce_mustalias3(float %V, float* %P) {
88 ; CHECK-LABEL: @coerce_mustalias3(
89 ; CHECK-NEXT: store float [[V:%.*]], float* [[P:%.*]], align 4
90 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast float [[V]] to i32
91 ; CHECK-NEXT: [[TMP2:%.*]] = inttoptr i32 [[TMP1]] to i32*
92 ; CHECK-NEXT: ret i32* [[TMP2]]
94 store float %V, float* %P
96 %P2 = bitcast float* %P to i32**
98 %A = load i32*, i32** %P2
102 ;; i32 -> f32 load forwarding.
103 define float @coerce_mustalias4(i32* %P, i1 %cond) {
104 ; CHECK-LABEL: @coerce_mustalias4(
105 ; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[P:%.*]], align 4
106 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[A]] to float
107 ; CHECK-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
109 ; CHECK-NEXT: ret float [[TMP1]]
111 ; CHECK-NEXT: ret float [[TMP1]]
113 %A = load i32, i32* %P
115 %P2 = bitcast i32* %P to float*
116 %B = load float, float* %P2
117 br i1 %cond, label %T, label %F
122 %X = bitcast i32 %A to float
127 ;; i32 -> i8 forwarding
128 define i8 @coerce_mustalias5(i32 %V, i32* %P) {
129 ; LE-LABEL: @coerce_mustalias5(
130 ; LE-NEXT: store i32 [[V:%.*]], i32* [[P:%.*]], align 4
131 ; LE-NEXT: [[TMP1:%.*]] = trunc i32 [[V]] to i8
132 ; LE-NEXT: ret i8 [[TMP1]]
134 ; BE-LABEL: @coerce_mustalias5(
135 ; BE-NEXT: store i32 [[V:%.*]], i32* [[P:%.*]], align 4
136 ; BE-NEXT: [[TMP1:%.*]] = lshr i32 [[V]], 24
137 ; BE-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8
138 ; BE-NEXT: ret i8 [[TMP2]]
140 store i32 %V, i32* %P
142 %P2 = bitcast i32* %P to i8*
144 %A = load i8, i8* %P2
148 ;; i64 -> float forwarding
149 define float @coerce_mustalias6(i64 %V, i64* %P) {
150 ; LE-LABEL: @coerce_mustalias6(
151 ; LE-NEXT: store i64 [[V:%.*]], i64* [[P:%.*]], align 4
152 ; LE-NEXT: [[TMP1:%.*]] = trunc i64 [[V]] to i32
153 ; LE-NEXT: [[TMP2:%.*]] = bitcast i32 [[TMP1]] to float
154 ; LE-NEXT: ret float [[TMP2]]
156 ; BE-LABEL: @coerce_mustalias6(
157 ; BE-NEXT: store i64 [[V:%.*]], i64* [[P:%.*]], align 4
158 ; BE-NEXT: [[TMP1:%.*]] = lshr i64 [[V]], 32
159 ; BE-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
160 ; BE-NEXT: [[TMP3:%.*]] = bitcast i32 [[TMP2]] to float
161 ; BE-NEXT: ret float [[TMP3]]
163 store i64 %V, i64* %P
165 %P2 = bitcast i64* %P to float*
167 %A = load float, float* %P2
171 ;; i64 -> i8* (32-bit) forwarding
172 define i8* @coerce_mustalias7(i64 %V, i64* %P) {
173 ; LE-LABEL: @coerce_mustalias7(
174 ; LE-NEXT: store i64 [[V:%.*]], i64* [[P:%.*]], align 4
175 ; LE-NEXT: [[TMP1:%.*]] = trunc i64 [[V]] to i32
176 ; LE-NEXT: [[TMP2:%.*]] = inttoptr i32 [[TMP1]] to i8*
177 ; LE-NEXT: ret i8* [[TMP2]]
179 ; BE-LABEL: @coerce_mustalias7(
180 ; BE-NEXT: store i64 [[V:%.*]], i64* [[P:%.*]], align 4
181 ; BE-NEXT: [[TMP1:%.*]] = lshr i64 [[V]], 32
182 ; BE-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
183 ; BE-NEXT: [[TMP3:%.*]] = inttoptr i32 [[TMP2]] to i8*
184 ; BE-NEXT: ret i8* [[TMP3]]
186 store i64 %V, i64* %P
188 %P2 = bitcast i64* %P to i8**
190 %A = load i8*, i8** %P2
194 ; memset -> i16 forwarding.
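; A memset of the byte value 1 covers the whole i16, so the load below can be
; folded to the splatted constant 0x0101 = 257 (see the 'ret i16 257' check).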
195 define signext i16 @memset_to_i16_local(i16* %A) nounwind ssp {
196 ; CHECK-LABEL: @memset_to_i16_local(
198 ; CHECK-NEXT: [[CONV:%.*]] = bitcast i16* [[A:%.*]] to i8*
199 ; CHECK-NEXT: tail call void @llvm.memset.p0i8.i64(i8* [[CONV]], i8 1, i64 200, i1 false)
200 ; CHECK-NEXT: ret i16 257
203 %conv = bitcast i16* %A to i8*
204 tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 1, i64 200, i1 false)
205 %arrayidx = getelementptr inbounds i16, i16* %A, i64 42
206 %ttmp2 = load i16, i16* %arrayidx
210 ; memset -> float forwarding.
211 define float @memset_to_float_local(float* %A, i8 %Val) nounwind ssp {
212 ; CHECK-LABEL: @memset_to_float_local(
214 ; CHECK-NEXT: [[CONV:%.*]] = bitcast float* [[A:%.*]] to i8*
215 ; CHECK-NEXT: tail call void @llvm.memset.p0i8.i64(i8* [[CONV]], i8 [[VAL:%.*]], i64 400, i1 false)
216 ; CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[VAL]] to i32
217 ; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[TMP0]], 8
218 ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP0]], [[TMP1]]
219 ; CHECK-NEXT: [[TMP3:%.*]] = shl i32 [[TMP2]], 16
220 ; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP2]], [[TMP3]]
221 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i32 [[TMP4]] to float
222 ; CHECK-NEXT: ret float [[TMP5]]
225 %conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
226 tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 %Val, i64 400, i1 false)
227 %arrayidx = getelementptr inbounds float, float* %A, i64 42 ; <float*> [#uses=1]
228 %ttmp2 = load float, float* %arrayidx ; <float> [#uses=1]
232 ;; non-local memset -> i16 load forwarding.
233 define i16 @memset_to_i16_nonlocal0(i16* %P, i1 %cond) {
234 ; CHECK-LABEL: @memset_to_i16_nonlocal0(
235 ; CHECK-NEXT: [[P3:%.*]] = bitcast i16* [[P:%.*]] to i8*
236 ; CHECK-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
238 ; CHECK-NEXT: tail call void @llvm.memset.p0i8.i64(i8* [[P3]], i8 1, i64 400, i1 false)
239 ; CHECK-NEXT: br label [[CONT:%.*]]
241 ; CHECK-NEXT: tail call void @llvm.memset.p0i8.i64(i8* [[P3]], i8 2, i64 400, i1 false)
242 ; CHECK-NEXT: br label [[CONT]]
244 ; CHECK-NEXT: [[A:%.*]] = phi i16 [ 514, [[F]] ], [ 257, [[T]] ]
245 ; CHECK-NEXT: ret i16 [[A]]
247 %P3 = bitcast i16* %P to i8*
248 br i1 %cond, label %T, label %F
250 tail call void @llvm.memset.p0i8.i64(i8* %P3, i8 1, i64 400, i1 false)
254 tail call void @llvm.memset.p0i8.i64(i8* %P3, i8 2, i64 400, i1 false)
258 %P2 = getelementptr i16, i16* %P, i32 4
259 %A = load i16, i16* %P2
264 @GCst = constant {i32, float, i32 } { i32 42, float 14., i32 97 }
265 @GCst_as1 = addrspace(1) constant {i32, float, i32 } { i32 42, float 14., i32 97 }
267 ; memcpy -> float forwarding.
268 define float @memcpy_to_float_local(float* %A) nounwind ssp {
269 ; CHECK-LABEL: @memcpy_to_float_local(
271 ; CHECK-NEXT: [[CONV:%.*]] = bitcast float* [[A:%.*]] to i8*
272 ; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[CONV]], i8* bitcast ({ i32, float, i32 }* @GCst to i8*), i64 12, i1 false)
273 ; CHECK-NEXT: ret float 1.400000e+01
276 %conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
277 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %conv, i8* bitcast ({i32, float, i32 }* @GCst to i8*), i64 12, i1 false)
278 %arrayidx = getelementptr inbounds float, float* %A, i64 1 ; <float*> [#uses=1]
279 %ttmp2 = load float, float* %arrayidx ; <float> [#uses=1]
283 ; memcpy from address space 1
284 define float @memcpy_to_float_local_as1(float* %A) nounwind ssp {
285 ; CHECK-LABEL: @memcpy_to_float_local_as1(
287 ; CHECK-NEXT: [[CONV:%.*]] = bitcast float* [[A:%.*]] to i8*
288 ; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p1i8.i64(i8* [[CONV]], i8 addrspace(1)* bitcast ({ i32, float, i32 } addrspace(1)* @GCst_as1 to i8 addrspace(1)*), i64 12, i1 false)
289 ; CHECK-NEXT: ret float 1.400000e+01
292 %conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
293 tail call void @llvm.memcpy.p0i8.p1i8.i64(i8* %conv, i8 addrspace(1)* bitcast ({i32, float, i32 } addrspace(1)* @GCst_as1 to i8 addrspace(1)*), i64 12, i1 false)
294 %arrayidx = getelementptr inbounds float, float* %A, i64 1 ; <float*> [#uses=1]
295 %ttmp2 = load float, float* %arrayidx ; <float> [#uses=1]
299 ;; non-local i32/float -> i8 load forwarding.
300 define i8 @coerce_mustalias_nonlocal0(i32* %P, i1 %cond) {
301 ; LE-LABEL: @coerce_mustalias_nonlocal0(
302 ; LE-NEXT: [[P2:%.*]] = bitcast i32* [[P:%.*]] to float*
303 ; LE-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
305 ; LE-NEXT: store i32 42, i32* [[P]], align 4
306 ; LE-NEXT: br label [[CONT:%.*]]
308 ; LE-NEXT: store float 1.000000e+00, float* [[P2]], align 4
309 ; LE-NEXT: br label [[CONT]]
311 ; LE-NEXT: [[A:%.*]] = phi i8 [ 0, [[F]] ], [ 42, [[T]] ]
312 ; LE-NEXT: ret i8 [[A]]
314 ; BE-LABEL: @coerce_mustalias_nonlocal0(
315 ; BE-NEXT: [[P2:%.*]] = bitcast i32* [[P:%.*]] to float*
316 ; BE-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
318 ; BE-NEXT: store i32 42, i32* [[P]], align 4
319 ; BE-NEXT: br label [[CONT:%.*]]
321 ; BE-NEXT: store float 1.000000e+00, float* [[P2]], align 4
322 ; BE-NEXT: br label [[CONT]]
324 ; BE-NEXT: [[A:%.*]] = phi i8 [ 63, [[F]] ], [ 0, [[T]] ]
325 ; BE-NEXT: ret i8 [[A]]
327 %P2 = bitcast i32* %P to float*
328 %P3 = bitcast i32* %P to i8*
329 br i1 %cond, label %T, label %F
331 store i32 42, i32* %P
335 store float 1.0, float* %P2
339 %A = load i8, i8* %P3
345 ;; non-local i32/float -> i8 load forwarding. This also tests that the "P3"
346 ;; bitcast equivalence can be properly phi translated.
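;; Here %P3 is only created in the join block, so when GVN walks the load back
;; into %T and %F it must recognize that the bitcast of %P names the same
;; location as the i32 store to %P and the float store to %P2; the checks below
;; expect the load to be replaced by a phi of the forwarded byte values.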
347 define i8 @coerce_mustalias_nonlocal1(i32* %P, i1 %cond) {
348 ; LE-LABEL: @coerce_mustalias_nonlocal1(
349 ; LE-NEXT: [[P2:%.*]] = bitcast i32* [[P:%.*]] to float*
350 ; LE-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
352 ; LE-NEXT: store i32 42, i32* [[P]], align 4
353 ; LE-NEXT: br label [[CONT:%.*]]
355 ; LE-NEXT: store float 1.000000e+00, float* [[P2]], align 4
356 ; LE-NEXT: br label [[CONT]]
358 ; LE-NEXT: [[A:%.*]] = phi i8 [ 0, [[F]] ], [ 42, [[T]] ]
359 ; LE-NEXT: ret i8 [[A]]
361 ; BE-LABEL: @coerce_mustalias_nonlocal1(
362 ; BE-NEXT: [[P2:%.*]] = bitcast i32* [[P:%.*]] to float*
363 ; BE-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
365 ; BE-NEXT: store i32 42, i32* [[P]], align 4
366 ; BE-NEXT: br label [[CONT:%.*]]
368 ; BE-NEXT: store float 1.000000e+00, float* [[P2]], align 4
369 ; BE-NEXT: br label [[CONT]]
371 ; BE-NEXT: [[A:%.*]] = phi i8 [ 63, [[F]] ], [ 0, [[T]] ]
372 ; BE-NEXT: ret i8 [[A]]
374 %P2 = bitcast i32* %P to float*
375 br i1 %cond, label %T, label %F
377 store i32 42, i32* %P
381 store float 1.0, float* %P2
385 %P3 = bitcast i32* %P to i8*
386 %A = load i8, i8* %P3
392 ;; non-local i32 -> i8 partial redundancy load forwarding.
393 define i8 @coerce_mustalias_pre0(i32* %P, i1 %cond) {
394 ; LE-LABEL: @coerce_mustalias_pre0(
395 ; LE-NEXT: [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
396 ; LE-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
398 ; LE-NEXT: store i32 42, i32* [[P]], align 4
399 ; LE-NEXT: br label [[CONT:%.*]]
401 ; LE-NEXT: [[A_PRE:%.*]] = load i8, i8* [[P3]], align 1
402 ; LE-NEXT: br label [[CONT]]
404 ; LE-NEXT: [[A:%.*]] = phi i8 [ [[A_PRE]], [[F]] ], [ 42, [[T]] ]
405 ; LE-NEXT: ret i8 [[A]]
407 ; BE-LABEL: @coerce_mustalias_pre0(
408 ; BE-NEXT: [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
409 ; BE-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
411 ; BE-NEXT: store i32 42, i32* [[P]], align 4
412 ; BE-NEXT: br label [[CONT:%.*]]
414 ; BE-NEXT: [[A_PRE:%.*]] = load i8, i8* [[P3]], align 1
415 ; BE-NEXT: br label [[CONT]]
417 ; BE-NEXT: [[A:%.*]] = phi i8 [ [[A_PRE]], [[F]] ], [ 0, [[T]] ]
418 ; BE-NEXT: ret i8 [[A]]
420 %P3 = bitcast i32* %P to i8*
421 br i1 %cond, label %T, label %F
423 store i32 42, i32* %P
430 %A = load i8, i8* %P3
435 ;;===----------------------------------------------------------------------===;;
436 ;; Store -> Load and Load -> Load forwarding where src and dst are different
437 ;; types, and the reload is an offset from the store pointer.
438 ;;===----------------------------------------------------------------------===;;
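;; As a minimal sketch (little-endian RUN line): loading the byte at offset 2
;; of a stored i32 is rewritten to a shift-and-truncate of the stored value
;; instead of a reload,
;;
;;   store i32 %V, i32* %P
;;   %A = load i8, i8* %P3        ; %P3 = %P + 2
;;
;; becomes
;;
;;   store i32 %V, i32* %P
;;   %1 = lshr i32 %V, 16
;;   %A = trunc i32 %1 to i8
;;
;; On big-endian targets the shift amount is 8 instead (see the BE checks).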
440 ;; i32 -> i8 forwarding.
442 define i8 @coerce_offset0(i32 %V, i32* %P) {
443 ; LE-LABEL: @coerce_offset0(
444 ; LE-NEXT: store i32 [[V:%.*]], i32* [[P:%.*]], align 4
445 ; LE-NEXT: [[TMP1:%.*]] = lshr i32 [[V]], 16
446 ; LE-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8
447 ; LE-NEXT: ret i8 [[TMP2]]
449 ; BE-LABEL: @coerce_offset0(
450 ; BE-NEXT: store i32 [[V:%.*]], i32* [[P:%.*]], align 4
451 ; BE-NEXT: [[TMP1:%.*]] = lshr i32 [[V]], 8
452 ; BE-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8
453 ; BE-NEXT: ret i8 [[TMP2]]
455 store i32 %V, i32* %P
457 %P2 = bitcast i32* %P to i8*
458 %P3 = getelementptr i8, i8* %P2, i32 2
460 %A = load i8, i8* %P3
464 ;; non-local i32/float -> i8 load forwarding.
465 define i8 @coerce_offset_nonlocal0(i32* %P, i1 %cond) {
466 ; LE-LABEL: @coerce_offset_nonlocal0(
467 ; LE-NEXT: [[P2:%.*]] = bitcast i32* [[P:%.*]] to float*
468 ; LE-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
470 ; LE-NEXT: store i32 57005, i32* [[P]], align 4
471 ; LE-NEXT: br label [[CONT:%.*]]
473 ; LE-NEXT: store float 1.000000e+00, float* [[P2]], align 4
474 ; LE-NEXT: br label [[CONT]]
476 ; LE-NEXT: [[A:%.*]] = phi i8 [ -128, [[F]] ], [ 0, [[T]] ]
477 ; LE-NEXT: ret i8 [[A]]
479 ; BE-LABEL: @coerce_offset_nonlocal0(
480 ; BE-NEXT: [[P2:%.*]] = bitcast i32* [[P:%.*]] to float*
481 ; BE-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
483 ; BE-NEXT: store i32 57005, i32* [[P]], align 4
484 ; BE-NEXT: br label [[CONT:%.*]]
486 ; BE-NEXT: store float 1.000000e+00, float* [[P2]], align 4
487 ; BE-NEXT: br label [[CONT]]
489 ; BE-NEXT: [[A:%.*]] = phi i8 [ 0, [[F]] ], [ -34, [[T]] ]
490 ; BE-NEXT: ret i8 [[A]]
492 %P2 = bitcast i32* %P to float*
493 %P3 = bitcast i32* %P to i8*
494 %P4 = getelementptr i8, i8* %P3, i32 2
495 br i1 %cond, label %T, label %F
497 store i32 57005, i32* %P
501 store float 1.0, float* %P2
505 %A = load i8, i8* %P4
511 ;; non-local i32 -> i8 partial redundancy load forwarding.
512 define i8 @coerce_offset_pre0(i32* %P, i1 %cond) {
513 ; CHECK-LABEL: @coerce_offset_pre0(
514 ; CHECK-NEXT: [[P3:%.*]] = bitcast i32* [[P:%.*]] to i8*
515 ; CHECK-NEXT: [[P4:%.*]] = getelementptr i8, i8* [[P3]], i32 2
516 ; CHECK-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
518 ; CHECK-NEXT: store i32 42, i32* [[P]], align 4
519 ; CHECK-NEXT: br label [[CONT:%.*]]
521 ; CHECK-NEXT: [[A_PRE:%.*]] = load i8, i8* [[P4]], align 1
522 ; CHECK-NEXT: br label [[CONT]]
524 ; CHECK-NEXT: [[A:%.*]] = phi i8 [ [[A_PRE]], [[F]] ], [ 0, [[T]] ]
525 ; CHECK-NEXT: ret i8 [[A]]
527 %P3 = bitcast i32* %P to i8*
528 %P4 = getelementptr i8, i8* %P3, i32 2
529 br i1 %cond, label %T, label %F
531 store i32 42, i32* %P
538 %A = load i8, i8* %P4
543 define i32 @chained_load(i32** %p, i32 %x, i32 %y) {
544 ; CHECK-LABEL: @chained_load(
545 ; CHECK-NEXT: block1:
546 ; CHECK-NEXT: [[A:%.*]] = alloca i32*, align 4
547 ; CHECK-NEXT: [[Z:%.*]] = load i32*, i32** [[P:%.*]], align 4
548 ; CHECK-NEXT: store i32* [[Z]], i32** [[A]], align 4
549 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
550 ; CHECK-NEXT: br i1 [[CMP]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
552 ; CHECK-NEXT: br label [[BLOCK4:%.*]]
554 ; CHECK-NEXT: br label [[BLOCK4]]
556 ; CHECK-NEXT: [[D:%.*]] = load i32, i32* [[Z]], align 4
557 ; CHECK-NEXT: ret i32 [[D]]
562 %z = load i32*, i32** %p
563 store i32* %z, i32** %A
564 %cmp = icmp eq i32 %x, %y
565 br i1 %cmp, label %block2, label %block3
568 %a = load i32*, i32** %p
572 %b = load i32*, i32** %p
576 %c = load i32*, i32** %p
577 %d = load i32, i32* %c
583 declare i1 @cond() readonly
584 declare i1 @cond2() readonly
586 define i32 @phi_trans2() {
587 ; CHECK-LABEL: @phi_trans2(
589 ; CHECK-NEXT: [[P:%.*]] = alloca i32, i32 400, align 4
590 ; CHECK-NEXT: br label [[F1:%.*]]
592 ; CHECK-NEXT: [[A:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ 2, [[F:%.*]] ]
593 ; CHECK-NEXT: [[COND2:%.*]] = call i1 @cond()
594 ; CHECK-NEXT: br i1 [[COND2]], label [[T1:%.*]], label [[TY:%.*]]
596 ; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, i32* [[P]], i32 [[A]]
597 ; CHECK-NEXT: [[X:%.*]] = load i32, i32* [[P2]], align 4
598 ; CHECK-NEXT: [[COND:%.*]] = call i1 @cond2()
599 ; CHECK-NEXT: br i1 [[COND]], label [[TX:%.*]], label [[F]]
601 ; CHECK-NEXT: [[P3:%.*]] = getelementptr i32, i32* [[P]], i32 2
602 ; CHECK-NEXT: store i32 17, i32* [[P3]], align 4
603 ; CHECK-NEXT: store i32 42, i32* [[P2]], align 4
604 ; CHECK-NEXT: br label [[F1]]
606 ; CHECK-NEXT: ret i32 [[X]]
608 ; CHECK-NEXT: ret i32 0
611 %P = alloca i32, i32 400
615 %A = phi i32 [1, %entry], [2, %F]
616 %cond2 = call i1 @cond()
617 br i1 %cond2, label %T1, label %TY
620 %P2 = getelementptr i32, i32* %P, i32 %A
621 %x = load i32, i32* %P2
622 %cond = call i1 @cond2()
623 br i1 %cond, label %TX, label %F
626 %P3 = getelementptr i32, i32* %P, i32 2
627 store i32 17, i32* %P3
629 store i32 42, i32* %P2 ; Provides "P[A]".
633 ; This load should not be compiled to 'ret i32 42'. An overly clever
634 ; implementation of GVN would see that we're returning 17 if the loop
635 ; executes once or 42 if it executes more than that, but we'd have to do
636 ; loop restructuring to expose this, and GVN shouldn't do this sort of CFG transformation.
644 define i32 @phi_trans3(i32* %p, i32 %x, i32 %y, i32 %z) {
645 ; CHECK-LABEL: @phi_trans3(
646 ; CHECK-NEXT: block1:
647 ; CHECK-NEXT: [[CMPXY:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
648 ; CHECK-NEXT: br i1 [[CMPXY]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
650 ; CHECK-NEXT: store i32 87, i32* [[P:%.*]], align 4
651 ; CHECK-NEXT: br label [[BLOCK4:%.*]]
653 ; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, i32* [[P]], i32 43
654 ; CHECK-NEXT: store i32 97, i32* [[P2]], align 4
655 ; CHECK-NEXT: br label [[BLOCK4]]
657 ; CHECK-NEXT: [[D:%.*]] = phi i32 [ 87, [[BLOCK2]] ], [ 97, [[BLOCK3]] ]
658 ; CHECK-NEXT: br i1 [[CMPXY]], label [[BLOCK5:%.*]], label [[EXIT:%.*]]
660 ; CHECK-NEXT: br i1 true, label [[BLOCK6:%.*]], label [[BLOCK5_EXIT_CRIT_EDGE:%.*]]
661 ; CHECK: block5.exit_crit_edge:
662 ; CHECK-NEXT: br label [[EXIT]]
664 ; CHECK-NEXT: br i1 true, label [[BLOCK7:%.*]], label [[BLOCK6_EXIT_CRIT_EDGE:%.*]]
665 ; CHECK: block6.exit_crit_edge:
666 ; CHECK-NEXT: br label [[EXIT]]
668 ; CHECK-NEXT: ret i32 [[D]]
670 ; CHECK-NEXT: ret i32 -1
673 %cmpxy = icmp eq i32 %x, %y
674 br i1 %cmpxy, label %block2, label %block3
677 store i32 87, i32* %p
681 %p2 = getelementptr i32, i32* %p, i32 43
682 store i32 97, i32* %p2
686 %A = phi i32 [-1, %block2], [42, %block3]
687 br i1 %cmpxy, label %block5, label %exit
692 br i1 %cmpxy, label %block6, label %exit
695 %C = getelementptr i32, i32* %p, i32 %B
696 br i1 %cmpxy, label %block7, label %exit
699 %D = load i32, i32* %C
707 define i8 @phi_trans4(i8* %p) {
708 ; CHECK-LABEL: @phi_trans4(
710 ; CHECK-NEXT: [[X3:%.*]] = getelementptr i8, i8* [[P:%.*]], i32 192
711 ; CHECK-NEXT: store i8 -64, i8* [[X3]], align 1
712 ; CHECK-NEXT: [[X:%.*]] = getelementptr i8, i8* [[P]], i32 4
713 ; CHECK-NEXT: [[Y:%.*]] = load i8, i8* [[X]], align 1
714 ; CHECK-NEXT: br label [[LOOP:%.*]]
716 ; CHECK-NEXT: [[Y2:%.*]] = phi i8 [ [[Y]], [[ENTRY:%.*]] ], [ 0, [[LOOP]] ]
717 ; CHECK-NEXT: [[COND:%.*]] = call i1 @cond2()
718 ; CHECK-NEXT: [[Z:%.*]] = bitcast i8* [[X3]] to i32*
719 ; CHECK-NEXT: store i32 0, i32* [[Z]], align 4
720 ; CHECK-NEXT: br i1 [[COND]], label [[LOOP]], label [[OUT:%.*]]
722 ; CHECK-NEXT: [[R:%.*]] = add i8 [[Y]], [[Y2]]
723 ; CHECK-NEXT: ret i8 [[R]]
726 %X3 = getelementptr i8, i8* %p, i32 192
727 store i8 192, i8* %X3
729 %X = getelementptr i8, i8* %p, i32 4
734 %i = phi i32 [4, %entry], [192, %loop]
735 %X2 = getelementptr i8, i8* %p, i32 %i
736 %Y2 = load i8, i8* %X2
739 %cond = call i1 @cond2()
741 %Z = bitcast i8 *%X3 to i32*
743 br i1 %cond, label %loop, label %out
750 define i8 @phi_trans5(i8* %p) {
751 ; CHECK-LABEL: @phi_trans5(
753 ; CHECK-NEXT: [[X4:%.*]] = getelementptr i8, i8* [[P:%.*]], i32 2
754 ; CHECK-NEXT: store i8 19, i8* [[X4]], align 1
755 ; CHECK-NEXT: [[X:%.*]] = getelementptr i8, i8* [[P]], i32 4
756 ; CHECK-NEXT: [[Y:%.*]] = load i8, i8* [[X]], align 1
757 ; CHECK-NEXT: br label [[LOOP:%.*]]
759 ; CHECK-NEXT: [[Y2:%.*]] = phi i8 [ [[Y]], [[ENTRY:%.*]] ], [ [[Y2_PRE:%.*]], [[CONT:%.*]] ]
760 ; CHECK-NEXT: [[I:%.*]] = phi i32 [ 4, [[ENTRY]] ], [ 3, [[CONT]] ]
761 ; CHECK-NEXT: [[X2:%.*]] = getelementptr i8, i8* [[P]], i32 [[I]]
762 ; CHECK-NEXT: [[COND:%.*]] = call i1 @cond2()
763 ; CHECK-NEXT: br i1 [[COND]], label [[CONT]], label [[OUT:%.*]]
765 ; CHECK-NEXT: [[Z:%.*]] = getelementptr i8, i8* [[X2]], i32 -1
766 ; CHECK-NEXT: [[Z2:%.*]] = bitcast i8* [[Z]] to i32*
767 ; CHECK-NEXT: store i32 50462976, i32* [[Z2]], align 4
768 ; CHECK-NEXT: [[X2_PHI_TRANS_INSERT:%.*]] = getelementptr i8, i8* [[P]], i32 3
769 ; CHECK-NEXT: [[Y2_PRE]] = load i8, i8* [[X2_PHI_TRANS_INSERT]], align 1
770 ; CHECK-NEXT: br label [[LOOP]]
772 ; CHECK-NEXT: [[R:%.*]] = add i8 [[Y]], [[Y2]]
773 ; CHECK-NEXT: ret i8 [[R]]
777 %X4 = getelementptr i8, i8* %p, i32 2
780 %X = getelementptr i8, i8* %p, i32 4
785 %i = phi i32 [4, %entry], [3, %cont]
786 %X2 = getelementptr i8, i8* %p, i32 %i
787 %Y2 = load i8, i8* %X2 ; Ensure this load is not being incorrectly replaced.
788 %cond = call i1 @cond2()
789 br i1 %cond, label %cont, label %out
792 %Z = getelementptr i8, i8* %X2, i32 -1
793 %Z2 = bitcast i8 *%Z to i32*
794 store i32 50462976, i32* %Z2 ;; (1 << 8) | (2 << 16) | (3 << 24)
804 declare void @use_i32(i32) readonly
806 ; indirectbr currently prevents MergeBlockIntoPredecessor from merging latch
807 ; into header. Make sure we translate the address for %l1 correctly where
808 ; parts of the address computations are in different basic blocks.
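; Roughly, the checks below expect the load in the latch to become fully
; redundant: GVN inserts a translated address and a pre-load on the
; latch->header critical edge, substituting %iv.next for the header phi %iv:
;
;   %gep = getelementptr i32, i32* %x, i32 %iv.next   ; translated address (name illustrative)
;   %l1.pre = load i32, i32* %gep
;
; and %l1 in the header becomes a phi of %l0 and %l1.pre.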
809 define i32 @phi_trans6(i32* noalias nocapture readonly %x, i1 %cond) {
810 ; CHECK-LABEL: @phi_trans6(
812 ; CHECK-NEXT: [[L0:%.*]] = load i32, i32* [[X:%.*]], align 4
813 ; CHECK-NEXT: call void @use_i32(i32 [[L0]])
814 ; CHECK-NEXT: br label [[HEADER:%.*]]
816 ; CHECK-NEXT: [[L1:%.*]] = phi i32 [ [[L0]], [[ENTRY:%.*]] ], [ [[L1_PRE:%.*]], [[LATCH_HEADER_CRIT_EDGE:%.*]] ]
817 ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LATCH_HEADER_CRIT_EDGE]] ]
818 ; CHECK-NEXT: indirectbr i8* blockaddress(@phi_trans6, [[LATCH:%.*]]), [label %latch]
820 ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
821 ; CHECK-NEXT: br i1 [[COND:%.*]], label [[EXIT:%.*]], label [[LATCH_HEADER_CRIT_EDGE]]
822 ; CHECK: latch.header_crit_edge:
823 ; CHECK-NEXT: [[GEP_1_PHI_TRANS_INSERT_PHI_TRANS_INSERT:%.*]] = getelementptr i32, i32* [[X]], i32 [[IV_NEXT]]
824 ; CHECK-NEXT: [[L1_PRE]] = load i32, i32* [[GEP_1_PHI_TRANS_INSERT_PHI_TRANS_INSERT]], align 4
825 ; CHECK-NEXT: br label [[HEADER]]
827 ; CHECK-NEXT: ret i32 [[L1]]
830 %l0 = load i32, i32* %x
831 call void @use_i32(i32 %l0)
835 %iv = phi i32 [0, %entry], [ %iv.next, %latch]
836 indirectbr i8* blockaddress(@phi_trans6, %latch), [label %latch]
839 %gep.1 = getelementptr i32, i32* %x, i32 %iv
840 %l1 = load i32, i32* %gep.1
841 %iv.next = add i32 %iv, 1
842 br i1 %cond, label %exit, label %header
848 ; FIXME: Currently we fail to translate the PHI in this case.
849 define i32 @phi_trans7(i32* noalias nocapture readonly %x, i1 %cond) {
850 ; CHECK-LABEL: @phi_trans7(
852 ; CHECK-NEXT: [[L0:%.*]] = load i32, i32* [[X:%.*]], align 4
853 ; CHECK-NEXT: call void @use_i32(i32 [[L0]])
854 ; CHECK-NEXT: br label [[HEADER:%.*]]
856 ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 2, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH_HEADER_CRIT_EDGE:%.*]] ]
857 ; CHECK-NEXT: [[OFFSET:%.*]] = add i32 [[IV]], -2
858 ; CHECK-NEXT: indirectbr i8* blockaddress(@phi_trans7, [[LATCH:%.*]]), [label %latch]
860 ; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr i32, i32* [[X]], i32 [[OFFSET]]
861 ; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[GEP_1]], align 4
862 ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
863 ; CHECK-NEXT: br i1 [[COND:%.*]], label [[EXIT:%.*]], label [[LATCH_HEADER_CRIT_EDGE]]
864 ; CHECK: latch.header_crit_edge:
865 ; CHECK-NEXT: br label [[HEADER]]
867 ; CHECK-NEXT: ret i32 [[L1]]
870 %l0 = load i32, i32* %x
871 call void @use_i32(i32 %l0)
875 %iv = phi i32 [2, %entry], [ %iv.next, %latch]
876 %offset = add i32 %iv, -2
877 indirectbr i8* blockaddress(@phi_trans7, %latch), [label %latch]
880 %gep.1 = getelementptr i32, i32* %x, i32 %offset
881 %l1 = load i32, i32* %gep.1
882 %iv.next = add i32 %iv, 1
883 br i1 %cond, label %exit, label %header
889 ; FIXME: Currently we fail to translate the PHI in this case.
890 define i32 @phi_trans8(i32* noalias nocapture readonly %x, i1 %cond) {
891 ; CHECK-LABEL: @phi_trans8(
893 ; CHECK-NEXT: [[L0:%.*]] = load i32, i32* [[X:%.*]], align 4
894 ; CHECK-NEXT: call void @use_i32(i32 [[L0]])
895 ; CHECK-NEXT: br label [[HEADER:%.*]]
897 ; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 2, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LATCH_HEADER_CRIT_EDGE:%.*]] ]
898 ; CHECK-NEXT: indirectbr i8* blockaddress(@phi_trans8, [[LATCH:%.*]]), [label %latch]
900 ; CHECK-NEXT: [[OFFSET:%.*]] = add i32 [[IV]], -2
901 ; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr i32, i32* [[X]], i32 [[OFFSET]]
902 ; CHECK-NEXT: [[L1:%.*]] = load i32, i32* [[GEP_1]], align 4
903 ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
904 ; CHECK-NEXT: br i1 [[COND:%.*]], label [[EXIT:%.*]], label [[LATCH_HEADER_CRIT_EDGE]]
905 ; CHECK: latch.header_crit_edge:
906 ; CHECK-NEXT: br label [[HEADER]]
908 ; CHECK-NEXT: ret i32 [[L1]]
911 %l0 = load i32, i32* %x
912 call void @use_i32(i32 %l0)
916 %iv = phi i32 [2, %entry], [ %iv.next, %latch]
917 indirectbr i8* blockaddress(@phi_trans8, %latch), [label %latch]
920 %offset = add i32 %iv, -2
921 %gep.1 = getelementptr i32, i32* %x, i32 %offset
922 %l1 = load i32, i32* %gep.1
923 %iv.next = add i32 %iv, 1
924 br i1 %cond, label %exit, label %header
933 define i32 @memset_to_load() nounwind readnone {
934 ; CHECK-LABEL: @memset_to_load(
936 ; CHECK-NEXT: [[X:%.*]] = alloca [256 x i32], align 4
937 ; CHECK-NEXT: [[TTMP:%.*]] = bitcast [256 x i32]* [[X]] to i8*
938 ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[TTMP]], i8 0, i64 1024, i1 false)
939 ; CHECK-NEXT: ret i32 0
942 %x = alloca [256 x i32], align 4 ; <[256 x i32]*> [#uses=2]
943 %ttmp = bitcast [256 x i32]* %x to i8* ; <i8*> [#uses=1]
944 call void @llvm.memset.p0i8.i64(i8* align 4 %ttmp, i8 0, i64 1024, i1 false)
945 %arraydecay = getelementptr inbounds [256 x i32], [256 x i32]* %x, i32 0, i32 0 ; <i32*>
946 %ttmp1 = load i32, i32* %arraydecay ; <i32> [#uses=1]
951 ;;===----------------------------------------------------------------------===;;
952 ;; Load -> Load forwarding in partial alias case.
953 ;;===----------------------------------------------------------------------===;;
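;; As a minimal sketch (little-endian case, using the names from the first test
;; below): an i8 load that partially overlaps an earlier, wider i32 load of the
;; same base pointer is rewritten to extract the byte from the loaded value,
;;
;;   %ttmp2 = load i32, i32* %0          ; wide load of P
;;   %ttmp5 = load i8, i8* %add.ptr      ; overlapping load of P+1
;;
;; becomes
;;
;;   %ttmp2 = load i32, i32* %0
;;   %1 = lshr i32 %ttmp2, 8
;;   %ttmp5 = trunc i32 %1 to i8
;;
;; with a shift of 16 on big-endian targets (see the LE/BE check lines).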
955 define i32 @load_load_partial_alias(i8* %P) nounwind ssp {
956 ; CHECK-LABEL: @load_load_partial_alias(
958 ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[P:%.*]] to i32*
959 ; CHECK-NEXT: [[TTMP2:%.*]] = load i32, i32* [[TMP0]], align 4
960 ; LE-NEXT: [[TMP1:%.*]] = lshr i32 [[TTMP2]], 8
961 ; BE-NEXT: [[TMP1:%.*]] = lshr i32 [[TTMP2]], 16
962 ; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8
963 ; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP2]] to i32
964 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TTMP2]], [[CONV]]
965 ; CHECK-NEXT: ret i32 [[ADD]]
968 %0 = bitcast i8* %P to i32*
969 %ttmp2 = load i32, i32* %0
970 %add.ptr = getelementptr inbounds i8, i8* %P, i64 1
971 %ttmp5 = load i8, i8* %add.ptr
972 %conv = zext i8 %ttmp5 to i32
973 %add = add nsw i32 %ttmp2, %conv
978 ; Cross block partial alias case.
979 define i32 @load_load_partial_alias_cross_block(i8* %P) nounwind ssp {
980 ; CHECK-LABEL: @load_load_partial_alias_cross_block(
982 ; CHECK-NEXT: [[XX:%.*]] = bitcast i8* [[P:%.*]] to i32*
983 ; CHECK-NEXT: [[X1:%.*]] = load i32, i32* [[XX]], align 4
984 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X1]], 127
985 ; LE-NEXT: [[TMP0:%.*]] = lshr i32 [[X1]], 8
986 ; BE-NEXT: [[TMP0:%.*]] = lshr i32 [[X1]], 16
987 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[TMP0]] to i8
988 ; CHECK-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[IF_END:%.*]]
989 ; CHECK: land.lhs.true:
990 ; CHECK-NEXT: [[CONV6:%.*]] = zext i8 [[TMP1]] to i32
991 ; CHECK-NEXT: ret i32 [[CONV6]]
993 ; CHECK-NEXT: ret i32 52
996 %xx = bitcast i8* %P to i32*
997 %x1 = load i32, i32* %xx, align 4
998 %cmp = icmp eq i32 %x1, 127
999 br i1 %cmp, label %land.lhs.true, label %if.end
1001 land.lhs.true: ; preds = %entry
1002 %arrayidx4 = getelementptr inbounds i8, i8* %P, i64 1
1003 %ttmp5 = load i8, i8* %arrayidx4, align 1
1004 %conv6 = zext i8 %ttmp5 to i32
1011 define i32 @load_load_partial_alias_cross_block_phi_trans(i8* %P) nounwind {
1012 ; CHECK-LABEL: @load_load_partial_alias_cross_block_phi_trans(
1013 ; CHECK-NEXT: entry:
1014 ; CHECK-NEXT: [[XX:%.*]] = bitcast i8* [[P:%.*]] to i32*
1015 ; CHECK-NEXT: [[X1:%.*]] = load i32, i32* [[XX]], align 4
1016 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X1]], 127
1017 ; LE-NEXT: [[TMP0:%.*]] = lshr i32 [[X1]], 16
1018 ; BE-NEXT: [[TMP0:%.*]] = lshr i32 [[X1]], 8
1019 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[TMP0]] to i8
1020 ; LE-NEXT: [[TMP2:%.*]] = lshr i32 [[X1]], 8
1021 ; BE-NEXT: [[TMP2:%.*]] = lshr i32 [[X1]], 16
1022 ; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
1023 ; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[ELSE:%.*]]
1025 ; CHECK-NEXT: br label [[JOIN:%.*]]
1027 ; CHECK-NEXT: br label [[JOIN]]
1029 ; CHECK-NEXT: [[TTMP5:%.*]] = phi i8 [ [[TMP3]], [[IF]] ], [ [[TMP1]], [[ELSE]] ]
1030 ; CHECK-NEXT: [[CONV6:%.*]] = zext i8 [[TTMP5]] to i32
1031 ; CHECK-NEXT: ret i32 [[CONV6]]
1033 ; CHECK-NEXT: ret i32 52
1036 %xx = bitcast i8* %P to i32*
1037 %x1 = load i32, i32* %xx, align 4
1038 %cmp = icmp eq i32 %x1, 127
1039 br i1 %cmp, label %if, label %else
1042 %arrayidx.if = getelementptr inbounds i8, i8* %P, i64 1
1046 %arrayidx.else = getelementptr inbounds i8, i8* %P, i64 2
1050 %idx = phi i64 [ 1, %if ], [ 2, %else ]
1051 %arrayidx4 = getelementptr inbounds i8, i8* %P, i64 %idx
1052 %ttmp5 = load i8, i8* %arrayidx4, align 1
1053 %conv6 = zext i8 %ttmp5 to i32
1060 define void @load_load_partial_alias_loop(i8* %P) {
1061 ; LE-LABEL: @load_load_partial_alias_loop(
1063 ; LE-NEXT: [[P_1:%.*]] = getelementptr i8, i8* [[P:%.*]], i64 1
1064 ; LE-NEXT: [[V_1:%.*]] = load i8, i8* [[P_1]], align 1
1065 ; LE-NEXT: call void @use.i8(i8 [[V_1]])
1066 ; LE-NEXT: [[P_1_32:%.*]] = bitcast i8* [[P_1]] to i32*
1067 ; LE-NEXT: [[V_1_32:%.*]] = load i32, i32* [[P_1_32]], align 4
1068 ; LE-NEXT: call void @use.i32(i32 [[V_1_32]])
1069 ; LE-NEXT: [[TMP0:%.*]] = trunc i32 [[V_1_32]] to i8
1070 ; LE-NEXT: br label [[LOOP:%.*]]
1072 ; LE-NEXT: [[V_I:%.*]] = phi i8 [ [[TMP0]], [[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[LOOP_LOOP_CRIT_EDGE:%.*]] ]
1073 ; LE-NEXT: [[I:%.*]] = phi i64 [ 1, [[ENTRY]] ], [ [[I_INC:%.*]], [[LOOP_LOOP_CRIT_EDGE]] ]
1074 ; LE-NEXT: [[P_I:%.*]] = getelementptr i8, i8* [[P]], i64 [[I]]
1075 ; LE-NEXT: call void @use.i8(i8 [[V_I]])
1076 ; LE-NEXT: [[P_I_32:%.*]] = bitcast i8* [[P_I]] to i32*
1077 ; LE-NEXT: [[V_I_32:%.*]] = load i32, i32* [[P_I_32]], align 4
1078 ; LE-NEXT: call void @use.i32(i32 [[V_I_32]])
1079 ; LE-NEXT: [[I_INC]] = add i64 [[I]], 1
1080 ; LE-NEXT: [[CMP:%.*]] = icmp ne i64 [[I_INC]], 64
1081 ; LE-NEXT: [[TMP1:%.*]] = lshr i32 [[V_I_32]], 8
1082 ; LE-NEXT: [[TMP2]] = trunc i32 [[TMP1]] to i8
1083 ; LE-NEXT: br i1 [[CMP]], label [[LOOP_LOOP_CRIT_EDGE]], label [[EXIT:%.*]]
1084 ; LE: loop.loop_crit_edge:
1085 ; LE-NEXT: br label [[LOOP]]
1089 ; BE-LABEL: @load_load_partial_alias_loop(
1091 ; BE-NEXT: [[P_1:%.*]] = getelementptr i8, i8* [[P:%.*]], i64 1
1092 ; BE-NEXT: [[V_1:%.*]] = load i8, i8* [[P_1]], align 1
1093 ; BE-NEXT: call void @use.i8(i8 [[V_1]])
1094 ; BE-NEXT: [[P_1_32:%.*]] = bitcast i8* [[P_1]] to i32*
1095 ; BE-NEXT: [[V_1_32:%.*]] = load i32, i32* [[P_1_32]], align 4
1096 ; BE-NEXT: call void @use.i32(i32 [[V_1_32]])
1097 ; BE-NEXT: [[TMP0:%.*]] = lshr i32 [[V_1_32]], 24
1098 ; BE-NEXT: [[TMP1:%.*]] = trunc i32 [[TMP0]] to i8
1099 ; BE-NEXT: br label [[LOOP:%.*]]
1101 ; BE-NEXT: [[V_I:%.*]] = phi i8 [ [[TMP1]], [[ENTRY:%.*]] ], [ [[TMP3:%.*]], [[LOOP_LOOP_CRIT_EDGE:%.*]] ]
1102 ; BE-NEXT: [[I:%.*]] = phi i64 [ 1, [[ENTRY]] ], [ [[I_INC:%.*]], [[LOOP_LOOP_CRIT_EDGE]] ]
1103 ; BE-NEXT: [[P_I:%.*]] = getelementptr i8, i8* [[P]], i64 [[I]]
1104 ; BE-NEXT: call void @use.i8(i8 [[V_I]])
1105 ; BE-NEXT: [[P_I_32:%.*]] = bitcast i8* [[P_I]] to i32*
1106 ; BE-NEXT: [[V_I_32:%.*]] = load i32, i32* [[P_I_32]], align 4
1107 ; BE-NEXT: call void @use.i32(i32 [[V_I_32]])
1108 ; BE-NEXT: [[I_INC]] = add i64 [[I]], 1
1109 ; BE-NEXT: [[CMP:%.*]] = icmp ne i64 [[I_INC]], 64
1110 ; BE-NEXT: [[TMP2:%.*]] = lshr i32 [[V_I_32]], 16
1111 ; BE-NEXT: [[TMP3]] = trunc i32 [[TMP2]] to i8
1112 ; BE-NEXT: br i1 [[CMP]], label [[LOOP_LOOP_CRIT_EDGE]], label [[EXIT:%.*]]
1113 ; BE: loop.loop_crit_edge:
1114 ; BE-NEXT: br label [[LOOP]]
1119 %P.1 = getelementptr i8, i8* %P, i64 1
1120 %v.1 = load i8, i8* %P.1
1121 call void @use.i8(i8 %v.1)
1122 %P.1.32 = bitcast i8* %P.1 to i32*
1123 %v.1.32 = load i32, i32* %P.1.32
1124 call void @use.i32(i32 %v.1.32)
1128 %i = phi i64 [ 1, %entry ], [ %i.inc, %loop ]
1129 %P.i = getelementptr i8, i8* %P, i64 %i
1130 %v.i = load i8, i8* %P.i
1131 call void @use.i8(i8 %v.i)
1132 %P.i.32 = bitcast i8* %P.i to i32*
1133 %v.i.32 = load i32, i32* %P.i.32
1134 call void @use.i32(i32 %v.i.32)
1135 %i.inc = add i64 %i, 1
1136 %cmp = icmp ne i64 %i.inc, 64
1137 br i1 %cmp, label %loop, label %exit
1143 declare void @use.i8(i8) readnone
1144 declare void @use.i32(i32) readnone
1146 @global = external local_unnamed_addr global i8, align 4
1148 define void @load_load_partial_alias_atomic(i8* %arg) {
1149 ; CHECK-LABEL: @load_load_partial_alias_atomic(
1151 ; CHECK-NEXT: [[TMP2_1:%.*]] = getelementptr inbounds i8, i8* [[ARG:%.*]], i64 1
1152 ; CHECK-NEXT: [[TMP2_2:%.*]] = bitcast i8* [[TMP2_1]] to i64*
1153 ; CHECK-NEXT: [[TMP2_3:%.*]] = load i64, i64* [[TMP2_2]], align 4
1154 ; CHECK-NEXT: [[TMP3_1:%.*]] = getelementptr inbounds i8, i8* [[ARG]], i64 2
1155 ; LE-NEXT: [[TMP0:%.*]] = lshr i64 [[TMP2_3]], 8
1156 ; BE-NEXT: [[TMP0:%.*]] = lshr i64 [[TMP2_3]], 48
1157 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i8
1158 ; CHECK-NEXT: br label [[BB5:%.*]]
1160 ; CHECK-NEXT: [[TMP4_1:%.*]] = phi i8 [ [[TMP4_1_PRE:%.*]], [[BB5]] ], [ [[TMP1]], [[BB:%.*]] ]
1161 ; CHECK-NEXT: [[TMP6_1:%.*]] = load atomic i8, i8* @global acquire, align 4
1162 ; CHECK-NEXT: [[TMP7_1:%.*]] = add i8 [[TMP6_1]], [[TMP4_1]]
1163 ; CHECK-NEXT: store i8 [[TMP7_1]], i8* [[ARG]], align 1
1164 ; CHECK-NEXT: [[TMP4_1_PRE]] = load i8, i8* [[TMP3_1]], align 4
1165 ; CHECK-NEXT: br label [[BB5]]
1168 %tmp1.1 = getelementptr inbounds i8, i8* %arg, i64 0
1169 %tmp2.1 = getelementptr inbounds i8, i8* %arg, i64 1
1170 %tmp2.2 = bitcast i8* %tmp2.1 to i64*
1171 %tmp2.3 = load i64, i64* %tmp2.2, align 4
1172 %tmp2.4 = icmp ugt i64 %tmp2.3, 1
1174 %tmp3.1 = getelementptr inbounds i8, i8* %arg, i64 2
1177 bb5: ; preds = %bb14, %bb
1178 %tmp4.1 = load i8, i8* %tmp3.1, align 4
1179 %tmp6.1 = load atomic i8, i8* getelementptr inbounds (i8, i8* @global, i64 0) acquire, align 4
1180 %tmp7.1 = add i8 %tmp6.1, %tmp4.1
1181 store i8 %tmp7.1, i8* %tmp1.1
1186 ;;===----------------------------------------------------------------------===;;
1187 ;; Load Widening
1188 ;; We explicitly choose NOT to widen, and we test to make sure we don't.
1189 ;;===----------------------------------------------------------------------===;;
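;; For example, in @test_widening1 below the two adjacent i8 fields of @f are
;; loaded separately; the checks require that they stay as two i8 loads rather
;; than being widened into a single i16 load that is then split apart.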
1191 %widening1 = type { i32, i8, i8, i8, i8 }
1193 @f = global %widening1 zeroinitializer, align 4
1195 define i32 @test_widening1(i8* %P) nounwind ssp noredzone {
1196 ; CHECK-LABEL: @test_widening1(
1197 ; CHECK-NEXT: entry:
1198 ; CHECK-NEXT: [[TTMP:%.*]] = load i8, i8* getelementptr inbounds ([[WIDENING1:%.*]], %widening1* @f, i64 0, i32 1), align 4
1199 ; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TTMP]] to i32
1200 ; CHECK-NEXT: [[TTMP1:%.*]] = load i8, i8* getelementptr inbounds ([[WIDENING1]], %widening1* @f, i64 0, i32 2), align 1
1201 ; CHECK-NEXT: [[CONV2:%.*]] = zext i8 [[TTMP1]] to i32
1202 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]]
1203 ; CHECK-NEXT: ret i32 [[ADD]]
1206 %ttmp = load i8, i8* getelementptr inbounds (%widening1, %widening1* @f, i64 0, i32 1), align 4
1207 %conv = zext i8 %ttmp to i32
1208 %ttmp1 = load i8, i8* getelementptr inbounds (%widening1, %widening1* @f, i64 0, i32 2), align 1
1209 %conv2 = zext i8 %ttmp1 to i32
1210 %add = add nsw i32 %conv, %conv2
1214 define i32 @test_widening2() nounwind ssp noredzone {
1215 ; CHECK-LABEL: @test_widening2(
1216 ; CHECK-NEXT: entry:
1217 ; CHECK-NEXT: [[TTMP:%.*]] = load i8, i8* getelementptr inbounds ([[WIDENING1:%.*]], %widening1* @f, i64 0, i32 1), align 4
1218 ; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TTMP]] to i32
1219 ; CHECK-NEXT: [[TTMP1:%.*]] = load i8, i8* getelementptr inbounds ([[WIDENING1]], %widening1* @f, i64 0, i32 2), align 1
1220 ; CHECK-NEXT: [[CONV2:%.*]] = zext i8 [[TTMP1]] to i32
1221 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]]
1222 ; CHECK-NEXT: [[TTMP2:%.*]] = load i8, i8* getelementptr inbounds ([[WIDENING1]], %widening1* @f, i64 0, i32 3), align 2
1223 ; CHECK-NEXT: [[CONV3:%.*]] = zext i8 [[TTMP2]] to i32
1224 ; CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[ADD]], [[CONV3]]
1225 ; CHECK-NEXT: [[TTMP3:%.*]] = load i8, i8* getelementptr inbounds ([[WIDENING1]], %widening1* @f, i64 0, i32 4), align 1
1226 ; CHECK-NEXT: [[CONV4:%.*]] = zext i8 [[TTMP3]] to i32
1227 ; CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD2]], [[CONV4]]
1228 ; CHECK-NEXT: ret i32 [[ADD3]]
1231 %ttmp = load i8, i8* getelementptr inbounds (%widening1, %widening1* @f, i64 0, i32 1), align 4
1232 %conv = zext i8 %ttmp to i32
1233 %ttmp1 = load i8, i8* getelementptr inbounds (%widening1, %widening1* @f, i64 0, i32 2), align 1
1234 %conv2 = zext i8 %ttmp1 to i32
1235 %add = add nsw i32 %conv, %conv2
1237 %ttmp2 = load i8, i8* getelementptr inbounds (%widening1, %widening1* @f, i64 0, i32 3), align 2
1238 %conv3 = zext i8 %ttmp2 to i32
1239 %add2 = add nsw i32 %add, %conv3
1241 %ttmp3 = load i8, i8* getelementptr inbounds (%widening1, %widening1* @f, i64 0, i32 4), align 1
1242 %conv4 = zext i8 %ttmp3 to i32
1243 %add3 = add nsw i32 %add2, %conv4
1249 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
1251 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
1252 declare void @llvm.memcpy.p0i8.p1i8.i64(i8* nocapture, i8 addrspace(1)* nocapture, i64, i1) nounwind
1255 ;;===----------------------------------------------------------------------===;;
1256 ;; Load -> Store dependency which isn't interfered with by a call that happens
1257 ;; before the pointer is captured.
1258 ;;===----------------------------------------------------------------------===;;
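;; Sketch of the pattern being tested: a vtable pointer is stored into a local
;; alloca, an arbitrary call is made, and the value is reloaded. Because the
;; alloca has not escaped before the call, the call cannot clobber it, so the
;; reload is forwarded from the store and eliminated:
;;
;;   store i8** getelementptr inbounds ([5 x i8*], [5 x i8*]* @_ZTV1X, i64 0, i64 2), i8*** %x
;;   call void @use()
;;   %DEAD = load i8**, i8*** %x     ; forwarded; replaced by the stored value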
1260 %class.X = type { [8 x i8] }
1262 @_ZTV1X = weak_odr constant [5 x i8*] zeroinitializer
1263 @_ZTV1Y = weak_odr constant [5 x i8*] zeroinitializer
1266 declare void @use3(i8***, i8**)
1269 define void @test_escape1() nounwind {
1270 ; CHECK-LABEL: @test_escape1(
1271 ; CHECK-NEXT: [[X:%.*]] = alloca i8**, align 8
1272 ; CHECK-NEXT: store i8** getelementptr inbounds ([5 x i8*], [5 x i8*]* @_ZTV1X, i64 0, i64 2), i8*** [[X]], align 8
1273 ; CHECK-NEXT: call void @use() #[[ATTR3]]
1274 ; CHECK-NEXT: call void @use3(i8*** [[X]], i8** getelementptr inbounds ([5 x i8*], [5 x i8*]* @_ZTV1X, i64 0, i64 2)) #[[ATTR3]]
1275 ; CHECK-NEXT: ret void
1277 %x = alloca i8**, align 8
1278 store i8** getelementptr inbounds ([5 x i8*], [5 x i8*]* @_ZTV1X, i64 0, i64 2), i8*** %x, align 8
1279 call void @use() nounwind
1280 %DEAD = load i8**, i8*** %x, align 8
1281 call void @use3(i8*** %x, i8** %DEAD) nounwind