; RUN: opt < %s -basicaa -gvn -S -die | FileCheck %s
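; GVN (using basic alias analysis) performs the load elimination; -die (dead
; instruction elimination) then deletes the instructions that become dead.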
; 32-bit little-endian target.
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
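; In the datalayout string, "e" selects little-endian byte order, "p:32:32:32"
; gives 32-bit pointers, and "n8:16:32" lists the native integer widths.
; Endianness and pointer size matter below: several tests forward stored bits
; into loads of a different type or offset, which is only well-defined once
; the byte layout is pinned down.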
define i32 @test0(i32 %V, i32* %P) {
;;===----------------------------------------------------------------------===;;
;; Crash test cases.
;;===----------------------------------------------------------------------===;;
define i8 @crash0({i32, i32} %A, {i32, i32}* %P) {
  store {i32, i32} %A, {i32, i32}* %P
  %X = bitcast {i32, i32}* %P to i8*

;;===----------------------------------------------------------------------===;;
;; Store -> Load and Load -> Load forwarding where src and dst are different
;; types, but where the base pointer is a must alias.
;;===----------------------------------------------------------------------===;;
;; i32 -> f32 forwarding.
define float @coerce_mustalias1(i32 %V, i32* %P) {
  %P2 = bitcast i32* %P to float*
; CHECK: @coerce_mustalias1

;; i32* -> float forwarding.
define float @coerce_mustalias2(i32* %V, i32** %P) {
  store i32* %V, i32** %P
  %P2 = bitcast i32** %P to float*
; CHECK: @coerce_mustalias2

;; float -> i32* forwarding.
define i32* @coerce_mustalias3(float %V, float* %P) {
  store float %V, float* %P
  %P2 = bitcast float* %P to i32**
; CHECK: @coerce_mustalias3

;; i32 -> f32 load forwarding.
define float @coerce_mustalias4(i32* %P, i1 %cond) {
  %P2 = bitcast i32* %P to float*
  br i1 %cond, label %T, label %F
  %X = bitcast i32 %A to float
; CHECK: @coerce_mustalias4
; CHECK: %A = load i32* %P

;; i32 -> i8 forwarding.
define i8 @coerce_mustalias5(i32 %V, i32* %P) {
  %P2 = bitcast i32* %P to i8*
; CHECK: @coerce_mustalias5

;; i64 -> float forwarding.
define float @coerce_mustalias6(i64 %V, i64* %P) {
  store i64 %V, i64* %P
  %P2 = bitcast i64* %P to float*
; CHECK: @coerce_mustalias6

;; i64 -> i8* (32-bit) forwarding.
define i8* @coerce_mustalias7(i64 %V, i64* %P) {
  store i64 %V, i64* %P
  %P2 = bitcast i64* %P to i8**
; CHECK: @coerce_mustalias7
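; On this 32-bit little-endian target, both of the i64 cases above forward the
; low 32 bits of the stored value. A sketch of the coercion GVN is expected to
; perform (names are illustrative, not taken from the test's CHECK lines):
;   %lo  = trunc i64 %V to i32
;   %f   = bitcast i32 %lo to float   ; coerce_mustalias6
;   %ptr = inttoptr i32 %lo to i8*    ; coerce_mustalias7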
; memset -> i16 forwarding.
define signext i16 @memset_to_i16_local(i16* %A) nounwind ssp {
  %conv = bitcast i16* %A to i8*
  tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 1, i64 200, i32 1, i1 false)
  %arrayidx = getelementptr inbounds i16* %A, i64 42
  %tmp2 = load i16* %arrayidx
; CHECK: @memset_to_i16_local
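; All 200 bytes are 0x01, so the i16 at index 42 must be 0x0101 = 257 (byte
; order is irrelevant when both bytes are equal); GVN forwards that constant
; instead of leaving the load.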
; memset -> float forwarding.
define float @memset_to_float_local(float* %A, i8 %Val) nounwind ssp {
  %conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
  tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 %Val, i64 400, i32 1, i1 false)
  %arrayidx = getelementptr inbounds float* %A, i64 42 ; <float*> [#uses=1]
  %tmp2 = load float* %arrayidx ; <float> [#uses=1]
; CHECK: @memset_to_float_local
; CHECK-NEXT: bitcast
; CHECK-NEXT: ret float
;; non-local memset -> i16 load forwarding.
define i16 @memset_to_i16_nonlocal0(i16* %P, i1 %cond) {
  %P3 = bitcast i16* %P to i8*
  br i1 %cond, label %T, label %F
  tail call void @llvm.memset.p0i8.i64(i8* %P3, i8 1, i64 400, i32 1, i1 false)
  tail call void @llvm.memset.p0i8.i64(i8* %P3, i8 2, i64 400, i32 1, i1 false)
  %P2 = getelementptr i16* %P, i32 4
; CHECK: @memset_to_i16_nonlocal0
; CHECK-NEXT: %A = phi i16 [ 514, %F ], [ 257, %T ]
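; The phi constants come straight from the memset fill bytes: the %T path
; memsets 0x01, so the forwarded i16 is 0x0101 = 257, and the %F path memsets
; 0x02, giving 0x0202 = 514. Endianness doesn't matter because the two bytes
; of each i16 are equal.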
@GCst = constant {i32, float, i32 } { i32 42, float 14., i32 97 }
; memcpy -> float forwarding.
define float @memcpy_to_float_local(float* %A) nounwind ssp {
  %conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %conv, i8* bitcast ({i32, float, i32 }* @GCst to i8*), i64 12, i32 1, i1 false)
  %arrayidx = getelementptr inbounds float* %A, i64 1 ; <float*> [#uses=1]
  %tmp2 = load float* %arrayidx ; <float> [#uses=1]
; CHECK: @memcpy_to_float_local
; CHECK: ret float 1.400000e+01
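; The memcpy copies the first 12 bytes of @GCst, whose float field sits at
; byte offset 4. %arrayidx (element 1 of %A) reads exactly those four bytes,
; so GVN folds the load to the constant 14.0 checked above.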
;; non-local i32/float -> i8 load forwarding.
define i8 @coerce_mustalias_nonlocal0(i32* %P, i1 %cond) {
  %P2 = bitcast i32* %P to float*
  %P3 = bitcast i32* %P to i8*
  br i1 %cond, label %T, label %F
  store i32 42, i32* %P
  store float 1.0, float* %P2
; CHECK: @coerce_mustalias_nonlocal0
; CHECK: %A = phi i8 [

;; non-local i32/float -> i8 load forwarding. This also tests that the "P3"
;; bitcast equivalence can be properly phi translated.
define i8 @coerce_mustalias_nonlocal1(i32* %P, i1 %cond) {
  %P2 = bitcast i32* %P to float*
  br i1 %cond, label %T, label %F
  store i32 42, i32* %P
  store float 1.0, float* %P2
  %P3 = bitcast i32* %P to i8*
;; FIXME: This is disabled (the check lines are spelled "HECK" so FileCheck
;; ignores them) because this caused a miscompile in the llvm-gcc bootstrap,
;; see r82411.
; HECK: @coerce_mustalias_nonlocal1
; HECK: %A = phi i8 [
;; non-local i32 -> i8 partial redundancy load forwarding.
define i8 @coerce_mustalias_pre0(i32* %P, i1 %cond) {
  %P3 = bitcast i32* %P to i8*
  br i1 %cond, label %T, label %F
  store i32 42, i32* %P
; CHECK: @coerce_mustalias_pre0
; CHECK: load i8* %P3
; CHECK: %A = phi i8 [
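; Partial redundancy: the stored i32 makes the value available only along the
; %T edge, so (per the CHECK lines above) GVN's PRE is expected to insert a
; plain i8 load on the other incoming path and merge the two with a phi.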
;;===----------------------------------------------------------------------===;;
;; Store -> Load and Load -> Load forwarding where src and dst are different
;; types, and the reload is an offset from the store pointer.
;;===----------------------------------------------------------------------===;;

;; i32 -> i8 forwarding.
define i8 @coerce_offset0(i32 %V, i32* %P) {
  store i32 %V, i32* %P
  %P2 = bitcast i32* %P to i8*
  %P3 = getelementptr i8* %P2, i32 2
; CHECK: @coerce_offset0
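; The reload is 2 bytes past the stored i32, so on this little-endian target
; the forwarded value is bits 16..23 of %V. A sketch of the expected coercion
; (illustrative names only, not from the test's CHECK lines):
;   %sh = lshr i32 %V, 16
;   %b  = trunc i32 %sh to i8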
;; non-local i32/float -> i8 load forwarding.
define i8 @coerce_offset_nonlocal0(i32* %P, i1 %cond) {
  %P2 = bitcast i32* %P to float*
  %P3 = bitcast i32* %P to i8*
  %P4 = getelementptr i8* %P3, i32 2
  br i1 %cond, label %T, label %F
  store i32 42, i32* %P
  store float 1.0, float* %P2
; CHECK: @coerce_offset_nonlocal0
; CHECK: %A = phi i8 [

;; non-local i32 -> i8 partial redundancy load forwarding.
define i8 @coerce_offset_pre0(i32* %P, i1 %cond) {
  %P3 = bitcast i32* %P to i8*
  %P4 = getelementptr i8* %P3, i32 2
  br i1 %cond, label %T, label %F
  store i32 42, i32* %P
; CHECK: @coerce_offset_pre0
; CHECK: load i8* %P4
; CHECK: %A = phi i8 [

define i32 @chained_load(i32** %p) {
  store i32* %z, i32** %A
  br i1 true, label %block2, label %block3
; CHECK: @chained_load
; CHECK: %z = load i32** %p
; CHECK: %d = load i32* %z
; CHECK-NEXT: ret i32 %d

declare i1 @cond() readonly
declare i1 @cond2() readonly

define i32 @phi_trans2() {
  %P = alloca i32, i32 400
  %A = phi i32 [1, %entry], [2, %F]
  %cond2 = call i1 @cond()
  br i1 %cond2, label %T1, label %TY
  %P2 = getelementptr i32* %P, i32 %A
  %cond = call i1 @cond2()
  br i1 %cond, label %TX, label %F
  %P3 = getelementptr i32* %P, i32 2
  store i32 17, i32* %P3
  store i32 42, i32* %P2 ; Provides "P[A]".
  ; This load should not be compiled to 'ret i32 42'. An overly clever
  ; implementation of GVN would see that we're returning 17 if the loop
  ; executes once or 42 if it executes more than that, but we'd have to do
  ; loop restructuring to expose this, and GVN shouldn't do this sort of CFG
  ; transformation.
define i32 @phi_trans3(i32* %p) {
  br i1 true, label %block2, label %block3
  store i32 87, i32* %p
  %p2 = getelementptr i32* %p, i32 43
  store i32 97, i32* %p2
  %A = phi i32 [-1, %block2], [42, %block3]
  br i1 true, label %block5, label %exit
; CHECK-NEXT: %D = phi i32 [ 87, %block2 ], [ 97, %block3 ]
  br i1 true, label %block6, label %exit
  %C = getelementptr i32* %p, i32 %B
  br i1 true, label %block7, label %exit
; CHECK-NEXT: ret i32 %D

define i8 @phi_trans4(i8* %p) {
  %X3 = getelementptr i8* %p, i32 192
  store i8 192, i8* %X3
  %X = getelementptr i8* %p, i32 4
  %i = phi i32 [4, %entry], [192, %loop]
  %X2 = getelementptr i8* %p, i32 %i
; CHECK-NEXT: %Y2 = phi i8 [ %Y, %entry ], [ 0, %loop ]
  %cond = call i1 @cond2()
  %Z = bitcast i8* %X3 to i32*
  br i1 %cond, label %loop, label %out

define i8 @phi_trans5(i8* %p) {
  %X4 = getelementptr i8* %p, i32 2
  %X = getelementptr i8* %p, i32 4
  %i = phi i32 [4, %entry], [3, %cont]
  %X2 = getelementptr i8* %p, i32 %i
  %Y2 = load i8* %X2 ; Ensure this load is not being incorrectly replaced.
  %cond = call i1 @cond2()
  br i1 %cond, label %cont, label %out
  %Z = getelementptr i8* %X2, i32 -1
  %Z2 = bitcast i8* %Z to i32*
  store i32 50462976, i32* %Z2 ;; (1 << 8) | (2 << 16) | (3 << 24)
; CHECK-NEXT: getelementptr i8* %p, i32 3
; CHECK-NEXT: load i8*
define i32 @memset_to_load() nounwind readnone {
  %x = alloca [256 x i32], align 4 ; <[256 x i32]*> [#uses=2]
  %tmp = bitcast [256 x i32]* %x to i8* ; <i8*> [#uses=1]
  call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 1024, i32 4, i1 false)
  %arraydecay = getelementptr inbounds [256 x i32]* %x, i32 0, i32 0 ; <i32*> [#uses=1]
  %tmp1 = load i32* %arraydecay ; <i32> [#uses=1]
; CHECK: @memset_to_load

;;===----------------------------------------------------------------------===;;
;; Load -> Load forwarding in partial alias case.
;;===----------------------------------------------------------------------===;;

define i32 @load_load_partial_alias(i8* %P) nounwind ssp {
  %0 = bitcast i8* %P to i32*
  %add.ptr = getelementptr inbounds i8* %P, i64 1
  %tmp5 = load i8* %add.ptr
  %conv = zext i8 %tmp5 to i32
  %add = add nsw i32 %tmp2, %conv
; TEMPORARILYDISABLED: @load_load_partial_alias
; TEMPORARILYDISABLED: load i32*
; TEMPORARILYDISABLED-NOT: load
; TEMPORARILYDISABLED: lshr i32 {{.*}}, 8
; TEMPORARILYDISABLED-NOT: load
; TEMPORARILYDISABLED: trunc i32 {{.*}} to i8
; TEMPORARILYDISABLED-NOT: load
; TEMPORARILYDISABLED: ret i32

; Cross block partial alias case.
define i32 @load_load_partial_alias_cross_block(i8* %P) nounwind ssp {
  %xx = bitcast i8* %P to i32*
  %x1 = load i32* %xx, align 4
  %cmp = icmp eq i32 %x1, 127
  br i1 %cmp, label %land.lhs.true, label %if.end

land.lhs.true: ; preds = %entry
  %arrayidx4 = getelementptr inbounds i8* %P, i64 1
  %tmp5 = load i8* %arrayidx4, align 1
  %conv6 = zext i8 %tmp5 to i32
; TEMPORARILY_DISABLED: @load_load_partial_alias_cross_block
; TEMPORARILY_DISABLED: land.lhs.true:
; TEMPORARILY_DISABLED-NOT: load i8
; TEMPORARILY_DISABLED: ret i32 %conv6
;;===----------------------------------------------------------------------===;;
;; Load widening.
;;===----------------------------------------------------------------------===;;
%widening1 = type { i32, i8, i8, i8, i8 }

@f = global %widening1 zeroinitializer, align 4

define i32 @test_widening1(i8* %P) nounwind ssp noredzone {
  %tmp = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 1), align 4
  %conv = zext i8 %tmp to i32
  %tmp1 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 2), align 1
  %conv2 = zext i8 %tmp1 to i32
  %add = add nsw i32 %conv, %conv2
; CHECK: @test_widening1
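; The two i8 loads read adjacent bytes of @f (byte offsets 4 and 5 of
; %widening1), so GVN can widen them into one i16 load and extract both bytes
; from the wide value.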
define i32 @test_widening2() nounwind ssp noredzone {
  %tmp = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 1), align 4
  %conv = zext i8 %tmp to i32
  %tmp1 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 2), align 1
  %conv2 = zext i8 %tmp1 to i32
  %add = add nsw i32 %conv, %conv2

  %tmp2 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 3), align 2
  %conv3 = zext i8 %tmp2 to i32
  %add2 = add nsw i32 %add, %conv3

  %tmp3 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 4), align 1
  %conv4 = zext i8 %tmp3 to i32
  %add3 = add nsw i32 %add2, %conv4
; CHECK: @test_widening2
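; Here all four trailing i8 fields (byte offsets 4 through 7) are loaded, so
; the loads can presumably be widened further, into a single i32 load.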
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind