; RUN: opt < %s -basicaa -gvn -S | FileCheck %s

; 32-bit little endian target.
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
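; The "e" prefix selects little-endian byte order, which determines which bytes
; of a wider store a narrower or offset reload sees in the tests below.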
define i32 @test0(i32 %V, i32* %P) {
  store i32 %V, i32* %P
  %A = load i32* %P
  ret i32 %A
; CHECK: @test0
; CHECK: ret i32 %V
}

;;===----------------------------------------------------------------------===;;
;; Tests for crashers
;;===----------------------------------------------------------------------===;;

define i8 @crash0({i32, i32} %A, {i32, i32}* %P) {
  store {i32, i32} %A, {i32, i32}* %P
  %X = bitcast {i32, i32}* %P to i8*
  %Y = load i8* %X
  ret i8 %Y
}

;;===----------------------------------------------------------------------===;;
;; Store -> Load and Load -> Load forwarding where src and dst are different
;; types, but where the base pointer is a must alias.
;;===----------------------------------------------------------------------===;;
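;; When the source and destination types have the same size, the stored value
;; is expected to be forwarded through a bitcast (or ptrtoint/inttoptr when a
;; pointer is involved); narrower reloads are expected to be satisfied with
;; shifts and truncation of the stored bits instead of a reload.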
;; i32 -> f32 forwarding.
define float @coerce_mustalias1(i32 %V, i32* %P) {
  store i32 %V, i32* %P
  %P2 = bitcast i32* %P to float*
  %A = load float* %P2
  ret float %A
; CHECK: @coerce_mustalias1
; CHECK-NOT: load
; CHECK: ret float
}

;; i32* -> float forwarding.
define float @coerce_mustalias2(i32* %V, i32** %P) {
  store i32* %V, i32** %P
  %P2 = bitcast i32** %P to float*
  %A = load float* %P2
  ret float %A
; CHECK: @coerce_mustalias2
; CHECK-NOT: load
; CHECK: ret float
}

;; float -> i32* forwarding.
define i32* @coerce_mustalias3(float %V, float* %P) {
  store float %V, float* %P
  %P2 = bitcast float* %P to i32**
  %A = load i32** %P2
  ret i32* %A
; CHECK: @coerce_mustalias3
; CHECK-NOT: load
; CHECK: ret i32*
}

;; i32 -> f32 load forwarding.
define float @coerce_mustalias4(i32* %P, i1 %cond) {
  %A = load i32* %P
  %P2 = bitcast i32* %P to float*
  %B = load float* %P2
  br i1 %cond, label %T, label %F
T:
  ret float %B
F:
  %X = bitcast i32 %A to float
  ret float %X
; CHECK: @coerce_mustalias4
; CHECK: %A = load i32* %P
; CHECK-NOT: load
; CHECK: ret float
}

;; i32 -> i8 forwarding
define i8 @coerce_mustalias5(i32 %V, i32* %P) {
  store i32 %V, i32* %P
  %P2 = bitcast i32* %P to i8*
  %A = load i8* %P2
  ret i8 %A
; CHECK: @coerce_mustalias5
; CHECK-NOT: load
; CHECK: ret i8
}
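; In @coerce_mustalias5 the i8 reload reads the low byte of the stored i32 on
; this little-endian target, so the forwarded value is expected to be a trunc
; of %V rather than a load.
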
;; i64 -> float forwarding
define float @coerce_mustalias6(i64 %V, i64* %P) {
  store i64 %V, i64* %P
  %P2 = bitcast i64* %P to float*
  %A = load float* %P2
  ret float %A
; CHECK: @coerce_mustalias6
; CHECK-NOT: load
; CHECK: ret float
}

;; i64 -> i8* (32-bit) forwarding
define i8* @coerce_mustalias7(i64 %V, i64* %P) {
  store i64 %V, i64* %P
  %P2 = bitcast i64* %P to i8**
  %A = load i8** %P2
  ret i8* %A
; CHECK: @coerce_mustalias7
; CHECK-NOT: load
; CHECK: ret i8*
}

; memset -> i16 forwarding.
define signext i16 @memset_to_i16_local(i16* %A) nounwind ssp {
entry:
  %conv = bitcast i16* %A to i8*
  tail call void @llvm.memset.i64(i8* %conv, i8 1, i64 200, i32 1)
  %arrayidx = getelementptr inbounds i16* %A, i64 42
  %tmp2 = load i16* %arrayidx
  ret i16 %tmp2
; CHECK: @memset_to_i16_local
; CHECK-NOT: load
; CHECK: ret i16 257
}
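; A[42] lies inside the 200 bytes filled with 0x01, so the i16 load becomes the
; constant 0x0101 = 257.
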
; memset -> float forwarding.
define float @memset_to_float_local(float* %A, i8 %Val) nounwind ssp {
entry:
  %conv = bitcast float* %A to i8*                ; <i8*> [#uses=1]
  tail call void @llvm.memset.i64(i8* %conv, i8 %Val, i64 400, i32 1)
  %arrayidx = getelementptr inbounds float* %A, i64 42 ; <float*> [#uses=1]
  %tmp2 = load float* %arrayidx                   ; <float> [#uses=1]
  ret float %tmp2
; CHECK: @memset_to_float_local
; CHECK-NOT: load
; CHECK: zext
; CHECK-NEXT: shl
; CHECK-NEXT: or
; CHECK-NEXT: shl
; CHECK-NEXT: or
; CHECK-NEXT: bitcast
; CHECK-NEXT: ret float
}
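; With a variable fill byte, the forwarded value has to be materialized: %Val
; is zero extended and replicated into all four bytes with shifts and ors,
; then bitcast to float, so no load of %A remains.
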
;; non-local memset -> i16 load forwarding.
define i16 @memset_to_i16_nonlocal0(i16* %P, i1 %cond) {
  %P3 = bitcast i16* %P to i8*
  br i1 %cond, label %T, label %F
T:
  tail call void @llvm.memset.i64(i8* %P3, i8 1, i64 400, i32 1)
  br label %Cont
F:
  tail call void @llvm.memset.i64(i8* %P3, i8 2, i64 400, i32 1)
  br label %Cont
Cont:
  %P2 = getelementptr i16* %P, i32 4
  %A = load i16* %P2
  ret i16 %A
; CHECK: @memset_to_i16_nonlocal0
; CHECK: Cont:
; CHECK-NEXT: %A = phi i16 [ 514, %F ], [ 257, %T ]
; CHECK-NOT: load
; CHECK: ret i16 %A
}
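; The memset of 0x02 bytes yields i16 0x0202 = 514 and the memset of 0x01 bytes
; yields 0x0101 = 257; the two values are merged with a phi instead of a reload
; in Cont.
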
@GCst = constant {i32, float, i32 } { i32 42, float 14., i32 97 }

; memcpy -> float forwarding.
define float @memcpy_to_float_local(float* %A) nounwind ssp {
entry:
  %conv = bitcast float* %A to i8*                ; <i8*> [#uses=1]
  tail call void @llvm.memcpy.i64(i8* %conv, i8* bitcast ({i32, float, i32 }* @GCst to i8*), i64 12, i32 1)
  %arrayidx = getelementptr inbounds float* %A, i64 1 ; <float*> [#uses=1]
  %tmp2 = load float* %arrayidx                   ; <float> [#uses=1]
  ret float %tmp2
; CHECK: @memcpy_to_float_local
; CHECK-NOT: load
; CHECK: ret float 1.400000e+01
}
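; @GCst's second field (float 14.0) sits at byte offset 4, so after the 12-byte
; memcpy the load of %A[1] folds to the constant 1.400000e+01.
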
declare void @llvm.memset.i64(i8* nocapture, i8, i64, i32) nounwind
declare void @llvm.memcpy.i64(i8* nocapture, i8* nocapture, i64, i32) nounwind

;; non-local i32/float -> i8 load forwarding.
define i8 @coerce_mustalias_nonlocal0(i32* %P, i1 %cond) {
  %P2 = bitcast i32* %P to float*
  %P3 = bitcast i32* %P to i8*
  br i1 %cond, label %T, label %F
T:
  store i32 42, i32* %P
  br label %Cont
F:
  store float 1.0, float* %P2
  br label %Cont
Cont:
  %A = load i8* %P3
  ret i8 %A
; CHECK: @coerce_mustalias_nonlocal0
; CHECK: Cont:
; CHECK: %A = phi i8 [
; CHECK-NOT: load
; CHECK: ret i8 %A
}

;; non-local i32/float -> i8 load forwarding. This also tests that the "P3"
;; bitcast equivalence can be properly phi translated.
define i8 @coerce_mustalias_nonlocal1(i32* %P, i1 %cond) {
  %P2 = bitcast i32* %P to float*
  br i1 %cond, label %T, label %F
T:
  store i32 42, i32* %P
  br label %Cont
F:
  store float 1.0, float* %P2
  br label %Cont
Cont:
  %P3 = bitcast i32* %P to i8*
  %A = load i8* %P3
  ret i8 %A
;; FIXME: These checks are disabled (the CHECK prefix is intentionally broken
;; to "HECK") because this caused a miscompile in the llvm-gcc bootstrap,
;; see r82411.
; HECK: @coerce_mustalias_nonlocal1
; HECK: Cont:
; HECK: %A = phi i8 [
; HECK-NOT: load
; HECK: ret i8 %A
}

;; non-local i32 -> i8 partial redundancy load forwarding.
define i8 @coerce_mustalias_pre0(i32* %P, i1 %cond) {
  %P3 = bitcast i32* %P to i8*
  br i1 %cond, label %T, label %F
T:
  store i32 42, i32* %P
  br label %Cont
F:
  br label %Cont
Cont:
  %A = load i8* %P3
  ret i8 %A
; CHECK: @coerce_mustalias_pre0
; CHECK: F:
; CHECK: load i8* %P3
; CHECK: Cont:
; CHECK: %A = phi i8 [
; CHECK-NOT: load
; CHECK: ret i8 %A
}

;;===----------------------------------------------------------------------===;;
;; Store -> Load and Load -> Load forwarding where src and dst are different
;; types, and the reload is an offset from the store pointer.
;;===----------------------------------------------------------------------===;;

;; i32 -> i8 forwarding.
define i8 @coerce_offset0(i32 %V, i32* %P) {
  store i32 %V, i32* %P
  %P2 = bitcast i32* %P to i8*
  %P3 = getelementptr i8* %P2, i32 2
  %A = load i8* %P3
  ret i8 %A
; CHECK: @coerce_offset0
; CHECK-NOT: load
; CHECK: ret i8
}
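; Byte offset 2 of the stored i32 corresponds to bits 16-23 on this
; little-endian target, so the forwarded value is expected to come from a
; 16-bit right shift of %V plus a trunc, with no load left behind.
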
;; non-local i32/float -> i8 load forwarding.
define i8 @coerce_offset_nonlocal0(i32* %P, i1 %cond) {
  %P2 = bitcast i32* %P to float*
  %P3 = bitcast i32* %P to i8*
  %P4 = getelementptr i8* %P3, i32 2
  br i1 %cond, label %T, label %F
T:
  store i32 42, i32* %P
  br label %Cont
F:
  store float 1.0, float* %P2
  br label %Cont
Cont:
  %A = load i8* %P4
  ret i8 %A
; CHECK: @coerce_offset_nonlocal0
; CHECK: Cont:
; CHECK: %A = phi i8 [
; CHECK-NOT: load
; CHECK: ret i8 %A
}

;; non-local i32 -> i8 partial redundancy load forwarding.
define i8 @coerce_offset_pre0(i32* %P, i1 %cond) {
  %P3 = bitcast i32* %P to i8*
  %P4 = getelementptr i8* %P3, i32 2
  br i1 %cond, label %T, label %F
T:
  store i32 42, i32* %P
  br label %Cont
F:
  br label %Cont
Cont:
  %A = load i8* %P4
  ret i8 %A
; CHECK: @coerce_offset_pre0
; CHECK: F:
; CHECK: load i8* %P4
; CHECK: Cont:
; CHECK: %A = phi i8 [
; CHECK-NOT: load
; CHECK: ret i8 %A
}
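;; In @chained_load below, every load of %p is fully redundant with the first
;; one (%z), so the final chain should collapse to a single load through %z.
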
define i32 @chained_load(i32** %p) {
block1:
  %A = alloca i32*
  %z = load i32** %p
  store i32* %z, i32** %A
  br i1 true, label %block2, label %block3
block2:
  %a = load i32** %p
  br label %block4
block3:
  %b = load i32** %p
  br label %block4
block4:
  %c = load i32** %p
  %d = load i32* %c
  ret i32 %d
; CHECK: @chained_load
; CHECK: %z = load i32** %p
; CHECK-NOT: load
; CHECK: %d = load i32* %z
; CHECK-NEXT: ret i32 %d
}

declare i1 @cond() readonly
declare i1 @cond2() readonly

define i32 @phi_trans2() {
; CHECK: @phi_trans2
entry:
  %P = alloca i32, i32 400
  br label %F1

F1:
  %A = phi i32 [1, %entry], [2, %F]
  %cond2 = call i1 @cond()
  br i1 %cond2, label %T1, label %TY

T1:
  %P2 = getelementptr i32* %P, i32 %A
  %x = load i32* %P2
  %cond = call i1 @cond2()
  br i1 %cond, label %TX, label %F

F:
  %P3 = getelementptr i32* %P, i32 2
  store i32 17, i32* %P3
  store i32 42, i32* %P2 ; Provides "P[A]".
  br label %F1

TX:
  ; This load should not be compiled to 'ret i32 42'. An overly clever
  ; implementation of GVN would see that we're returning 17 if the loop
  ; executes once or 42 if it executes more than that, but we'd have to do
  ; loop restructuring to expose this, and GVN shouldn't do this sort of CFG
  ; transformation.
; CHECK: TX:
; CHECK: ret i32 %x
  ret i32 %x

TY:
  ret i32 0
}
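;; In @phi_trans3 below, the reloaded address %C depends on a phi; GVN phi
;; translates it into each predecessor (%p+0 from %block2, %p+43 from %block3)
;; and forwards the dominating stores, so only a phi of 87 and 97 remains.
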
define i32 @phi_trans3(i32* %p) {
; CHECK: @phi_trans3
block1:
  br i1 true, label %block2, label %block3

block2:
  store i32 87, i32* %p
  br label %block4

block3:
  %p2 = getelementptr i32* %p, i32 43
  store i32 97, i32* %p2
  br label %block4

block4:
  %A = phi i32 [-1, %block2], [42, %block3]
  br i1 true, label %block5, label %exit
; CHECK: block4:
; CHECK-NEXT: %D = phi i32 [ 87, %block2 ], [ 97, %block3 ]
; CHECK-NOT: load

block5:
  %B = add i32 %A, 1
  br i1 true, label %block6, label %exit

block6:
  %C = getelementptr i32* %p, i32 %B
  br i1 true, label %block7, label %exit

block7:
  %D = load i32* %C
  ret i32 %D
; CHECK: block7:
; CHECK-NEXT: ret i32 %D

exit:
  ret i32 -1
}

define i8 @phi_trans4(i8* %p) {
; CHECK: @phi_trans4
entry:
  %X3 = getelementptr i8* %p, i32 192
  store i8 192, i8* %X3

  %X = getelementptr i8* %p, i32 4
  %Y = load i8* %X
  br label %loop

loop:
  %i = phi i32 [4, %entry], [192, %loop]
  %X2 = getelementptr i8* %p, i32 %i
  %Y2 = load i8* %X2
; CHECK: loop:
; CHECK-NEXT: %Y2 = phi i8 [ %Y, %entry ], [ 0, %loop ]
; CHECK-NOT: load i8

  %cond = call i1 @cond2()

  %Z = bitcast i8 *%X3 to i32*
  store i32 0, i32* %Z
  br i1 %cond, label %loop, label %out

out:
  %R = add i8 %Y, %Y2
  ret i8 %R
}

define i8 @phi_trans5(i8* %p) {
; CHECK: @phi_trans5
entry:
  %X4 = getelementptr i8* %p, i32 2
  store i8 19, i8* %X4

  %X = getelementptr i8* %p, i32 4
  %Y = load i8* %X
  br label %loop

loop:
  %i = phi i32 [4, %entry], [3, %cont]
  %X2 = getelementptr i8* %p, i32 %i
  %Y2 = load i8* %X2 ; Ensure this load is not being incorrectly replaced.
  %cond = call i1 @cond2()
  br i1 %cond, label %cont, label %out

cont:
  %Z = getelementptr i8* %X2, i32 -1
  %Z2 = bitcast i8 *%Z to i32*
  store i32 50462976, i32* %Z2 ;; (1 << 8) | (2 << 16) | (3 << 24)
; CHECK: store i32
; CHECK-NEXT: getelementptr i8* %p, i32 3
; CHECK-NEXT: load i8*
  br label %loop

out:
  %R = add i8 %Y, %Y2
  ret i8 %R
}
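;; Forwarding a zeroing memset to an i32 load of the same buffer: the whole
;; alloca is filled with zero bytes, so the load of the first element should
;; fold to the constant 0.
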
define i32 @memset_to_load() nounwind readnone {
entry:
  %x = alloca [256 x i32], align 4                ; <[256 x i32]*> [#uses=2]
  %tmp = bitcast [256 x i32]* %x to i8*           ; <i8*> [#uses=1]
  call void @llvm.memset.i64(i8* %tmp, i8 0, i64 1024, i32 4)
  %arraydecay = getelementptr inbounds [256 x i32]* %x, i32 0, i32 0 ; <i32*> [#uses=1]
  %tmp1 = load i32* %arraydecay                   ; <i32> [#uses=1]
  ret i32 %tmp1
; CHECK: @memset_to_load
; CHECK: ret i32 0
}