; RUN: opt < %s -passes=bounds-checking -S | FileCheck %s
target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

@.str = private constant [8 x i8] c"abcdefg\00" ; <[8 x i8]*>

@.str_as1 = private addrspace(1) constant [8 x i8] c"abcdefg\00" ; <[8 x i8] addrspace(1)*>

declare noalias i8* @malloc(i64) nounwind
declare noalias i8* @calloc(i64, i64) nounwind
declare noalias i8* @realloc(i8* nocapture, i64) nounwind
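
; f1: store to element 2 (bytes 8-11) of a 32-byte malloc'd buffer is
; statically in bounds, so no guard is needed.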
; CHECK: @f1
define void @f1() nounwind {
  %1 = tail call i8* @malloc(i64 32)
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 2
; CHECK-NOT: trap
  store i32 3, i32* %idx, align 4
  ret void
}
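
; f2: element 8 starts at byte offset 32, one past the end of the
; 32-byte allocation, so the pass must emit a trapping guard.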
; CHECK: @f2
define void @f2() nounwind {
  %1 = tail call i8* @malloc(i64 32)
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 8
; CHECK: trap
  store i32 3, i32* %idx, align 4
  ret void
}
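
; f3: the calloc'd size is 4 * %x and only known at run time, so the
; guard compares the computed size against the constant offset.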
; CHECK: @f3
define void @f3(i64 %x) nounwind {
  %1 = tail call i8* @calloc(i64 4, i64 %x)
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 8
; CHECK: mul i64 4, %
; CHECK: sub i64 {{.*}}, 32
; CHECK-NEXT: icmp ult i64 {{.*}}, 32
; CHECK-NEXT: icmp ult i64 {{.*}}, 4
; CHECK-NEXT: or i1
; CHECK: trap
  store i32 3, i32* %idx, align 4
  ret void
}
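
; Volatile stores are left uninstrumented.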
; CHECK: @store_volatile
define void @store_volatile(i64 %x) nounwind {
  %1 = tail call i8* @calloc(i64 4, i64 %x)
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 8
; CHECK-NOT: trap
  store volatile i32 3, i32* %idx, align 4
  ret void
}
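
; f4: realloc'd buffer of run-time size %x; the constant-offset load
; still needs a run-time guard.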
; CHECK: @f4
define void @f4(i64 %x) nounwind {
  %1 = tail call i8* @realloc(i8* null, i64 %x) nounwind
  %2 = bitcast i8* %1 to i32*
  %idx = getelementptr inbounds i32, i32* %2, i64 8
; CHECK: trap
  %3 = load i32, i32* %idx, align 4
  ret void
}
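
; f5: a variable index into the 8-byte global string must be guarded.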
; CHECK: @f5
define void @f5(i64 %x) nounwind {
  %idx = getelementptr inbounds [8 x i8], [8 x i8]* @.str, i64 0, i64 %x
; CHECK: trap
  %1 = load i8, i8* %idx, align 4
  ret void
}
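
; f5_as1: same check, but pointers in addrspace(1) are 16 bits wide
; (see the datalayout), so the arithmetic is done in i16.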
define void @f5_as1(i64 %x) nounwind {
; CHECK: @f5_as1
  %idx = getelementptr inbounds [8 x i8], [8 x i8] addrspace(1)* @.str_as1, i64 0, i64 %x
; CHECK: sub i16
; CHECK: icmp ult i16
; CHECK: trap
  %1 = load i8, i8 addrspace(1)* %idx, align 4
  ret void
}
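
; f6: a 16-byte load from a fixed-size i128 alloca is statically safe.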
; CHECK: @f6
define void @f6(i64 %x) nounwind {
  %1 = alloca i128
; CHECK-NOT: trap
  %2 = load i128, i128* %1, align 4
  ret void
}
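
; f7: alloca of %x elements; the 16-byte element size is multiplied by
; the count at run time before the guard.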
; CHECK: @f7
define void @f7(i64 %x) nounwind {
  %1 = alloca i128, i64 %x
; CHECK: mul i64 16,
; CHECK: trap
  %2 = load i128, i128* %1, align 4
  ret void
}
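
; f8: both select operands are fixed-size allocas large enough for the
; load, so no guard is needed.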
; CHECK: @f8
define void @f8() nounwind {
  %1 = alloca i128
  %2 = alloca i128
  %3 = select i1 undef, i128* %1, i128* %2
; CHECK-NOT: trap
  %4 = load i128, i128* %3, align 4
  ret void
}
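
; f9: one select operand is an argument of unknown size, so the load
; cannot be instrumented.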
; CHECK: @f9
define void @f9(i128* %arg) nounwind {
  %1 = alloca i128
  %2 = select i1 undef, i128* %arg, i128* %1
; CHECK-NOT: trap
  %3 = load i128, i128* %2, align 4
  ret void
}
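
; f10: both select operands have run-time sizes; size and offset are
; themselves selected before being checked.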
; CHECK: @f10
define void @f10(i64 %x, i64 %y) nounwind {
  %1 = alloca i128, i64 %x
  %2 = alloca i128, i64 %y
  %3 = select i1 undef, i128* %1, i128* %2
; CHECK: select
; CHECK: select
; CHECK: trap
  %4 = load i128, i128* %3, align 4
  ret void
}
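
; f11: a byval i128 is 16 bytes, so loading at byte offset 16 is
; statically out of bounds and the access branches straight to the trap.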
; CHECK: @f11
define void @f11(i128* byval(i128) %x) nounwind {
  %1 = bitcast i128* %x to i8*
  %2 = getelementptr inbounds i8, i8* %1, i64 16
; CHECK: br label
  %3 = load i8, i8* %2, align 4
  ret void
}
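
; f11_as1: the same out-of-bounds byval access, with 16-bit
; addrspace(1) pointers.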
; CHECK: @f11_as1
define void @f11_as1(i128 addrspace(1)* byval(i128) %x) nounwind {
  %1 = bitcast i128 addrspace(1)* %x to i8 addrspace(1)*
  %2 = getelementptr inbounds i8, i8 addrspace(1)* %1, i16 16
; CHECK: br label
  %3 = load i8, i8 addrspace(1)* %2, align 4
  ret void
}
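
; f12: variable index into a run-time-sized buffer; the byte offset is
; computed as %y * 8 and checked against the allocation size.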
; CHECK: @f12
define i64 @f12(i64 %x, i64 %y) nounwind {
  %1 = tail call i8* @calloc(i64 1, i64 %x)
; CHECK: mul i64 %y, 8
; CHECK: trap
  %2 = bitcast i8* %1 to i64*
  %3 = getelementptr inbounds i64, i64* %2, i64 %y
  %4 = load i64, i64* %3, align 8
  ret i64 %4
}
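
; Volatile loads are skipped too, as with the volatile store above.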
; CHECK: @load_volatile
define i64 @load_volatile(i64 %x, i64 %y) nounwind {
  %1 = tail call i8* @calloc(i64 1, i64 %x)
; CHECK-NOT: trap
  %2 = bitcast i8* %1 to i64*
  %3 = getelementptr inbounds i64, i64* %2, i64 %y
  %4 = load volatile i64, i64* %3, align 8
  ret i64 %4
}
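
; f13: a self-referential GEP is only legal in unreachable code; the
; pass must leave it alone rather than crash on it.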
; CHECK: @f13
define void @f13() nounwind {
entry:
  br label %alive

dead:
  ; Self-referential GEPs can occur in dead code.
  %incdec.ptr = getelementptr inbounds i32, i32* %incdec.ptr, i64 1
  ; CHECK: %incdec.ptr = getelementptr inbounds i32, i32* %incdec.ptr
  %l = load i32, i32* %incdec.ptr
  br label %alive

alive:
  ret void
}