1 ; RUN: opt -print-memderefs -analyze -S < %s -enable-new-pm=0 -use-dereferenceable-at-point-semantics=0 | FileCheck %s --check-prefixes=CHECK,GLOBAL
2 ; RUN: opt -passes=print-memderefs -S < %s -disable-output -use-dereferenceable-at-point-semantics=0 2>&1 | FileCheck %s --check-prefixes=CHECK,GLOBAL
3 ; RUN: opt -print-memderefs -analyze -S < %s -enable-new-pm=0 -use-dereferenceable-at-point-semantics=1 | FileCheck %s --check-prefixes=CHECK,POINT
4 ; RUN: opt -passes=print-memderefs -S < %s -disable-output -use-dereferenceable-at-point-semantics=1 2>&1 | FileCheck %s --check-prefixes=CHECK,POINT
7 ; Uses the print-memderefs (+ analyze to print) pass to run
8 ; isDereferenceablePointer() on many load instruction operands
10 target datalayout = "e-i32:32:64"
12 %TypeOpaque = type opaque
14 declare zeroext i1 @return_i1()
17 @globalstr = global [6 x i8] c"hello\00"
18 @globali32ptr = external global i32*
20 %struct.A = type { [8 x i8], [5 x i8] }
21 @globalstruct = external global %struct.A
23 @globalptr.align1 = external global i8, align 1
24 @globalptr.align16 = external global i8, align 16
26 ; Loads from sret arguments
27 ; CHECK-LABEL: 'test_sret'
28 ; CHECK: %sret_gep{{.*}}(aligned)
29 ; CHECK-NOT: %sret_gep_outside
30 define void @test_sret(%struct.A* sret(%struct.A) %result) {
31 %sret_gep = getelementptr inbounds %struct.A, %struct.A* %result, i64 0, i32 1, i64 2
32 load i8, i8* %sret_gep
34 %sret_gep_outside = getelementptr %struct.A, %struct.A* %result, i64 0, i32 1, i64 7
35 load i8, i8* %sret_gep_outside
40 define void @test(i32 addrspace(1)* dereferenceable(8) %dparam,
41 i8 addrspace(1)* dereferenceable(32) align 1 %dparam.align1,
42 i8 addrspace(1)* dereferenceable(32) align 16 %dparam.align16)
43 gc "statepoint-example" {
44 ; CHECK: The following are dereferenceable:
48 ; GLOBAL: %dparam{{.*}}(unaligned)
49 ; POINT-NOT: %dparam{{.*}}(unaligned)
50 %load3 = load i32, i32 addrspace(1)* %dparam
52 ; GLOBAL: %relocate{{.*}}(unaligned)
53 ; POINT-NOT: %relocate{{.*}}(unaligned)
54 %tok = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0) ["gc-live" (i32 addrspace(1)* %dparam)]
55 %relocate = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %tok, i32 0, i32 0)
56 %load4 = load i32, i32 addrspace(1)* %relocate
59 %dpa = call i32 addrspace(1)* @func1(i32 addrspace(1)* %dparam)
60 %nparam = getelementptr i32, i32 addrspace(1)* %dpa, i32 5
61 %load5 = load i32, i32 addrspace(1)* %nparam
63 ; Load through a pointer produced by a load without !dereferenceable metadata
65 %nd_load = load i32*, i32** @globali32ptr
66 %load6 = load i32, i32* %nd_load
68 ; Load through a pointer produced by a load carrying !dereferenceable metadata
69 ; GLOBAL: %d4_load{{.*}}(unaligned)
70 ; POINT-NOT: %d4_load{{.*}}(unaligned)
71 %d4_load = load i32*, i32** @globali32ptr, !dereferenceable !0
72 %load7 = load i32, i32* %d4_load
74 ; Load from an offset not covered by the dereferenceable portion
76 %d2_load = load i32*, i32** @globali32ptr, !dereferenceable !1
77 %load8 = load i32, i32* %d2_load
79 ; Load from a potentially null pointer with dereferenceable_or_null
80 ; CHECK-NOT: %d_or_null_load
81 %d_or_null_load = load i32*, i32** @globali32ptr, !dereferenceable_or_null !0
82 %load9 = load i32, i32* %d_or_null_load
84 ; Load from a non-null pointer with dereferenceable_or_null
85 ; GLOBAL: %d_or_null_non_null_load{{.*}}(unaligned)
86 ; POINT-NOT: %d_or_null_non_null_load{{.*}}(unaligned)
87 %d_or_null_non_null_load = load i32*, i32** @globali32ptr, !nonnull !2, !dereferenceable_or_null !0
88 %load10 = load i32, i32* %d_or_null_non_null_load
90 ; Loads from aligned arguments
91 ; GLOBAL: %dparam.align1{{.*}}(unaligned)
92 ; POINT-NOT: %dparam.align1{{.*}}(unaligned)
93 ; POINT-NOT: %dparam.align16{{.*}}(aligned)
94 ; GLOBAL: %dparam.align16{{.*}}(aligned)
95 %load15 = load i8, i8 addrspace(1)* %dparam.align1, align 16
96 %load16 = load i8, i8 addrspace(1)* %dparam.align16, align 16
99 ; GLOBAL: %gep.align1.offset1{{.*}}(unaligned)
100 ; GLOBAL: %gep.align16.offset1{{.*}}(unaligned)
101 ; GLOBAL: %gep.align1.offset16{{.*}}(unaligned)
102 ; GLOBAL: %gep.align16.offset16{{.*}}(aligned)
103 ; POINT-NOT: %gep.align1.offset1{{.*}}(unaligned)
104 ; POINT-NOT: %gep.align16.offset1{{.*}}(unaligned)
105 ; POINT-NOT: %gep.align1.offset16{{.*}}(unaligned)
106 ; POINT-NOT: %gep.align16.offset16{{.*}}(aligned)
107 %gep.align1.offset1 = getelementptr inbounds i8, i8 addrspace(1)* %dparam.align1, i32 1
108 %gep.align16.offset1 = getelementptr inbounds i8, i8 addrspace(1)* %dparam.align16, i32 1
109 %gep.align1.offset16 = getelementptr inbounds i8, i8 addrspace(1)* %dparam.align1, i32 16
110 %gep.align16.offset16 = getelementptr inbounds i8, i8 addrspace(1)* %dparam.align16, i32 16
111 %load19 = load i8, i8 addrspace(1)* %gep.align1.offset1, align 16
112 %load20 = load i8, i8 addrspace(1)* %gep.align16.offset1, align 16
113 %load21 = load i8, i8 addrspace(1)* %gep.align1.offset16, align 16
114 %load22 = load i8, i8 addrspace(1)* %gep.align16.offset16, align 16
116 ; CHECK-NOT: %no_deref_return
117 ; GLOBAL: %deref_return{{.*}}(unaligned)
118 ; GLOBAL: %deref_and_aligned_return{{.*}}(aligned)
119 ; POINT-NOT: %deref_return{{.*}}(unaligned)
120 ; POINT-NOT: %deref_and_aligned_return{{.*}}(aligned)
121 %no_deref_return = call i32* @foo()
122 %deref_return = call dereferenceable(32) i32* @foo()
123 %deref_and_aligned_return = call dereferenceable(32) align 16 i32* @foo()
124 %load23 = load i32, i32* %no_deref_return
125 %load24 = load i32, i32* %deref_return, align 16
126 %load25 = load i32, i32* %deref_and_aligned_return, align 16
128 ; Load from a dereferenceable and aligned load
129 ; GLOBAL: %d4_unaligned_load{{.*}}(unaligned)
130 ; GLOBAL: %d4_aligned_load{{.*}}(aligned)
131 ; POINT-NOT: %d4_unaligned_load{{.*}}(unaligned)
132 ; POINT-NOT: %d4_aligned_load{{.*}}(aligned)
133 %d4_unaligned_load = load i32*, i32** @globali32ptr, !dereferenceable !0
134 %d4_aligned_load = load i32*, i32** @globali32ptr, !dereferenceable !0, !align !{i64 16}
135 %load26 = load i32, i32* %d4_unaligned_load, align 16
136 %load27 = load i32, i32* %d4_aligned_load, align 16
140 ; Loads from aligned allocas
141 ; CHECK-LABEL: 'alloca_aligned'
142 ; CHECK: %alloca.align1{{.*}}(unaligned)
143 ; CHECK: %alloca.align16{{.*}}(aligned)
144 define void @alloca_aligned() {
145 %alloca.align1 = alloca i1, align 1
146 %alloca.align16 = alloca i1, align 16
148 %load17 = load i1, i1* %alloca.align1, align 16
149 %load18 = load i1, i1* %alloca.align16, align 16
153 ; CHECK-LABEL: 'alloca_basic'
154 ; CHECK: %alloca{{.*}}(aligned)
155 define void @alloca_basic() {
158 %load2 = load i1, i1* %alloca
162 ; Load from empty array alloca
163 ; CHECK-LABEL: 'alloca_empty'
164 ; CHECK-NOT: %empty_alloca
165 define void @alloca_empty() {
166 %empty_alloca = alloca i8, i64 0
168 %empty_load = load i8, i8* %empty_alloca
172 ; Alloca with no explicit alignment is aligned to preferred alignment of
173 ; the type (specified by datalayout string).
174 ; CHECK-LABEL: 'alloca_perfalign'
175 ; CHECK: %alloca.noalign{{.*}}(aligned)
176 define void @alloca_perfalign() {
177 %alloca.noalign = alloca i32
179 %load28 = load i32, i32* %alloca.noalign, align 8
183 ; CHECK-LABEL: 'global'
184 ; CHECK: @globalptr.align1{{.*}}(unaligned)
185 ; CHECK: @globalptr.align16{{.*}}(aligned)
186 ; CHECK: %globalptr{{.*}}(aligned)
187 define void @global() {
188 %load13 = load i8, i8* @globalptr.align1, align 16
189 %load14 = load i8, i8* @globalptr.align16, align 16
191 %globalptr = getelementptr inbounds [6 x i8], [6 x i8]* @globalstr, i32 0, i32 0
192 %load1 = load i8, i8* %globalptr
196 ; It's OK to overrun the static array size as long as we stay within the
198 ; CHECK-LABEL: 'global_allocationsize'
199 ; CHECK: %within_allocation{{.*}}(aligned)
200 ; CHECK-NOT: %outside_allocation
201 define void @global_allocationsize() {
202 %within_allocation = getelementptr inbounds %struct.A, %struct.A* @globalstruct, i64 0, i32 0, i64 10
203 %load11 = load i8, i8* %within_allocation
205 %outside_allocation = getelementptr inbounds %struct.A, %struct.A* @globalstruct, i64 0, i32 1, i64 10
206 %load12 = load i8, i8* %outside_allocation
210 ; Loads from byval arguments
211 ; CHECK-LABEL: 'byval'
212 ; CHECK: %i8_byval{{.*}}(aligned)
213 ; CHECK-NOT: %bad_byval_cast
214 ; CHECK: %byval_gep{{.*}}(aligned)
215 ; CHECK: %good_byval_cast{{.*}}(unaligned)
216 define void @byval(i8* byval(i8) %i8_byval,
217 %struct.A* byval(%struct.A) %A_byval) {
219 load i8, i8* %i8_byval
221 %bad_byval_cast = bitcast i8* %i8_byval to i32*
222 load i32, i32* %bad_byval_cast
224 %byval_gep = getelementptr inbounds %struct.A, %struct.A* %A_byval, i64 0, i32 1, i64 2
225 load i8, i8* %byval_gep
226 %good_byval_cast = bitcast %struct.A* %A_byval to i32*
227 load i32, i32* %good_byval_cast
233 ; GLOBAL: %ptr = inttoptr i32 %val to i32*, !dereferenceable !0
234 ; POINT-NOT: %ptr = inttoptr i32 %val to i32*, !dereferenceable !0
235 define i32 @f_0(i32 %val) {
236 %ptr = inttoptr i32 %val to i32*, !dereferenceable !0
238 %load29 = load i32, i32* %ptr, align 8
243 ; The most basic case showing the difference between legacy global deref
244 ; attribute semantics and the new point-in-time semantics.
245 ; CHECK-LABEL: 'negative'
248 define void @negative(i32* dereferenceable(8) %p) {
250 %v = load i32, i32* %p
254 ; CHECK-LABEL: 'infer_func_attrs1'
256 define void @infer_func_attrs1(i32* dereferenceable(8) %p) nofree nosync {
258 %v = load i32, i32* %p
262 ; CHECK-LABEL: 'infer_func_attrs2'
265 ; FIXME: Can be inferred from attributes
266 define void @infer_func_attrs2(i32* dereferenceable(8) %p) readonly {
268 %v = load i32, i32* %p
272 ; CHECK-LABEL: 'infer_noalias1'
275 ; FIXME: Can be inferred from attributes
276 define void @infer_noalias1(i32* dereferenceable(8) noalias nofree %p) {
278 %v = load i32, i32* %p
282 ; CHECK-LABEL: 'infer_noalias2'
285 ; FIXME: Can be inferred from attributes
286 define void @infer_noalias2(i32* dereferenceable(8) noalias readonly %p) nosync {
288 %v = load i32, i32* %p
293 ; Just check that we don't crash.
294 ; CHECK-LABEL: 'opaque_type_crasher'
295 define void @opaque_type_crasher(%TypeOpaque* dereferenceable(16) %a) {
297 %bc = bitcast %TypeOpaque* %a to i8*
298 %ptr8 = getelementptr inbounds i8, i8* %bc, i32 8
299 %ptr32 = bitcast i8* %ptr8 to i32*
300 br i1 undef, label %if.then, label %if.end
303 %res = load i32, i32* %ptr32, align 4
310 declare token @llvm.experimental.gc.statepoint.p0f_i1f(i64, i32, i1 ()*, i32, i32, ...)
311 declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32, i32)
313 declare i32 addrspace(1)* @func1(i32 addrspace(1)* returned) nounwind argmemonly
315 ; Can free any object accessible in memory
316 declare void @mayfree()