1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
2 ; RUN: opt < %s -passes=inline -S | FileCheck %s
3 ; RUN: opt < %s -passes='cgscc(inline)' -S | FileCheck %s
5 ; The verifier does catch problems with inlining of byval arguments that have a
6 ; different address space compared to the alloca. But running instcombine
7 ; after inline used to trigger asserts unless we disallow such inlining.
8 ; RUN: opt < %s -passes=inline,instcombine -disable-output 2>/dev/null
10 target datalayout = "p:32:32-p1:64:64-p2:16:16-n16:32:64"
12 ; Inlining a byval struct should cause an explicit copy into an alloca.
14 %struct.ss = type { i32, i64 }
15 @.str = internal constant [10 x i8] c"%d, %lld\0A\00" ; <ptr> [#uses=1]
17 define internal void @f(ptr byval(%struct.ss) %b) nounwind {
19 %tmp = getelementptr %struct.ss, ptr %b, i32 0, i32 0 ; <ptr> [#uses=2]
20 %tmp1 = load i32, ptr %tmp, align 4 ; <i32> [#uses=1]
21 %tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
22 store i32 %tmp2, ptr %tmp, align 4
26 declare i32 @printf(ptr, ...) nounwind
28 define i32 @test1() nounwind {
29 ; CHECK-LABEL: define i32 @test1(
30 ; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
32 ; CHECK-NEXT: [[S1:%.*]] = alloca [[STRUCT_SS:%.*]], align 8
33 ; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_SS]], align 8
34 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [[STRUCT_SS]], ptr [[S]], i32 0, i32 0
35 ; CHECK-NEXT: store i32 1, ptr [[TMP1]], align 8
36 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_SS]], ptr [[S]], i32 0, i32 1
37 ; CHECK-NEXT: store i64 2, ptr [[TMP4]], align 4
38 ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr [[S1]])
39 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[S1]], ptr align 1 [[S]], i64 12, i1 false)
40 ; CHECK-NEXT: [[TMP1_I:%.*]] = load i32, ptr [[S1]], align 4
41 ; CHECK-NEXT: [[TMP2_I:%.*]] = add i32 [[TMP1_I]], 1
42 ; CHECK-NEXT: store i32 [[TMP2_I]], ptr [[S1]], align 4
43 ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr [[S1]])
44 ; CHECK-NEXT: ret i32 0
47 %S = alloca %struct.ss ; <ptr> [#uses=4]
48 %tmp1 = getelementptr %struct.ss, ptr %S, i32 0, i32 0 ; <ptr> [#uses=1]
49 store i32 1, ptr %tmp1, align 8
50 %tmp4 = getelementptr %struct.ss, ptr %S, i32 0, i32 1 ; <ptr> [#uses=1]
51 store i64 2, ptr %tmp4, align 4
52 call void @f(ptr byval(%struct.ss) %S) nounwind
56 ; Inlining a byval struct should NOT cause an explicit copy
57 ; into an alloca if the function is readonly
59 define internal i32 @f2(ptr byval(%struct.ss) %b) nounwind readonly {
61 %tmp = getelementptr %struct.ss, ptr %b, i32 0, i32 0 ; <ptr> [#uses=2]
62 %tmp1 = load i32, ptr %tmp, align 4 ; <i32> [#uses=1]
63 %tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
67 define i32 @test2() nounwind {
68 ; CHECK-LABEL: define i32 @test2(
69 ; CHECK-SAME: ) #[[ATTR0]] {
71 ; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_SS:%.*]], align 8
72 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [[STRUCT_SS]], ptr [[S]], i32 0, i32 0
73 ; CHECK-NEXT: store i32 1, ptr [[TMP1]], align 8
74 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr [[STRUCT_SS]], ptr [[S]], i32 0, i32 1
75 ; CHECK-NEXT: store i64 2, ptr [[TMP4]], align 4
76 ; CHECK-NEXT: [[TMP1_I:%.*]] = load i32, ptr [[S]], align 4
77 ; CHECK-NEXT: [[TMP2_I:%.*]] = add i32 [[TMP1_I]], 1
78 ; CHECK-NEXT: ret i32 [[TMP2_I]]
81 %S = alloca %struct.ss ; <ptr> [#uses=4]
82 %tmp1 = getelementptr %struct.ss, ptr %S, i32 0, i32 0 ; <ptr> [#uses=1]
83 store i32 1, ptr %tmp1, align 8
84 %tmp4 = getelementptr %struct.ss, ptr %S, i32 0, i32 1 ; <ptr> [#uses=1]
85 store i64 2, ptr %tmp4, align 4
86 %X = call i32 @f2(ptr byval(%struct.ss) %S) nounwind
91 ; Inlining a byval with an explicit alignment needs to use *at least* that
92 ; alignment on the generated alloca.
94 declare void @g3(ptr %p)
96 define internal void @f3(ptr byval(%struct.ss) align 64 %b) nounwind {
97 call void @g3(ptr %b) ;; Could make alignment assumptions!
101 define void @test3() nounwind {
102 ; CHECK-LABEL: define void @test3(
103 ; CHECK-SAME: ) #[[ATTR0]] {
105 ; CHECK-NEXT: [[S1:%.*]] = alloca [[STRUCT_SS:%.*]], align 64
106 ; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_SS]], align 1
107 ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 12, ptr [[S1]])
108 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[S1]], ptr align 1 [[S]], i64 12, i1 false)
109 ; CHECK-NEXT: call void @g3(ptr align 64 [[S1]]) #[[ATTR0]]
110 ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 12, ptr [[S1]])
111 ; CHECK-NEXT: ret void
114 %S = alloca %struct.ss, align 1 ;; May not be aligned.
115 call void @f3(ptr byval(%struct.ss) align 64 %S) nounwind
120 ; Inlining a byval struct should NOT cause an explicit copy
121 ; into an alloca if the function is readonly, but should increase an alloca's
122 ; alignment to satisfy an explicit alignment request.
124 define internal i32 @f4(ptr byval(%struct.ss) align 64 %b) nounwind readonly {
125 call void @g3(ptr %b)
129 define i32 @test4() nounwind {
130 ; CHECK-LABEL: define i32 @test4(
131 ; CHECK-SAME: ) #[[ATTR0]] {
133 ; CHECK-NEXT: [[S:%.*]] = alloca [[STRUCT_SS:%.*]], align 64
134 ; CHECK-NEXT: call void @g3(ptr align 64 [[S]]) #[[ATTR0]]
135 ; CHECK-NEXT: ret i32 4
138 %S = alloca %struct.ss, align 2 ; <ptr> [#uses=4]
139 %X = call i32 @f4(ptr byval(%struct.ss) align 64 %S) nounwind
143 %struct.S0 = type { i32 }
145 @b = global %struct.S0 { i32 1 }, align 4
146 @a = common global i32 0, align 4
148 define internal void @f5(ptr byval(%struct.S0) nocapture readonly align 4 %p) {
150 store i32 0, ptr @b, align 4
151 %0 = load i32, ptr %p, align 4
152 store i32 %0, ptr @a, align 4
156 define i32 @test5() {
157 ; CHECK-LABEL: define i32 @test5() {
159 ; CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_S0:%.*]], align 8
160 ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[B]])
161 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[B]], ptr align 1 @b, i64 4, i1 false)
162 ; CHECK-NEXT: store i32 0, ptr @b, align 4
163 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[B]], align 4
164 ; CHECK-NEXT: store i32 [[TMP0]], ptr @a, align 4
165 ; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[B]])
166 ; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4
167 ; CHECK-NEXT: ret i32 [[TMP1]]
170 tail call void @f5(ptr byval(%struct.S0) align 4 @b)
171 %0 = load i32, ptr @a, align 4
175 ; Inlining a byval struct that is in a different address space compared to the
176 ; alloca address space is currently not supported. That would need
177 ; adjustments inside the inlined function since the address space attribute of
178 ; the inlined argument changes.
180 %struct.S1 = type { i32 }
182 @d = addrspace(1) global %struct.S1 { i32 1 }, align 4
183 @c = common addrspace(1) global i32 0, align 4
185 define internal void @f5_as1(ptr addrspace(1) byval(%struct.S1) nocapture readonly align 4 %p) {
186 ; CHECK-LABEL: define internal void @f5_as1(
187 ; CHECK-SAME: ptr addrspace(1) nocapture readonly byval([[STRUCT_S1:%.*]]) align 4 [[P:%.*]]) {
189 ; CHECK-NEXT: store i32 0, ptr addrspace(1) @d, align 4
190 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(1) [[P]], align 4
191 ; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) @c, align 4
192 ; CHECK-NEXT: ret void
195 store i32 0, ptr addrspace(1) @d, align 4
196 %0 = load i32, ptr addrspace(1) %p, align 4
197 store i32 %0, ptr addrspace(1) @c, align 4
201 define i32 @test5_as1() {
202 ; CHECK-LABEL: define i32 @test5_as1() {
204 ; CHECK-NEXT: tail call void @f5_as1(ptr addrspace(1) byval([[STRUCT_S1:%.*]]) align 4 @d)
205 ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(1) @c, align 4
206 ; CHECK-NEXT: ret i32 [[TMP0]]
209 tail call void @f5_as1(ptr addrspace(1) byval(%struct.S1) align 4 @d)
210 %0 = load i32, ptr addrspace(1) @c, align 4