; This test contains extremely tricky call graph structures for the inliner to
; handle correctly. They form cycles where the inliner introduces code that is
; immediately, or can eventually be, transformed back into the original code.
; And each step changes the call graph and so will trigger iteration. This
; requires some out-of-band way to prevent infinitely re-inlining and
; re-transforming the code.
;
; RUN: opt < %s -passes='cgscc(inline,function(sroa,instcombine))' -inline-threshold=50 -S | FileCheck %s
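
; The function passes run inside the cgscc pipeline matter here: sroa and
; instcombine are what fold each inlined body back into a call identical to
; the original one, and that re-formed call is what drives the inliner to
; revisit the same SCC.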

; The `test1_*` collection of functions forms a directly cycling pattern.
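;
; Concretely: @test1_a calls @test1_b, and @test1_b both calls @test1_a
; (noinline) and calls its %arg through a pointer reloaded from an alloca;
; in this test %arg is always @test1_b itself, so:
;
;   test1_a <---> test1_b
;                   \___/  (indirect self-call)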

define void @test1_a(i8** %ptr) {
; CHECK-LABEL: define void @test1_a(
entry:
  call void @test1_b(i8* bitcast (void (i8*, i1, i32)* @test1_b to i8*), i1 false, i32 0)
; Inlining and simplifying this call will reliably produce the exact same call,
; over and over again. However, each inlining increments the count, and so we
; expect this test case to stop after one round of inlining with a final
; count of 1 on the re-formed call.
; CHECK: call void @test1_b(i8* bitcast (void (i8*, i1, i32)* @test1_b to i8*), i1 false, i32 1)
  ret void
}
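
; What happens on each round: the inlined copy of @test1_b stores the bitcast
; of @test1_b into a fresh alloca and reloads it; with %flag false the
; noinline path is pruned during inlining, so sroa forwards the stored value
; to the load and instcombine turns the indirect call back into this direct,
; inlinable call with the count bumped by one.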

define void @test1_b(i8* %arg, i1 %flag, i32 %inline_count) {
; CHECK-LABEL: define void @test1_b(
entry:
  %a = alloca i8*
  store i8* %arg, i8** %a
; This alloca and store should remain through any optimization.
; CHECK: %[[A:.*]] = alloca
; CHECK: store i8* %arg, i8** %[[A]]
  br i1 %flag, label %bb1, label %bb2

bb1:
  call void @test1_a(i8** %a) noinline
  br label %bb2

bb2:
  %cast = bitcast i8** %a to void (i8*, i1, i32)**
  %p = load void (i8*, i1, i32)*, void (i8*, i1, i32)** %cast
  %inline_count_inc = add i32 %inline_count, 1
  call void %p(i8* %arg, i1 %flag, i32 %inline_count_inc)
; And we should continue to load and call indirectly through optimization.
; CHECK: %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i1, i32)**
; CHECK: %[[P:.*]] = load void (i8*, i1, i32)*, void (i8*, i1, i32)** %[[CAST]]
; CHECK: call void %[[P]](
  ret void
}
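
; The `test2_*` collection of functions forms the same pattern, indirected one
; step further: @test2_a calls @test2_b, @test2_b calls @test2_c through a
; loaded pointer, and @test2_c calls @test2_b through a loaded pointer, so two
; rounds of inlining are needed to reproduce the original call.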

define void @test2_a(i8** %ptr) {
; CHECK-LABEL: define void @test2_a(
entry:
  call void @test2_b(i8* bitcast (void (i8*, i8*, i1, i32)* @test2_b to i8*), i8* bitcast (void (i8*, i8*, i1, i32)* @test2_c to i8*), i1 false, i32 0)
; Inlining and simplifying this call will reliably produce the exact same call,
; but only after doing two rounds of inlining, first from @test2_b then
; @test2_c. We check the exact number of inlining rounds before we cut off to
; break the cycle by inspecting the last parameter that gets incremented with
; each inlined function body.
; CHECK: call void @test2_b(i8* bitcast (void (i8*, i8*, i1, i32)* @test2_b to i8*), i8* bitcast (void (i8*, i8*, i1, i32)* @test2_c to i8*), i1 false, i32 2)
  ret void
}
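
; Tracing the two rounds: inlining @test2_b here leaves a load of %arg2 (the
; bitcast of @test2_c), so simplification produces a direct call to @test2_c
; with count 1; inlining that copy of @test2_c leaves a load of %arg1 (the
; bitcast of @test2_b), re-forming this exact call with count 2.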

define void @test2_b(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count) {
; CHECK-LABEL: define void @test2_b(
entry:
  %a = alloca i8*
  store i8* %arg2, i8** %a
; This alloca and store should remain through any optimization.
; CHECK: %[[A:.*]] = alloca
; CHECK: store i8* %arg2, i8** %[[A]]
  br i1 %flag, label %bb1, label %bb2

bb1:
  call void @test2_a(i8** %a) noinline
  br label %bb2

bb2:
  %p = load i8*, i8** %a
  %cast = bitcast i8* %p to void (i8*, i8*, i1, i32)*
  %inline_count_inc = add i32 %inline_count, 1
  call void %cast(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count_inc)
; And we should continue to load and call indirectly through optimization.
; CHECK: %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i8*, i1, i32)**
; CHECK: %[[P:.*]] = load void (i8*, i8*, i1, i32)*, void (i8*, i8*, i1, i32)** %[[CAST]]
; CHECK: call void %[[P]](
  ret void
}
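
; Note that the FileCheck patterns above expect the load/bitcast pair to have
; been rewritten (an instcombine canonicalization): the source loads an i8*
; and casts the value to a function pointer, while the output casts the alloca
; and loads the function pointer type directly.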

define void @test2_c(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count) {
; CHECK-LABEL: define void @test2_c(
entry:
  %a = alloca i8*
  store i8* %arg1, i8** %a
; This alloca and store should remain through any optimization.
; CHECK: %[[A:.*]] = alloca
; CHECK: store i8* %arg1, i8** %[[A]]
  br i1 %flag, label %bb1, label %bb2

bb1:
  call void @test2_a(i8** %a) noinline
  br label %bb2

bb2:
  %p = load i8*, i8** %a
  %cast = bitcast i8* %p to void (i8*, i8*, i1, i32)*
  %inline_count_inc = add i32 %inline_count, 1
  call void %cast(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count_inc)
; And we should continue to load and call indirectly through optimization.
; CHECK: %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i8*, i1, i32)**
; CHECK: %[[P:.*]] = load void (i8*, i8*, i1, i32)*, void (i8*, i8*, i1, i32)** %[[CAST]]
; CHECK: call void %[[P]](
  ret void
}

; Another infinite inlining case. The initial call graph is like the following:
;
;     test3_a <---> test3_b
;        |             ^
;        v             |
;     test3_c <---> test3_d
;
; For all the call edges in the call graph, only test3_c and test3_d can be
; inlined into test3_a, and no other call edge can be inlined.
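;
; (test3_a and test3_b carry the noinline attribute (#0), which blocks every
; call edge into them; presumably the test3_c <-> test3_d edges are only
; profitable to inline at this -inline-threshold once the constant argument
; i32 5 from test3_a folds their branches.)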
;
; After test3_c is inlined into test3_a, the original call edge test3_a->test3_c
; will be removed, a new call edge will be added and the call graph becomes:
;
;     test3_a <---> test3_b
;        |             ^
;        v             |
;     test3_d ---------+
;        ^
;        |
;        v
;     test3_c
;
; But test3_a, test3_b, test3_c and test3_d still belong to the same SCC.
;
; Then after test3_a->test3_d is inlined, when test3_a->test3_d is converted to
; a ref edge, the original SCC will be split into two: {test3_c, test3_d} and
; {test3_a, test3_b}; immediately after, the newly added ref edge
; test3_a->test3_c will be converted to a call edge, and the two SCCs will be
; merged into the original one again. During this cycle, the original SCC will
; be added into UR.CWorklist again, and this creates an infinite loop.
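;
; (UR.CWorklist is the worklist of SCCs that the CGSCC pass manager still has
; to visit, CGSCCUpdateResult::CWorklist in the LLVM sources.)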

@a = global i64 0
@b = global i64 0

define void @test3_c(i32 %i) {
entry:
  %cmp = icmp eq i32 %i, 5
  br i1 %cmp, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  %call = tail call i64 @random()
  %t0 = load i64, i64* @a
  %add = add nsw i64 %t0, %call
  store i64 %add, i64* @a
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  tail call void @test3_d(i32 %i)
  %t6 = load i64, i64* @a
  %add85 = add nsw i64 %t6, 1
  store i64 %add85, i64* @a
  ret void
}

declare i64 @random()

define void @test3_d(i32 %i) {
entry:
  %cmp = icmp eq i32 %i, 5
  br i1 %cmp, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  %call = tail call i64 @random()
  %t0 = load i64, i64* @a
  %add = add nsw i64 %t0, %call
  store i64 %add, i64* @a
  br label %if.end

if.end:                                           ; preds = %entry, %if.then
  tail call void @test3_c(i32 %i)
  tail call void @test3_b()
  %t6 = load i64, i64* @a
  %add79 = add nsw i64 %t6, 3
  store i64 %add79, i64* @a
  ret void
}

; Function Attrs: noinline
define void @test3_b() #0 {
entry:
  tail call void @test3_a()
  %t0 = load i64, i64* @a
  %add = add nsw i64 %t0, 2
  store i64 %add, i64* @a
  ret void
}

; Check test3_c is inlined into test3_a once and only once.
; CHECK-LABEL: @test3_a(
; CHECK: tail call void @test3_b()
; CHECK-NEXT: tail call void @test3_d(i32 5)
; CHECK-NEXT: %[[LD1:.*]] = load i64, i64* @a
; CHECK-NEXT: %[[ADD1:.*]] = add nsw i64 %[[LD1]], 1
; CHECK-NEXT: store i64 %[[ADD1]], i64* @a
; CHECK-NEXT: %[[LD2:.*]] = load i64, i64* @b
; CHECK-NEXT: %[[ADD2:.*]] = add nsw i64 %[[LD2]], 5
; CHECK-NEXT: store i64 %[[ADD2]], i64* @b
; CHECK-NEXT: ret void
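;
; These assertions match the single inlined copy of @test3_c, specialized for
; the constant argument i32 5: the branch to the @random() path folds away,
; leaving the call to @test3_d, the increment of @a by 1, and then test3_a's
; own increment of @b by 5. The call to @test3_d(i32 5) remaining un-inlined
; is the evidence that the infinite inlining cycle was broken.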

; Function Attrs: noinline
define void @test3_a() #0 {
entry:
  tail call void @test3_b()
  tail call void @test3_c(i32 5)
  %t0 = load i64, i64* @b
  %add = add nsw i64 %t0, 5
  store i64 %add, i64* @b
  ret void
}

attributes #0 = { noinline }