; This test contains extremely tricky call graph structures for the inliner to
; handle correctly. They form cycles where the inliner introduces code that is
; immediately, or can eventually be, transformed back into the original code.
; Each step changes the call graph and so will trigger iteration. This requires
; some out-of-band way to prevent infinitely re-inlining and re-transforming
; the code.
;
; RUN: opt < %s -passes='cgscc(inline,function(sroa,instcombine))' -S | FileCheck %s
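
; Note on the pipeline: the inliner runs interleaved with SROA and instcombine
; within each SCC. After inlining, those function passes fold the store and
; load of the callee's address back into a direct call, which changes the call
; graph and re-triggers the inliner on the same SCC.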

; The `test1_*` collection of functions forms a directly cycling pattern.

define void @test1_a(i8** %ptr) {
; CHECK-LABEL: define void @test1_a(
entry:
  call void @test1_b(i8* bitcast (void (i8*, i1, i32)* @test1_b to i8*), i1 false, i32 0)
; Inlining and simplifying this call will reliably produce the exact same call,
; over and over again. However, each inlining increments the count, and so we
; expect this test case to stop after one round of inlining with a final
; argument of `1`.
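; Concretely: the initial call passes a count of 0; inlining @test1_b and
; simplifying its body yields the identical direct call with the count bumped
; to 1, at which point the out-of-band guard described in the header comment
; stops further re-inlining of @test1_b along this path.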
; CHECK: call void @test1_b(i8* bitcast (void (i8*, i1, i32)* @test1_b to i8*), i1 false, i32 1)

  ret void
}

define void @test1_b(i8* %arg, i1 %flag, i32 %inline_count) {
; CHECK-LABEL: define void @test1_b(
entry:
  %a = alloca i8*
  store i8* %arg, i8** %a
; This alloca and store should remain through any optimization.
; CHECK: %[[A:.*]] = alloca
; CHECK: store i8* %arg, i8** %[[A]]
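; (The alloca's address escapes into the `noinline` call below, so SROA cannot
; promote it away within this function and the callee stays an indirect call.)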

  br i1 %flag, label %bb1, label %bb2

bb1:
  call void @test1_a(i8** %a) noinline
  br label %bb2

bb2:
  %cast = bitcast i8** %a to void (i8*, i1, i32)**
  %p = load void (i8*, i1, i32)*, void (i8*, i1, i32)** %cast
  %inline_count_inc = add i32 %inline_count, 1
  call void %p(i8* %arg, i1 %flag, i32 %inline_count_inc)
; And we should continue to load and call indirectly through optimization.
; CHECK: %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i1, i32)**
; CHECK: %[[P:.*]] = load void (i8*, i1, i32)*, void (i8*, i1, i32)** %[[CAST]]
; CHECK: call void %[[P]](

  ret void
}
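
; The `test2_*` collection of functions forms the same cycle, but spread across
; two mutually recursive steps: it takes two rounds of inlining, through
; @test2_b and then @test2_c, to reproduce the original call.
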
define void @test2_a(i8** %ptr) {
; CHECK-LABEL: define void @test2_a(
entry:
  call void @test2_b(i8* bitcast (void (i8*, i8*, i1, i32)* @test2_b to i8*), i8* bitcast (void (i8*, i8*, i1, i32)* @test2_c to i8*), i1 false, i32 0)
; Inlining and simplifying this call will reliably produce the exact same call,
; but only after doing two rounds of inlining, first from @test2_b and then
; @test2_c. We check the exact number of inlining rounds performed before the
; cycle is cut off by inspecting the last parameter, which is incremented with
; each inlined function body.
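; Concretely: @test2_b is inlined first and its indirect call simplifies to a
; direct call to @test2_c with a count of 1; inlining @test2_c in turn yields
; the original call to @test2_b with a count of 2, where the cycle is cut off.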
; CHECK: call void @test2_b(i8* bitcast (void (i8*, i8*, i1, i32)* @test2_b to i8*), i8* bitcast (void (i8*, i8*, i1, i32)* @test2_c to i8*), i1 false, i32 2)

  ret void
}

define void @test2_b(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count) {
; CHECK-LABEL: define void @test2_b(
entry:
  %a = alloca i8*
  store i8* %arg2, i8** %a
; This alloca and store should remain through any optimization.
; CHECK: %[[A:.*]] = alloca
; CHECK: store i8* %arg2, i8** %[[A]]

  br i1 %flag, label %bb1, label %bb2

bb1:
  call void @test2_a(i8** %a) noinline
  br label %bb2

bb2:
  %p = load i8*, i8** %a
  %cast = bitcast i8* %p to void (i8*, i8*, i1, i32)*
  %inline_count_inc = add i32 %inline_count, 1
  call void %cast(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count_inc)
; And we should continue to load and call indirectly through optimization. Note
; that instcombine moves the bitcast from the loaded value onto the pointer
; operand of the load, which is the shape the patterns below expect.
; CHECK: %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i8*, i1, i32)**
; CHECK: %[[P:.*]] = load void (i8*, i8*, i1, i32)*, void (i8*, i8*, i1, i32)** %[[CAST]]
; CHECK: call void %[[P]](

  ret void
}

define void @test2_c(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count) {
; CHECK-LABEL: define void @test2_c(
entry:
  %a = alloca i8*
  store i8* %arg1, i8** %a
; This alloca and store should remain through any optimization.
; CHECK: %[[A:.*]] = alloca
; CHECK: store i8* %arg1, i8** %[[A]]

  br i1 %flag, label %bb1, label %bb2

bb1:
  call void @test2_a(i8** %a) noinline
  br label %bb2

bb2:
  %p = load i8*, i8** %a
  %cast = bitcast i8* %p to void (i8*, i8*, i1, i32)*
  %inline_count_inc = add i32 %inline_count, 1
  call void %cast(i8* %arg1, i8* %arg2, i1 %flag, i32 %inline_count_inc)
; And we should continue to load and call indirectly through optimization.
; CHECK: %[[CAST:.*]] = bitcast i8** %[[A]] to void (i8*, i8*, i1, i32)**
; CHECK: %[[P:.*]] = load void (i8*, i8*, i1, i32)*, void (i8*, i8*, i1, i32)** %[[CAST]]
; CHECK: call void %[[P]](

  ret void
}