; REQUIRES: x86-registered-target

; Test devirtualization through the thin link and backend, ensuring that
; it is only applied when the type test corresponding to a devirtualization
; dominates an indirect call using the same vtable pointer. Indirect
; call promotion and inlining may introduce a guarded indirect call
; that can be devirtualized, which uses the same vtable address as the
; fallback indirect call that cannot be devirtualized.

; The code below illustrates the structure when we started with code like:
;
; class A {
;   virtual int foo() { return 1; }
;   virtual int bar() { return 1; }
; };
; class B : public A {
;   int foo();
;   int bar();
; };
;
; int baz(A *a) {
;   return a->foo(); // ICP profile says most calls are to B::foo()
; }

; After the compile step, which will perform ICP and a round of inlining, we
; have something like:
;
; int baz(A *a) {
;   if (&a->foo() == B::foo())   // Guard inserted by ICP
;     return ((B*)a)->bar();     // Inlined from promoted direct call to B::foo()
;   else
;     return a->foo();           // Fallback indirect call
; }

; The inlined code sequence will have a type test against "_ZTS1B",
; which will allow us to devirtualize the indirect call ((B*)a)->bar() to B::bar().
; Both that type test and the one for the fallback a->foo() indirect call
; will use the same vtable pointer. Without a dominance check, we could
; incorrectly devirtualize a->foo() to B::foo().

; RUN: opt -thinlto-bc -thinlto-split-lto-unit -o %t.o %s
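
; Note: -thinlto-split-lto-unit splits each module into a regular LTO half
; (holding the vtable globals and their !type metadata) and a ThinLTO half,
; which is what enables the hybrid whole-program devirtualization exercised here.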

; RUN: llvm-lto2 run %t.o -save-temps -pass-remarks=. \
; RUN:   -o %t3 \
; RUN:   -r=%t.o,_Z3bazP1A,px \
; RUN:   -r=%t.o,_ZN1A3fooEv, \
; RUN:   -r=%t.o,_ZN1A3barEv, \
; RUN:   -r=%t.o,_ZN1B3fooEv, \
; RUN:   -r=%t.o,_ZN1B3barEv, \
; RUN:   -r=%t.o,_ZTV1A, \
; RUN:   -r=%t.o,_ZTV1B, \
; RUN:   -r=%t.o,_ZN1A3fooEv, \
; RUN:   -r=%t.o,_ZN1A3barEv, \
; RUN:   -r=%t.o,_ZN1B3fooEv, \
; RUN:   -r=%t.o,_ZN1B3barEv, \
; RUN:   -r=%t.o,_ZTV1A,px \
; RUN:   -r=%t.o,_ZTV1B,px 2>&1 | FileCheck %s --check-prefix=REMARK
; RUN: llvm-dis %t3.1.4.opt.bc -o - | FileCheck %s --check-prefix=CHECK-IR
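
; In the -r resolutions above, 'p' marks the prevailing definition and 'x' makes
; the symbol visible outside the LTO unit; each symbol is listed twice because the
; split bitcode file contains both a regular LTO and a ThinLTO module, and llvm-lto2
; needs a resolution for each occurrence. The invocation below repeats the same
; test with the new pass manager.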

; RUN: llvm-lto2 run %t.o -save-temps -use-new-pm -pass-remarks=. \
; RUN:   -o %t3 \
; RUN:   -r=%t.o,_Z3bazP1A,px \
; RUN:   -r=%t.o,_ZN1A3fooEv, \
; RUN:   -r=%t.o,_ZN1A3barEv, \
; RUN:   -r=%t.o,_ZN1B3fooEv, \
; RUN:   -r=%t.o,_ZN1B3barEv, \
; RUN:   -r=%t.o,_ZTV1A, \
; RUN:   -r=%t.o,_ZTV1B, \
; RUN:   -r=%t.o,_ZN1A3fooEv, \
; RUN:   -r=%t.o,_ZN1A3barEv, \
; RUN:   -r=%t.o,_ZN1B3fooEv, \
; RUN:   -r=%t.o,_ZN1B3barEv, \
; RUN:   -r=%t.o,_ZTV1A,px \
; RUN:   -r=%t.o,_ZTV1B,px 2>&1 | FileCheck %s --check-prefix=REMARK
; RUN: llvm-dis %t3.1.4.opt.bc -o - | FileCheck %s --check-prefix=CHECK-IR

; We should only devirtualize the inlined call to bar().
; REMARK-NOT: single-impl: devirtualized a call to _ZN1B3fooEv
; REMARK: single-impl: devirtualized a call to _ZN1B3barEv
; REMARK-NOT: single-impl: devirtualized a call to _ZN1B3fooEv

target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-grtev4-linux-gnu"

%class.A = type { i32 (...)** }
%class.B = type { %class.A }

@_ZTV1A = linkonce_odr hidden unnamed_addr constant { [4 x i8*] } { [4 x i8*] [i8* null, i8* undef, i8* bitcast (i32 (%class.A*)* @_ZN1A3fooEv to i8*), i8* bitcast (i32 (%class.A*)* @_ZN1A3barEv to i8*)] }, align 8, !type !0
@_ZTV1B = hidden unnamed_addr constant { [4 x i8*] } { [4 x i8*] [i8* null, i8* undef, i8* bitcast (i32 (%class.B*)* @_ZN1B3fooEv to i8*), i8* bitcast (i32 (%class.B*)* @_ZN1B3barEv to i8*)] }, align 8, !type !0, !type !1
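
; The !type metadata on the vtables marks offset 16 (the address point, past the
; 8-byte offset-to-top and RTTI slots) as compatible with the named type id;
; _ZTV1B is compatible with both _ZTS1A and _ZTS1B since B derives from A.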

define hidden i32 @_Z3bazP1A(%class.A* %a) local_unnamed_addr {
entry:
  %0 = bitcast %class.A* %a to i32 (%class.A*)***
  %vtable = load i32 (%class.A*)**, i32 (%class.A*)*** %0, align 8
  %1 = bitcast i32 (%class.A*)** %vtable to i8*
  %2 = tail call i1 @llvm.type.test(i8* %1, metadata !"_ZTS1A")
  tail call void @llvm.assume(i1 %2)
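; The llvm.type.test/llvm.assume pair records that %vtable is the address point of
; a vtable compatible with _ZTS1A; since both A::foo and B::foo are possible
; targets, the fallback indirect call below cannot be devirtualized from this test.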
  %3 = load i32 (%class.A*)*, i32 (%class.A*)** %vtable, align 8
; This is the compare instruction inserted by ICP
  %4 = icmp eq i32 (%class.A*)* %3, bitcast (i32 (%class.B*)* @_ZN1B3fooEv to i32 (%class.A*)*)
  br i1 %4, label %if.true.direct_targ, label %if.false.orig_indirect

; This block contains the promoted and inlined call to B::foo().
; CHECK-IR: if.true.direct_targ: ; preds = %entry
if.true.direct_targ: ; preds = %entry
  %5 = bitcast %class.A* %a to %class.B*
  %6 = bitcast i32 (%class.A*)** %vtable to i8*
  %7 = tail call i1 @llvm.type.test(i8* %6, metadata !"_ZTS1B")
  tail call void @llvm.assume(i1 %7)
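; This second type test, against the derived type id _ZTS1B, dominates only the
; virtual call below, so only that call may be devirtualized using B's vtable.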
  %vfn.i1 = getelementptr inbounds i32 (%class.A*)*, i32 (%class.A*)** %vtable, i64 1
  %vfn.i = bitcast i32 (%class.A*)** %vfn.i1 to i32 (%class.B*)**
  %8 = load i32 (%class.B*)*, i32 (%class.B*)** %vfn.i, align 8
; The call to bar() can be devirtualized to a call to B::bar(), since it was
; inlined from B::foo() after ICP introduced the guarded promotion.
; CHECK-IR: %call.i = tail call i32 @_ZN1B3barEv(%class.B* %3)
  %call.i = tail call i32 %8(%class.B* %5)
  br label %if.end.icp

; This block contains the fallback indirect call a->foo().
; CHECK-IR: if.false.orig_indirect:
if.false.orig_indirect: ; preds = %entry
; The fallback indirect call to foo() cannot be devirtualized.
; CHECK-IR: %call = tail call i32 %
  %call = tail call i32 %3(%class.A* nonnull %a)
  br label %if.end.icp

if.end.icp: ; preds = %if.false.orig_indirect, %if.true.direct_targ
  %9 = phi i32 [ %call, %if.false.orig_indirect ], [ %call.i, %if.true.direct_targ ]
  ret i32 %9
}

declare i1 @llvm.type.test(i8*, metadata)

declare void @llvm.assume(i1)

declare dso_local i32 @_ZN1B3fooEv(%class.B* %this) unnamed_addr
declare dso_local i32 @_ZN1B3barEv(%class.B*) unnamed_addr
declare dso_local i32 @_ZN1A3barEv(%class.A* %this) unnamed_addr
declare dso_local i32 @_ZN1A3fooEv(%class.A* %this) unnamed_addr

!0 = !{i64 16, !"_ZTS1A"}
!1 = !{i64 16, !"_ZTS1B"}