// [AMDGPU][True16][CodeGen] true16 codegen pattern for v_med3_u/i16 (#121850)
// [llvm-project.git] / clang / test / CodeGenCXX / ptrauth-member-function-pointer.cpp
// blob 0a9ac3fa510f56535ec0da8af0fb90ec5d1e0b6f
// NOTE(review): this is a scraped copy of the test -- each retained line keeps
// its original line number as a prefix, and some lines (blank lines, closing
// braces, access specifiers) were elided by the scrape. Compare with upstream
// before editing.
1 // RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -o - %s | FileCheck -check-prefixes=CHECK,NODEBUG,DARWIN %s
2 // RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -debug-info-kind=limited -o - %s | FileCheck -check-prefixes=CHECK,DARWIN %s
3 // RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -stack-protector 1 -o - %s | FileCheck %s -check-prefix=STACK-PROT
4 // RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -stack-protector 2 -o - %s | FileCheck %s -check-prefix=STACK-PROT
5 // RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -stack-protector 3 -o - %s | FileCheck %s -check-prefix=STACK-PROT
7 // RUN: %clang_cc1 -triple aarch64-linux-gnu -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -o - %s | FileCheck -check-prefixes=CHECK,NODEBUG,ELF %s
8 // RUN: %clang_cc1 -triple aarch64-linux-gnu -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -debug-info-kind=limited -o - %s | FileCheck -check-prefixes=CHECK,ELF %s
9 // RUN: %clang_cc1 -triple aarch64-linux-gnu -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -stack-protector 1 -o - %s | FileCheck %s -check-prefix=STACK-PROT
10 // RUN: %clang_cc1 -triple aarch64-linux-gnu -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -stack-protector 2 -o - %s | FileCheck %s -check-prefix=STACK-PROT
11 // RUN: %clang_cc1 -triple aarch64-linux-gnu -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -stack-protector 3 -o - %s | FileCheck %s -check-prefix=STACK-PROT
14 // CHECK: @gmethod0 = global { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base011nonvirtual0Ev, i32 0, i64 [[TYPEDISC1:35591]]) to i64), i64 0 }, align 8
15 // CHECK: @gmethod1 = global { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived011nonvirtual5Ev, i32 0, i64 [[TYPEDISC0:22163]]) to i64), i64 0 }, align 8
16 // CHECK: @gmethod2 = global { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 [[TYPEDISC0]]) to i64), i64 0 }, align 8
18 // CHECK: @__const._Z13testArrayInitv.p0 = private unnamed_addr constant [1 x { i64, i64 }] [{ i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base011nonvirtual0Ev, i32 0, i64 35591) to i64), i64 0 }], align 8
19 // CHECK: @__const._Z13testArrayInitv.p1 = private unnamed_addr constant [1 x { i64, i64 }] [{ i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 35591) to i64), i64 0 }], align 8
20 // CHECK: @__const._Z13testArrayInitv.c0 = private unnamed_addr constant %struct.Class0 { { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base011nonvirtual0Ev, i32 0, i64 35591) to i64), i64 0 } }, align 8
21 // CHECK: @__const._Z13testArrayInitv.c1 = private unnamed_addr constant %struct.Class0 { { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 35591) to i64), i64 0 } }, align 8
23 // CHECK: @_ZTV5Base0 = unnamed_addr constant { [5 x ptr] } { [5 x ptr] [ptr null, ptr @_ZTI5Base0,
24 // CHECK-SAME: ptr ptrauth (ptr @_ZN5Base08virtual1Ev, i32 0, i64 55600, ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV5Base0, i32 0, i32 0, i32 2)),
25 // CHECK-SAME: ptr ptrauth (ptr @_ZN5Base08virtual3Ev, i32 0, i64 53007, ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV5Base0, i32 0, i32 0, i32 3)),
26 // CHECK-SAME: ptr ptrauth (ptr @_ZN5Base016virtual_variadicEiz, i32 0, i64 7464, ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV5Base0, i32 0, i32 0, i32 4))] }, align 8
// Minimal stand-in for <initializer_list> so the test can pass braced
// init-lists (see testArrayInit below) without including system headers.
// NOTE(review): the scrape elided some lines of this class (e.g. the original
// "public:" and closing "};") -- verify against the upstream test.
28 typedef __SIZE_TYPE__ size_t;
30 namespace std {
31 template <typename _Ep>
32 class initializer_list {
33 const _Ep *__begin_;
34 size_t __size_;
36 initializer_list(const _Ep *__b, size_t __s);
38 } // namespace std
// Polymorphic base exercised throughout the test. Its vtable (@_ZTV5Base0,
// checked above) carries one ptrauth-signed slot per virtual method, each with
// its own discriminator (55600 / 53007 / 7464 in the CHECK lines).
40 struct Base0 {
41 void nonvirtual0();
42 virtual void virtual1();
43 virtual void virtual3();
44 virtual void virtual_variadic(int, ...);
// A0: small aggregate returned in registers -- the return_agg vfpthunk is
// checked above to return [2 x i64] directly.
47 struct A0 {
48 int d[4];
// A1: large aggregate returned indirectly -- the sret vfpthunk above forwards
// the caller's sret pointer to the musttail call.
51 struct A1 {
52 int d[8];
// trivial_abi type passed in registers ([2 x i64] per the CHECK line above);
// the thunk is checked not to destruct the argument.
55 struct __attribute__((trivial_abi)) TrivialS {
56 TrivialS(const TrivialS &);
57 ~TrivialS();
58 int p[4];
// Single-inheritance derived class; taking &Derived0::<method> for inherited
// members re-signs with Derived0's type discriminator (TYPEDISC1 in the
// CHECK lines) rather than Base0's.
61 struct Derived0 : Base0 {
62 void virtual1() override;
63 void nonvirtual5();
64 virtual void virtual6();
65 virtual A0 return_agg();
66 virtual A1 sret();
67 virtual void trivial_abi(TrivialS);
// Second base for the multiple-inheritance case below.
70 struct Base1 {
71 virtual void virtual7();
// Multiple inheritance: &Derived1::virtual7 addresses a method in the
// non-primary base, exercising the _ZN8Derived18virtual7Ev_vfpthunk_ path
// checked above.
74 struct Derived1 : Base0, Base1 {
75 void virtual1() override;
76 void virtual7() override;
// Member-function-pointer types used by the tests; each distinct prototype
// gets its own signing discriminator (TYPEDISC0/TYPEDISC1 above).
79 typedef void (Base0::*MethodTy0)();
80 typedef void (Base0::*VariadicMethodTy0)(int, ...);
81 typedef void (Derived0::*MethodTy1)();
// Aggregate wrapping a member pointer, for the constant-initializer checks
// (@__const._Z13testArrayInitv.c0 / .c1 above).
83 struct Class0 {
84 MethodTy1 m0;
87 // CHECK: define{{.*}} void @_ZN5Base08virtual1Ev(
89 // CHECK: define{{.*}} void @_Z5test0v()
90 // CHECK: %[[METHOD0:.*]] = alloca { i64, i64 }, align 8
91 // CHECK-NEXT: %[[VARMETHOD1:.*]] = alloca { i64, i64 }, align 8
92 // CHECK-NEXT: %[[METHOD2:.*]] = alloca { i64, i64 }, align 8
93 // CHECK-NEXT: %[[METHOD3:.*]] = alloca { i64, i64 }, align 8
94 // CHECK-NEXT: %[[METHOD4:.*]] = alloca { i64, i64 }, align 8
95 // CHECK-NEXT: %[[METHOD5:.*]] = alloca { i64, i64 }, align 8
96 // CHECK-NEXT: %[[METHOD6:.*]] = alloca { i64, i64 }, align 8
97 // CHECK-NEXT: %[[METHOD7:.*]] = alloca { i64, i64 }, align 8
98 // CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base011nonvirtual0Ev, i32 0, i64 [[TYPEDISC0]]) to i64), i64 0 }, ptr %[[METHOD0]], align 8
99 // CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 [[TYPEDISC0]]) to i64), i64 0 }, ptr %[[METHOD0]], align 8
100 // CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual3Ev_vfpthunk_, i32 0, i64 [[TYPEDISC0]]) to i64), i64 0 }, ptr %[[METHOD0]], align 8
101 // CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base016virtual_variadicEiz_vfpthunk_, i32 0, i64 34368) to i64), i64 0 }, ptr %[[VARMETHOD1]], align 8
102 // CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base011nonvirtual0Ev, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %[[METHOD2]], align 8
103 // CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %[[METHOD2]], align 8
104 // CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual3Ev_vfpthunk_, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %[[METHOD2]], align 8
105 // CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived011nonvirtual5Ev, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %[[METHOD2]], align 8
106 // CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived08virtual6Ev_vfpthunk_, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %[[METHOD2]], align 8
107 // CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived010return_aggEv_vfpthunk_, i32 0, i64 64418) to i64), i64 0 }, ptr %[[METHOD3]], align 8
108 // CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived04sretEv_vfpthunk_, i32 0, i64 28187) to i64), i64 0 }, ptr %[[METHOD4]], align 8
109 // CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived011trivial_abiE8TrivialS_vfpthunk_, i32 0, i64 8992) to i64), i64 0 }, ptr %[[METHOD5]], align 8
110 // CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base18virtual7Ev_vfpthunk_, i32 0, i64 [[TYPEDISC2:61596]]) to i64), i64 0 }, ptr %[[METHOD6]], align 8
111 // CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived18virtual7Ev_vfpthunk_, i32 0, i64 25206) to i64), i64 0 }, ptr %[[METHOD7]], align 8
112 // CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 25206) to i64), i64 0 }, ptr %[[METHOD7]], align 8
113 // CHECK: ret void
115 // CHECK: define linkonce_odr hidden void @_ZN5Base08virtual1Ev_vfpthunk_(ptr noundef %[[THIS:.*]])
116 // CHECK: %[[THIS_ADDR:.*]] = alloca ptr, align 8
117 // CHECK: store ptr %[[THIS]], ptr %[[THIS_ADDR]], align 8
118 // CHECK: %[[THIS1:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
119 // CHECK-NEXT: %[[V0:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
120 // CHECK-NEXT: %[[VTABLE:.*]] = load ptr, ptr %[[THIS1]], align 8
121 // CHECK-NEXT: %[[V2:.*]] = ptrtoint ptr %[[VTABLE]] to i64
122 // CHECK-NEXT: %[[V3:.*]] = call i64 @llvm.ptrauth.auth(i64 %[[V2]], i32 2, i64 0)
123 // CHECK-NEXT: %[[V4:.*]] = inttoptr i64 %[[V3]] to ptr
124 // CHECK-NEXT: %[[VFN:.*]] = getelementptr inbounds ptr, ptr %[[V4]], i64 0
125 // CHECK-NEXT: %[[V5:.*]] = load ptr, ptr %[[VFN]], align 8
126 // CHECK-NEXT: %[[V6:.*]] = ptrtoint ptr %[[VFN]] to i64
127 // CHECK-NEXT: %[[V7:.*]] = call i64 @llvm.ptrauth.blend(i64 %[[V6]], i64 55600)
128 // CHECK-NEXT: musttail call void %[[V5]](ptr noundef nonnull align {{[0-9]+}} dereferenceable(8) %[[V0]]) [ "ptrauth"(i32 0, i64 %[[V7]]) ]
129 // CHECK-NEXT: ret void
131 // CHECK: define linkonce_odr hidden void @_ZN5Base08virtual3Ev_vfpthunk_(ptr noundef %{{.*}})
132 // CHECK: load ptr, ptr %{{.*}}, align 8
133 // CHECK: load ptr, ptr %{{.*}}, align 8
134 // CHECK: %[[VTABLE:.*]] = load ptr, ptr %{{.*}}, align 8
135 // CHECK: %[[V2:.*]] = ptrtoint ptr %[[VTABLE]] to i64
136 // CHECK: %[[V3:.*]] = call i64 @llvm.ptrauth.auth(i64 %[[V2]], i32 2, i64 0)
137 // CHECK: %[[V4:.*]] = inttoptr i64 %[[V3]] to ptr
138 // CHECK: getelementptr inbounds ptr, ptr %[[V4]], i64 1
139 // CHECK: call i64 @llvm.ptrauth.blend(i64 %{{.*}}, i64 53007)
141 // CHECK: define linkonce_odr hidden void @_ZN5Base016virtual_variadicEiz_vfpthunk_(ptr noundef %[[THIS:.*]], i32 noundef %0, ...)
142 // CHECK: %[[THIS_ADDR:.*]] = alloca ptr, align 8
143 // CHECK-NEXT: %[[_ADDR:.*]] = alloca i32, align 4
144 // CHECK-NEXT: store ptr %[[THIS]], ptr %[[THIS_ADDR]], align 8
145 // CHECK: store i32 %0, ptr %[[_ADDR]], align 4
146 // CHECK: %[[THIS1:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
147 // CHECK-NEXT: %[[V1:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
148 // CHECK-NEXT: %[[V2:.*]] = load i32, ptr %[[_ADDR]], align 4
149 // CHECK-NEXT: %[[VTABLE:.*]] = load ptr, ptr %[[THIS1]], align 8
150 // CHECK-NEXT: %[[V4:.*]] = ptrtoint ptr %[[VTABLE]] to i64
151 // CHECK-NEXT: %[[V5:.*]] = call i64 @llvm.ptrauth.auth(i64 %[[V4]], i32 2, i64 0)
152 // CHECK-NEXT: %[[V6:.*]] = inttoptr i64 %[[V5]] to ptr
153 // CHECK-NEXT: %[[VFN:.*]] = getelementptr inbounds ptr, ptr %[[V6]], i64 2
154 // CHECK-NEXT: %[[V7:.*]] = load ptr, ptr %[[VFN]], align 8
155 // CHECK-NEXT: %[[V8:.*]] = ptrtoint ptr %[[VFN]] to i64
156 // CHECK-NEXT: %[[V9:.*]] = call i64 @llvm.ptrauth.blend(i64 %[[V8]], i64 7464)
157 // CHECK-NEXT: musttail call void (ptr, i32, ...) %[[V7]](ptr noundef nonnull align {{[0-9]+}} dereferenceable(8) %[[V1]], i32 noundef %[[V2]], ...) [ "ptrauth"(i32 0, i64 %[[V9]]) ]
158 // CHECK-NEXT: ret void
160 // CHECK: define linkonce_odr hidden void @_ZN8Derived08virtual6Ev_vfpthunk_(ptr noundef %[[THIS:.*]])
161 // CHECK: %[[THIS_ADDR:.*]] = alloca ptr, align 8
162 // CHECK: store ptr %[[THIS]], ptr %[[THIS_ADDR]], align 8
163 // CHECK: %[[THIS1:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
164 // CHECK: %[[V0:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
165 // CHECK: %[[VTABLE:.*]] = load ptr, ptr %[[THIS1]], align 8
166 // CHECK: %[[V1:.*]] = ptrtoint ptr %[[VTABLE]] to i64
167 // CHECK: %[[V2:.*]] = call i64 @llvm.ptrauth.auth(i64 %[[V1]], i32 2, i64 0)
168 // CHECK: %[[V3:.*]] = inttoptr i64 %[[V2]] to ptr
169 // CHECK: %[[VFN:.*]] = getelementptr inbounds ptr, ptr %[[V3]], i64 3
170 // CHECK: %[[V5:.*]] = ptrtoint ptr %[[VFN]] to i64
171 // CHECK: call i64 @llvm.ptrauth.blend(i64 %[[V5]], i64 55535)
173 // Check that the return value of the musttail call isn't copied to a temporary.
175 // CHECK: define linkonce_odr hidden [2 x i64] @_ZN8Derived010return_aggEv_vfpthunk_(ptr noundef %{{.*}})
176 // CHECK: %[[CALL:.*]] = musttail call [2 x i64] %{{.*}}(ptr noundef nonnull align {{[0-9]+}} dereferenceable(8) %{{.*}}) [ "ptrauth"(i32 0, i64 %{{.*}}) ]
177 // CHECK-NEXT: ret [2 x i64] %[[CALL]]
179 // Check that the sret pointer passed to the caller is forwarded to the musttail
180 // call.
182 // CHECK: define linkonce_odr hidden void @_ZN8Derived04sretEv_vfpthunk_(ptr dead_on_unwind noalias writable sret(%struct.A1) align 4 %[[AGG_RESULT:.*]], ptr noundef %{{.*}})
183 // CHECK: musttail call void %{{.*}}(ptr dead_on_unwind writable sret(%struct.A1) align 4 %[[AGG_RESULT]], ptr noundef nonnull align {{[0-9]+}} dereferenceable(8) %{{.*}}) [ "ptrauth"(i32 0, i64 %{{.*}}) ]
184 // CHECK-NEXT: ret void
186 // Check that the thunk function doesn't destruct the trivial_abi argument.
188 // CHECK: define linkonce_odr hidden void @_ZN8Derived011trivial_abiE8TrivialS_vfpthunk_(ptr noundef %{{.*}}, [2 x i64] %{{.*}})
189 // NODEBUG-NOT: call
190 // CHECK: call i64 @llvm.ptrauth.auth(
191 // NODEBUG-NOT: call
192 // CHECK: call i64 @llvm.ptrauth.blend(
193 // NODEBUG-NOT: call
194 // CHECK: musttail call void
195 // CHECK-NEXT: ret void
197 // CHECK: define linkonce_odr hidden void @_ZN5Base18virtual7Ev_vfpthunk_(ptr noundef %[[THIS:.*]])
198 // CHECK: entry:
199 // CHECK: %[[THIS_ADDR:.*]] = alloca ptr, align 8
200 // CHECK: store ptr %[[THIS]], ptr %[[THIS_ADDR]], align 8
201 // CHECK: %[[THIS1:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
202 // CHECK: %[[V0:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
203 // CHECK: %[[VTABLE:.*]] = load ptr, ptr %[[THIS1]], align 8
204 // CHECK: %[[V1:.*]] = ptrtoint ptr %[[VTABLE]] to i64
205 // CHECK: %[[V2:.*]] = call i64 @llvm.ptrauth.auth(i64 %[[V1]], i32 2, i64 0)
206 // CHECK: %[[V3:.*]] = inttoptr i64 %[[V2]] to ptr
207 // CHECK: getelementptr inbounds ptr, ptr %[[V3]], i64 0
209 // CHECK: define linkonce_odr hidden void @_ZN8Derived18virtual7Ev_vfpthunk_(ptr noundef %[[THIS:.*]])
210 // CHECK: %[[THIS_ADDR:.*]] = alloca ptr, align 8
211 // CHECK: store ptr %[[THIS]], ptr %[[THIS_ADDR]], align 8
212 // CHECK: %[[THIS1:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
213 // CHECK: load ptr, ptr %[[THIS_ADDR]], align 8
214 // CHECK: %[[VTABLE:.*]] = load ptr, ptr %[[THIS1]], align 8
215 // CHECK: %[[V1:.*]] = ptrtoint ptr %[[VTABLE]] to i64
216 // CHECK: %[[V2:.*]] = call i64 @llvm.ptrauth.auth(i64 %[[V1]], i32 2, i64 0)
217 // CHECK: %[[V3:.*]] = inttoptr i64 %[[V2]] to ptr
218 // CHECK: getelementptr inbounds ptr, ptr %[[V3]], i64 3
// Out-of-line definition of the first virtual member; presumably serves as
// Base0's key function so @_ZTV5Base0 (checked above) is emitted in this TU.
220 void Base0::virtual1() {}
// Forms member function pointers to nonvirtual and virtual methods.
// Nonvirtual targets are signed function pointers; virtual targets go
// through the _vfpthunk_ wrappers, as pinned by the store CHECK lines above.
222 void test0() {
223 MethodTy0 method0;
224 method0 = &Base0::nonvirtual0;
225 method0 = &Base0::virtual1;
226 method0 = &Base0::virtual3;
// Variadic prototype gets its own discriminator (34368 above).
228 VariadicMethodTy0 varmethod1;
229 varmethod1 = &Base0::virtual_variadic;
// Same methods named through Derived0: signed with TYPEDISC1 instead.
231 MethodTy1 method2;
232 method2 = &Derived0::nonvirtual0;
233 method2 = &Derived0::virtual1;
234 method2 = &Derived0::virtual3;
235 method2 = &Derived0::nonvirtual5;
236 method2 = &Derived0::virtual6;
// Aggregate-returning, sret, and trivial_abi prototypes each exercise a
// dedicated vfpthunk shape (checked above).
238 A0 (Derived0::*method3)();
239 method3 = &Derived0::return_agg;
241 A1 (Derived0::*method4)();
242 method4 = &Derived0::sret;
244 void (Derived0::*method5)(TrivialS);
245 method5 = &Derived0::trivial_abi;
247 void (Base1::*method6)();
248 method6 = &Base1::virtual7;
// Multiple inheritance: virtual7 lives in the non-primary base of Derived1.
250 void (Derived1::*method7)();
251 method7 = &Derived1::virtual7;
252 method7 = &Derived1::virtual1;
255 // CHECK: define{{.*}} void @_Z5test1P5Base0MS_FvvE(ptr noundef %[[A0:.*]], [2 x i64] %[[A1_COERCE:.*]])
256 // CHECK: %[[A1:.*]] = alloca { i64, i64 }, align 8
257 // CHECK: %[[A0_ADDR:.*]] = alloca ptr, align 8
258 // CHECK: %[[A1_ADDR:.*]] = alloca { i64, i64 }, align 8
259 // CHECK: store [2 x i64] %[[A1_COERCE]], ptr %[[A1]], align 8
260 // CHECK: %[[A11:.*]] = load { i64, i64 }, ptr %[[A1]], align 8
261 // CHECK: store ptr %[[A0]], ptr %[[A0_ADDR]], align 8
262 // CHECK: store { i64, i64 } %[[A11]], ptr %[[A1_ADDR]], align 8
263 // CHECK: %[[V1:.*]] = load ptr, ptr %[[A0_ADDR]], align 8
264 // CHECK: %[[V2:.*]] = load { i64, i64 }, ptr %[[A1_ADDR]], align 8
265 // CHECK: %[[MEMPTR_ADJ:.*]] = extractvalue { i64, i64 } %[[V2]], 1
266 // CHECK: %[[MEMPTR_ADJ_SHIFTED:.*]] = ashr i64 %[[MEMPTR_ADJ]], 1
267 // CHECK: %[[V4:.*]] = getelementptr inbounds i8, ptr %[[V1]], i64 %[[MEMPTR_ADJ_SHIFTED]]
268 // CHECK: %[[MEMPTR_PTR:.*]] = extractvalue { i64, i64 } %[[V2]], 0
269 // CHECK: %[[V5:.*]] = and i64 %[[MEMPTR_ADJ]], 1
270 // CHECK: %[[MEMPTR_ISVIRTUAL:.*]] = icmp ne i64 %[[V5]], 0
271 // CHECK: br i1 %[[MEMPTR_ISVIRTUAL]]
273 // CHECK: %[[VTABLE:.*]] = load ptr, ptr %[[V4]], align 8
274 // CHECK: %[[V7:.*]] = ptrtoint ptr %[[VTABLE]] to i64
275 // CHECK: %[[V8:.*]] = call i64 @llvm.ptrauth.auth(i64 %[[V7]], i32 2, i64 0)
276 // CHECK: %[[V9:.*]] = inttoptr i64 %[[V8]] to ptr
277 // DARWIN: %[[V10:.*]] = trunc i64 %[[MEMPTR_PTR]] to i32
278 // DARWIN: %[[V11:.*]] = zext i32 %[[V10]] to i64
279 // DARWIN: %[[V12:.*]] = getelementptr i8, ptr %[[V9]], i64 %[[V11]]
280 // ELF: %[[V12:.*]] = getelementptr i8, ptr %[[V9]], i64 %[[MEMPTR_PTR]]
281 // CHECK: %[[MEMPTR_VIRTUALFN:.*]] = load ptr, ptr %[[V12]], align 8
282 // CHECK: br
284 // CHECK: %[[MEMPTR_NONVIRTUALFN:.*]] = inttoptr i64 %[[MEMPTR_PTR]] to ptr
285 // CHECK: br
287 // CHECK: %[[V14:.*]] = phi ptr [ %[[MEMPTR_VIRTUALFN]], {{.*}} ], [ %[[MEMPTR_NONVIRTUALFN]], {{.*}} ]
288 // CHECK: %[[V15:.*]] = phi i64 [ 0, {{.*}} ], [ [[TYPEDISC0]], {{.*}} ]
289 // CHECK: call void %[[V14]](ptr noundef nonnull align {{[0-9]+}} dereferenceable(8) %[[V4]]) [ "ptrauth"(i32 0, i64 %[[V15]]) ]
290 // CHECK: ret void
// Call through a member function pointer: codegen tests the low adjustment
// bit (MEMPTR_ISVIRTUAL above) and either loads+authenticates a vtable slot
// or uses the signed nonvirtual pointer, then calls with a "ptrauth" bundle.
292 void test1(Base0 *a0, MethodTy0 a1) {
293 (a0->*a1)();
296 // CHECK: define{{.*}} void @_Z15testConversion0M5Base0FvvEM8Derived0FvvE([2 x i64] %[[METHOD0_COERCE:.*]], [2 x i64] %[[METHOD1_COERCE:.*]])
297 // CHECK: %[[METHOD0:.*]] = alloca { i64, i64 }, align 8
298 // CHECK: %[[METHOD1:.*]] = alloca { i64, i64 }, align 8
299 // CHECK: %[[METHOD0_ADDR:.*]] = alloca { i64, i64 }, align 8
300 // CHECK: %[[METHOD1_ADDR:.*]] = alloca { i64, i64 }, align 8
301 // CHECK: store [2 x i64] %[[METHOD0_COERCE]], ptr %[[METHOD0]], align 8
302 // CHECK: %[[METHOD01:.*]] = load { i64, i64 }, ptr %[[METHOD0]], align 8
303 // CHECK: store [2 x i64] %[[METHOD1_COERCE]], ptr %[[METHOD1]], align 8
304 // CHECK: %[[METHOD12:.*]] = load { i64, i64 }, ptr %[[METHOD1]], align 8
305 // CHECK: store { i64, i64 } %[[METHOD01]], ptr %[[METHOD0_ADDR]], align 8
306 // CHECK: store { i64, i64 } %[[METHOD12]], ptr %[[METHOD1_ADDR]], align 8
307 // CHECK: %[[V2:.*]] = load { i64, i64 }, ptr %[[METHOD0_ADDR]], align 8
308 // CHECK: %[[MEMPTR_PTR:.*]] = extractvalue { i64, i64 } %[[V2]], 0
309 // CHECK: %[[MEMPTR_ADJ:.*]] = extractvalue { i64, i64 } %[[V2]], 1
310 // CHECK: %[[V3:.*]] = and i64 %[[MEMPTR_ADJ]], 1
311 // CHECK: %[[IS_VIRTUAL_OFFSET:.*]] = icmp ne i64 %[[V3]], 0
312 // CHECK: br i1 %[[IS_VIRTUAL_OFFSET]]
314 // CHECK: %[[V4:.*]] = inttoptr i64 %[[MEMPTR_PTR]] to ptr
315 // CHECK: %[[V5:.*]] = icmp ne ptr %[[V4]], null
316 // CHECK: br i1 %[[V5]]
318 // CHECK: %[[V6:.*]] = ptrtoint ptr %[[V4]] to i64
319 // CHECK: %[[V7:.*]] = call i64 @llvm.ptrauth.resign(i64 %[[V6]], i32 0, i64 [[TYPEDISC0]], i32 0, i64 [[TYPEDISC1]])
320 // CHECK: %[[V8:.*]] = inttoptr i64 %[[V7]] to ptr
321 // CHECK: br
323 // CHECK: %[[V9:.*]] = phi ptr [ null, {{.*}} ], [ %[[V8]], {{.*}} ]
324 // CHECK: %[[V1:.*]] = ptrtoint ptr %[[V9]] to i64
325 // CHECK: %[[V11:.*]] = insertvalue { i64, i64 } %[[V2]], i64 %[[V1]], 0
326 // CHECK: br
328 // CHECK: %[[V12:.*]] = phi { i64, i64 } [ %[[V2]], {{.*}} ], [ %[[V11]], {{.*}} ]
329 // CHECK: store { i64, i64 } %[[V12]], ptr %[[METHOD1_ADDR]], align 8
330 // CHECK: ret void
// Base-to-derived member pointer conversion of a runtime value: non-null,
// nonvirtual pointers are re-signed via @llvm.ptrauth.resign from TYPEDISC0
// to TYPEDISC1 (CHECK lines above); null and virtual cases skip the resign.
332 void testConversion0(MethodTy0 method0, MethodTy1 method1) {
333 method1 = method0;
336 // CHECK: define{{.*}} void @_Z15testConversion1M5Base0FvvE(
337 // CHECK: call i64 @llvm.ptrauth.resign(i64 %{{.*}}, i32 0, i64 [[TYPEDISC0]], i32 0, i64 [[TYPEDISC1]])
// reinterpret_cast between member pointer types still resigns
// (TYPEDISC0 -> TYPEDISC1 per the CHECK line above).
339 void testConversion1(MethodTy0 method0) {
340 MethodTy1 method1 = reinterpret_cast<MethodTy1>(method0);
343 // CHECK: define{{.*}} void @_Z15testConversion2M8Derived0FvvE(
344 // CHECK: call i64 @llvm.ptrauth.resign(i64 %{{.*}}, i32 0, i64 [[TYPEDISC1]], i32 0, i64 [[TYPEDISC0]])
// Derived-to-base static_cast: resign in the opposite direction
// (TYPEDISC1 -> TYPEDISC0 per the CHECK line above).
346 void testConversion2(MethodTy1 method1) {
347 MethodTy0 method0 = static_cast<MethodTy0>(method1);
350 // CHECK: define{{.*}} void @_Z15testConversion3M8Derived0FvvE(
351 // CHECK: call i64 @llvm.ptrauth.resign(i64 %{{.*}}, i32 0, i64 [[TYPEDISC1]], i32 0, i64 [[TYPEDISC0]])
// Same resign as testConversion2, but spelled as a reinterpret_cast.
353 void testConversion3(MethodTy1 method1) {
354 MethodTy0 method0 = reinterpret_cast<MethodTy0>(method1);
357 // No need to call @llvm.ptrauth.resign if the source member function
358 // pointer is a constant.
360 // CHECK: define{{.*}} void @_Z15testConversion4v(
361 // CHECK: %[[METHOD0:.*]] = alloca { i64, i64 }, align 8
362 // CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 [[TYPEDISC0]]) to i64), i64 0 }, ptr %[[METHOD0]], align 8
363 // CHECK: ret void
// Converting a *constant* member pointer needs no runtime resign: the
// CHECK above shows a plain store of a ptrauth constant with TYPEDISC0.
365 void testConversion4() {
366 MethodTy0 method0 = reinterpret_cast<MethodTy0>(&Derived0::virtual1);
// Regression test: this combination (covariant-ish foo() in a non-primary
// base) previously crashed codegen; the test only requires it to compile.
369 // This code used to crash.
370 namespace testNonVirtualThunk {
371 struct R {};
373 struct B0 {
374 virtual void bar();
377 struct B1 {
378 virtual R foo();
381 struct D : B0, B1 {
382 virtual R foo();
// Global instance forces vtable/thunk emission for D.
385 D d;
388 // CHECK: define internal void @_ZN22TestAnonymousNamespace12_GLOBAL__N_11S3fooEv_vfpthunk_(
// Taking &S::foo for an internal-linkage class must emit the vfpthunk with
// internal linkage ("define internal void" in the CHECK above), not
// linkonce_odr hidden as for the external classes.
390 namespace TestAnonymousNamespace {
391 namespace {
392 struct S {
393 virtual void foo(){};
395 } // namespace
397 void test() {
398 auto t = &S::foo;
400 } // namespace TestAnonymousNamespace
// Globals initialized from constant member-pointer casts: emitted as
// constant-initialized { i64, i64 } pairs with signed function pointers
// (@gmethod0..2 CHECK lines at the top of the file), no runtime resign.
402 MethodTy1 gmethod0 = reinterpret_cast<MethodTy1>(&Base0::nonvirtual0);
403 MethodTy0 gmethod1 = reinterpret_cast<MethodTy0>(&Derived0::nonvirtual5);
404 MethodTy0 gmethod2 = reinterpret_cast<MethodTy0>(&Derived0::virtual1);
406 // CHECK-LABEL: define{{.*}} void @_Z13testArrayInitv()
407 // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %p0, ptr align 8 @__const._Z13testArrayInitv.p0, i64 16, i1 false)
408 // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %p1, ptr align 8 @__const._Z13testArrayInitv.p1, i64 16, i1 false)
409 // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %c0, ptr align 8 @__const._Z13testArrayInitv.c0, i64 16, i1 false)
410 // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %c1, ptr align 8 @__const._Z13testArrayInitv.c1, i64 16, i1 false)
411 // CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base011nonvirtual0Ev, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %{{.*}} align 8
412 // CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %{{.*}}, align 8
414 void initList(std::initializer_list<MethodTy1>);
// Array and aggregate initializers built from constant member pointers are
// lowered to memcpy from the private @__const.* globals (CHECK lines above);
// the initializer_list arguments are stored as ptrauth constants directly.
416 void testArrayInit() {
417 MethodTy1 p0[] = {&Base0::nonvirtual0};
418 MethodTy1 p1[] = {&Base0::virtual1};
419 Class0 c0{&Base0::nonvirtual0};
420 Class0 c1{&Base0::virtual1};
421 initList({&Base0::nonvirtual0});
422 initList({&Base0::virtual1});
427 // STACK-PROT: define {{.*}}_vfpthunk{{.*}}[[ATTRS:#[0-9]+]]
428 // STACK-PROT: attributes [[ATTRS]] =
429 // STACK-PROT-NOT: ssp
430 // STACK-PROT-NOT: sspstrong
431 // STACK-PROT-NOT: sspreq
432 // STACK-PROT-NEXT: attributes
434 // CHECK: define{{.*}} void @_Z15testConvertNullv(
435 // CHECK: %[[T:.*]] = alloca { i64, i64 },
436 // store { i64, i64 } zeroinitializer, { i64, i64 }* %[[T]],
// Converting a null member pointer between incompatible prototypes stores
// zeroinitializer -- no signing or resigning of a null value.
438 void testConvertNull() {
439 VariadicMethodTy0 t = (VariadicMethodTy0)(MethodTy0{});