// Captured from the llvm-project gitweb view (HEAD commit: "[docs] Fix build-docs.sh").
// File: clang/lib/CodeGen/ItaniumCXXABI.cpp
// Blob: cb97af7ab11ab16b5af104480893dba3122dbc0d
1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides C++ code generation targeting the Itanium C++ ABI. The class
10 // in this file generates structures that follow the Itanium C++ ABI, which is
11 // documented at:
12 // https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13 // https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
15 // It also supports the closely-related ARM ABI, documented at:
16 // https://developer.arm.com/documentation/ihi0041/g/
18 //===----------------------------------------------------------------------===//
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/Type.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ScopedPrinter.h"
39 using namespace clang;
40 using namespace CodeGen;
42 namespace {
43 class ItaniumCXXABI : public CodeGen::CGCXXABI {
44 /// VTables - All the vtables which have been defined.
45 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
47 /// All the thread wrapper functions that have been used.
48 llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
49 ThreadWrappers;
51 protected:
52 bool UseARMMethodPtrABI;
53 bool UseARMGuardVarABI;
54 bool Use32BitVTableOffsetABI;
56 ItaniumMangleContext &getMangleContext() {
57 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
60 public:
61 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
62 bool UseARMMethodPtrABI = false,
63 bool UseARMGuardVarABI = false) :
64 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
65 UseARMGuardVarABI(UseARMGuardVarABI),
66 Use32BitVTableOffsetABI(false) { }
68 bool classifyReturnType(CGFunctionInfo &FI) const override;
70 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
71 // If C++ prohibits us from making a copy, pass by address.
72 if (!RD->canPassInRegisters())
73 return RAA_Indirect;
74 return RAA_Default;
77 bool isThisCompleteObject(GlobalDecl GD) const override {
78 // The Itanium ABI has separate complete-object vs. base-object
79 // variants of both constructors and destructors.
80 if (isa<CXXDestructorDecl>(GD.getDecl())) {
81 switch (GD.getDtorType()) {
82 case Dtor_Complete:
83 case Dtor_Deleting:
84 return true;
86 case Dtor_Base:
87 return false;
89 case Dtor_Comdat:
90 llvm_unreachable("emitting dtor comdat as function?");
92 llvm_unreachable("bad dtor kind");
94 if (isa<CXXConstructorDecl>(GD.getDecl())) {
95 switch (GD.getCtorType()) {
96 case Ctor_Complete:
97 return true;
99 case Ctor_Base:
100 return false;
102 case Ctor_CopyingClosure:
103 case Ctor_DefaultClosure:
104 llvm_unreachable("closure ctors in Itanium ABI?");
106 case Ctor_Comdat:
107 llvm_unreachable("emitting ctor comdat as function?");
109 llvm_unreachable("bad dtor kind");
112 // No other kinds.
113 return false;
116 bool isZeroInitializable(const MemberPointerType *MPT) override;
118 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
120 CGCallee
121 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122 const Expr *E,
123 Address This,
124 llvm::Value *&ThisPtrForCall,
125 llvm::Value *MemFnPtr,
126 const MemberPointerType *MPT) override;
128 llvm::Value *
129 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130 Address Base,
131 llvm::Value *MemPtr,
132 const MemberPointerType *MPT) override;
134 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
135 const CastExpr *E,
136 llvm::Value *Src) override;
137 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
138 llvm::Constant *Src) override;
140 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
142 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
143 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
144 CharUnits offset) override;
145 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
146 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
147 CharUnits ThisAdjustment);
149 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
150 llvm::Value *L, llvm::Value *R,
151 const MemberPointerType *MPT,
152 bool Inequality) override;
154 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
155 llvm::Value *Addr,
156 const MemberPointerType *MPT) override;
158 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
159 Address Ptr, QualType ElementType,
160 const CXXDestructorDecl *Dtor) override;
162 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
163 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
165 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
167 llvm::CallInst *
168 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
169 llvm::Value *Exn) override;
171 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
172 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
173 CatchTypeInfo
174 getAddrOfCXXCatchHandlerType(QualType Ty,
175 QualType CatchHandlerType) override {
176 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
179 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
180 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
181 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
182 Address ThisPtr,
183 llvm::Type *StdTypeInfoPtrTy) override;
185 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
186 QualType SrcRecordTy) override;
188 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
189 QualType SrcRecordTy, QualType DestTy,
190 QualType DestRecordTy,
191 llvm::BasicBlock *CastEnd) override;
193 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
194 QualType SrcRecordTy,
195 QualType DestTy) override;
197 bool EmitBadCastCall(CodeGenFunction &CGF) override;
199 llvm::Value *
200 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
201 const CXXRecordDecl *ClassDecl,
202 const CXXRecordDecl *BaseClassDecl) override;
204 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
206 AddedStructorArgCounts
207 buildStructorSignature(GlobalDecl GD,
208 SmallVectorImpl<CanQualType> &ArgTys) override;
210 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
211 CXXDtorType DT) const override {
212 // Itanium does not emit any destructor variant as an inline thunk.
213 // Delegating may occur as an optimization, but all variants are either
214 // emitted with external linkage or as linkonce if they are inline and used.
215 return false;
218 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
220 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
221 FunctionArgList &Params) override;
223 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
225 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
226 const CXXConstructorDecl *D,
227 CXXCtorType Type,
228 bool ForVirtualBase,
229 bool Delegating) override;
231 llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
232 const CXXDestructorDecl *DD,
233 CXXDtorType Type,
234 bool ForVirtualBase,
235 bool Delegating) override;
237 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
238 CXXDtorType Type, bool ForVirtualBase,
239 bool Delegating, Address This,
240 QualType ThisTy) override;
242 void emitVTableDefinitions(CodeGenVTables &CGVT,
243 const CXXRecordDecl *RD) override;
245 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
246 CodeGenFunction::VPtr Vptr) override;
248 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249 return true;
252 llvm::Constant *
253 getVTableAddressPoint(BaseSubobject Base,
254 const CXXRecordDecl *VTableClass) override;
256 llvm::Value *getVTableAddressPointInStructor(
257 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
260 llvm::Value *getVTableAddressPointInStructorWithVTT(
261 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
264 llvm::Constant *
265 getVTableAddressPointForConstExpr(BaseSubobject Base,
266 const CXXRecordDecl *VTableClass) override;
268 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
269 CharUnits VPtrOffset) override;
271 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
272 Address This, llvm::Type *Ty,
273 SourceLocation Loc) override;
275 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
276 const CXXDestructorDecl *Dtor,
277 CXXDtorType DtorType, Address This,
278 DeleteOrMemberCallExpr E) override;
280 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
282 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
283 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
285 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
286 bool ReturnAdjustment) override {
287 // Allow inlining of thunks by emitting them with available_externally
288 // linkage together with vtables when needed.
289 if (ForVTable && !Thunk->hasLocalLinkage())
290 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
291 CGM.setGVProperties(Thunk, GD);
294 bool exportThunk() override { return true; }
296 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
297 const ThisAdjustment &TA) override;
299 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
300 const ReturnAdjustment &RA) override;
302 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
303 FunctionArgList &Args) const override {
304 assert(!Args.empty() && "expected the arglist to not be empty!");
305 return Args.size() - 1;
308 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
309 StringRef GetDeletedVirtualCallName() override
310 { return "__cxa_deleted_virtual"; }
312 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
313 Address InitializeArrayCookie(CodeGenFunction &CGF,
314 Address NewPtr,
315 llvm::Value *NumElements,
316 const CXXNewExpr *expr,
317 QualType ElementType) override;
318 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
319 Address allocPtr,
320 CharUnits cookieSize) override;
322 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
323 llvm::GlobalVariable *DeclPtr,
324 bool PerformInit) override;
325 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
326 llvm::FunctionCallee dtor,
327 llvm::Constant *addr) override;
329 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
330 llvm::Value *Val);
331 void EmitThreadLocalInitFuncs(
332 CodeGenModule &CGM,
333 ArrayRef<const VarDecl *> CXXThreadLocals,
334 ArrayRef<llvm::Function *> CXXThreadLocalInits,
335 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
337 bool usesThreadWrapperFunction(const VarDecl *VD) const override {
338 return !isEmittedWithConstantInitializer(VD) ||
339 mayNeedDestruction(VD);
341 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
342 QualType LValType) override;
344 bool NeedsVTTParameter(GlobalDecl GD) override;
346 /**************************** RTTI Uniqueness ******************************/
348 protected:
349 /// Returns true if the ABI requires RTTI type_info objects to be unique
350 /// across a program.
351 virtual bool shouldRTTIBeUnique() const { return true; }
353 public:
354 /// What sort of unique-RTTI behavior should we use?
355 enum RTTIUniquenessKind {
356 /// We are guaranteeing, or need to guarantee, that the RTTI string
357 /// is unique.
358 RUK_Unique,
360 /// We are not guaranteeing uniqueness for the RTTI string, so we
361 /// can demote to hidden visibility but must use string comparisons.
362 RUK_NonUniqueHidden,
364 /// We are not guaranteeing uniqueness for the RTTI string, so we
365 /// have to use string comparisons, but we also have to emit it with
366 /// non-hidden visibility.
367 RUK_NonUniqueVisible
370 /// Return the required visibility status for the given type and linkage in
371 /// the current ABI.
372 RTTIUniquenessKind
373 classifyRTTIUniqueness(QualType CanTy,
374 llvm::GlobalValue::LinkageTypes Linkage) const;
375 friend class ItaniumRTTIBuilder;
377 void emitCXXStructor(GlobalDecl GD) override;
379 std::pair<llvm::Value *, const CXXRecordDecl *>
380 LoadVTablePtr(CodeGenFunction &CGF, Address This,
381 const CXXRecordDecl *RD) override;
383 private:
384 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
385 const auto &VtableLayout =
386 CGM.getItaniumVTableContext().getVTableLayout(RD);
388 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
389 // Skip empty slot.
390 if (!VtableComponent.isUsedFunctionPointerKind())
391 continue;
393 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
394 if (!Method->getCanonicalDecl()->isInlined())
395 continue;
397 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
398 auto *Entry = CGM.GetGlobalValue(Name);
399 // This checks if virtual inline function has already been emitted.
400 // Note that it is possible that this inline function would be emitted
401 // after trying to emit vtable speculatively. Because of this we do
402 // an extra pass after emitting all deferred vtables to find and emit
403 // these vtables opportunistically.
404 if (!Entry || Entry->isDeclaration())
405 return true;
407 return false;
410 bool isVTableHidden(const CXXRecordDecl *RD) const {
411 const auto &VtableLayout =
412 CGM.getItaniumVTableContext().getVTableLayout(RD);
414 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
415 if (VtableComponent.isRTTIKind()) {
416 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
417 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
418 return true;
419 } else if (VtableComponent.isUsedFunctionPointerKind()) {
420 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
421 if (Method->getVisibility() == Visibility::HiddenVisibility &&
422 !Method->isDefined())
423 return true;
426 return false;
430 class ARMCXXABI : public ItaniumCXXABI {
431 public:
432 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
433 ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
434 /*UseARMGuardVarABI=*/true) {}
436 bool HasThisReturn(GlobalDecl GD) const override {
437 return (isa<CXXConstructorDecl>(GD.getDecl()) || (
438 isa<CXXDestructorDecl>(GD.getDecl()) &&
439 GD.getDtorType() != Dtor_Deleting));
442 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
443 QualType ResTy) override;
445 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
446 Address InitializeArrayCookie(CodeGenFunction &CGF,
447 Address NewPtr,
448 llvm::Value *NumElements,
449 const CXXNewExpr *expr,
450 QualType ElementType) override;
451 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
452 CharUnits cookieSize) override;
455 class AppleARM64CXXABI : public ARMCXXABI {
456 public:
457 AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
458 Use32BitVTableOffsetABI = true;
461 // ARM64 libraries are prepared for non-unique RTTI.
462 bool shouldRTTIBeUnique() const override { return false; }
465 class FuchsiaCXXABI final : public ItaniumCXXABI {
466 public:
467 explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
468 : ItaniumCXXABI(CGM) {}
470 private:
471 bool HasThisReturn(GlobalDecl GD) const override {
472 return isa<CXXConstructorDecl>(GD.getDecl()) ||
473 (isa<CXXDestructorDecl>(GD.getDecl()) &&
474 GD.getDtorType() != Dtor_Deleting);
478 class WebAssemblyCXXABI final : public ItaniumCXXABI {
479 public:
480 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
481 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
482 /*UseARMGuardVarABI=*/true) {}
483 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
484 llvm::CallInst *
485 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
486 llvm::Value *Exn) override;
488 private:
489 bool HasThisReturn(GlobalDecl GD) const override {
490 return isa<CXXConstructorDecl>(GD.getDecl()) ||
491 (isa<CXXDestructorDecl>(GD.getDecl()) &&
492 GD.getDtorType() != Dtor_Deleting);
494 bool canCallMismatchedFunctionType() const override { return false; }
497 class XLCXXABI final : public ItaniumCXXABI {
498 public:
499 explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
500 : ItaniumCXXABI(CGM) {}
502 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
503 llvm::FunctionCallee dtor,
504 llvm::Constant *addr) override;
506 bool useSinitAndSterm() const override { return true; }
508 private:
509 void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
510 llvm::Constant *addr);
514 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
515 switch (CGM.getContext().getCXXABIKind()) {
516 // For IR-generation purposes, there's no significant difference
517 // between the ARM and iOS ABIs.
518 case TargetCXXABI::GenericARM:
519 case TargetCXXABI::iOS:
520 case TargetCXXABI::WatchOS:
521 return new ARMCXXABI(CGM);
523 case TargetCXXABI::AppleARM64:
524 return new AppleARM64CXXABI(CGM);
526 case TargetCXXABI::Fuchsia:
527 return new FuchsiaCXXABI(CGM);
529 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
530 // include the other 32-bit ARM oddities: constructor/destructor return values
531 // and array cookies.
532 case TargetCXXABI::GenericAArch64:
533 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
534 /*UseARMGuardVarABI=*/true);
536 case TargetCXXABI::GenericMIPS:
537 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
539 case TargetCXXABI::WebAssembly:
540 return new WebAssemblyCXXABI(CGM);
542 case TargetCXXABI::XL:
543 return new XLCXXABI(CGM);
545 case TargetCXXABI::GenericItanium:
546 if (CGM.getContext().getTargetInfo().getTriple().getArch()
547 == llvm::Triple::le32) {
548 // For PNaCl, use ARM-style method pointers so that PNaCl code
549 // does not assume anything about the alignment of function
550 // pointers.
551 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
553 return new ItaniumCXXABI(CGM);
555 case TargetCXXABI::Microsoft:
556 llvm_unreachable("Microsoft ABI is not Itanium-based");
558 llvm_unreachable("bad ABI kind");
561 llvm::Type *
562 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
563 if (MPT->isMemberDataPointer())
564 return CGM.PtrDiffTy;
565 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
568 /// In the Itanium and ARM ABIs, method pointers have the form:
569 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
571 /// In the Itanium ABI:
572 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero
573 /// - the this-adjustment is (memptr.adj)
574 /// - the virtual offset is (memptr.ptr - 1)
576 /// In the ARM ABI:
577 /// - method pointers are virtual if (memptr.adj & 1) is nonzero
578 /// - the this-adjustment is (memptr.adj >> 1)
579 /// - the virtual offset is (memptr.ptr)
580 /// ARM uses 'adj' for the virtual flag because Thumb functions
581 /// may be only single-byte aligned.
583 /// If the member is virtual, the adjusted 'this' pointer points
584 /// to a vtable pointer from which the virtual offset is applied.
586 /// If the member is non-virtual, memptr.ptr is the address of
587 /// the function to call.
588 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
589 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
590 llvm::Value *&ThisPtrForCall,
591 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
592 CGBuilderTy &Builder = CGF.Builder;
594 const FunctionProtoType *FPT =
595 MPT->getPointeeType()->getAs<FunctionProtoType>();
596 auto *RD =
597 cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
599 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
600 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
602 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
604 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
605 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
606 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
608 // Extract memptr.adj, which is in the second field.
609 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
611 // Compute the true adjustment.
612 llvm::Value *Adj = RawAdj;
613 if (UseARMMethodPtrABI)
614 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
616 // Apply the adjustment and cast back to the original struct type
617 // for consistency.
618 llvm::Value *This = ThisAddr.getPointer();
619 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
620 Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
621 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
622 ThisPtrForCall = This;
624 // Load the function pointer.
625 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
627 // If the LSB in the function pointer is 1, the function pointer points to
628 // a virtual function.
629 llvm::Value *IsVirtual;
630 if (UseARMMethodPtrABI)
631 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
632 else
633 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
634 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
635 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
637 // In the virtual path, the adjustment left 'This' pointing to the
638 // vtable of the correct base subobject. The "function pointer" is an
639 // offset within the vtable (+1 for the virtual flag on non-ARM).
640 CGF.EmitBlock(FnVirtual);
642 // Cast the adjusted this to a pointer to vtable pointer and load.
643 llvm::Type *VTableTy = Builder.getInt8PtrTy();
644 CharUnits VTablePtrAlign =
645 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
646 CGF.getPointerAlign());
647 llvm::Value *VTable = CGF.GetVTablePtr(
648 Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
650 // Apply the offset.
651 // On ARM64, to reserve extra space in virtual member function pointers,
652 // we only pay attention to the low 32 bits of the offset.
653 llvm::Value *VTableOffset = FnAsInt;
654 if (!UseARMMethodPtrABI)
655 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
656 if (Use32BitVTableOffsetABI) {
657 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
658 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
661 // Check the address of the function pointer if CFI on member function
662 // pointers is enabled.
663 llvm::Constant *CheckSourceLocation;
664 llvm::Constant *CheckTypeDesc;
665 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
666 CGM.HasHiddenLTOVisibility(RD);
667 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
668 CGM.HasHiddenLTOVisibility(RD);
669 bool ShouldEmitWPDInfo =
670 CGM.getCodeGenOpts().WholeProgramVTables &&
671 // Don't insert type tests if we are forcing public visibility.
672 !CGM.AlwaysHasLTOVisibilityPublic(RD);
673 llvm::Value *VirtualFn = nullptr;
676 CodeGenFunction::SanitizerScope SanScope(&CGF);
677 llvm::Value *TypeId = nullptr;
678 llvm::Value *CheckResult = nullptr;
680 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
681 // If doing CFI, VFE or WPD, we will need the metadata node to check
682 // against.
683 llvm::Metadata *MD =
684 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
685 TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
688 if (ShouldEmitVFEInfo) {
689 llvm::Value *VFPAddr =
690 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
692 // If doing VFE, load from the vtable with a type.checked.load intrinsic
693 // call. Note that we use the GEP to calculate the address to load from
694 // and pass 0 as the offset to the intrinsic. This is because every
695 // vtable slot of the correct type is marked with matching metadata, and
696 // we know that the load must be from one of these slots.
697 llvm::Value *CheckedLoad = Builder.CreateCall(
698 CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
699 {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
700 CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
701 VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
702 VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
703 "memptr.virtualfn");
704 } else {
705 // When not doing VFE, emit a normal load, as it allows more
706 // optimisations than type.checked.load.
707 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
708 llvm::Value *VFPAddr =
709 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
710 llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
711 ? llvm::Intrinsic::type_test
712 : llvm::Intrinsic::public_type_test;
714 CheckResult = Builder.CreateCall(
715 CGM.getIntrinsic(IID),
716 {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
719 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
720 VirtualFn = CGF.Builder.CreateCall(
721 CGM.getIntrinsic(llvm::Intrinsic::load_relative,
722 {VTableOffset->getType()}),
723 {VTable, VTableOffset});
724 VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
725 } else {
726 llvm::Value *VFPAddr =
727 CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
728 VFPAddr = CGF.Builder.CreateBitCast(
729 VFPAddr, FTy->getPointerTo()->getPointerTo());
730 VirtualFn = CGF.Builder.CreateAlignedLoad(
731 FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
732 "memptr.virtualfn");
735 assert(VirtualFn && "Virtual fuction pointer not created!");
736 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
737 CheckResult) &&
738 "Check result required but not created!");
740 if (ShouldEmitCFICheck) {
741 // If doing CFI, emit the check.
742 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
743 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
744 llvm::Constant *StaticData[] = {
745 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
746 CheckSourceLocation,
747 CheckTypeDesc,
750 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
751 CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
752 } else {
753 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
754 CGM.getLLVMContext(),
755 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
756 llvm::Value *ValidVtable = Builder.CreateCall(
757 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
758 CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
759 SanitizerHandler::CFICheckFail, StaticData,
760 {VTable, ValidVtable});
763 FnVirtual = Builder.GetInsertBlock();
765 } // End of sanitizer scope
767 CGF.EmitBranch(FnEnd);
769 // In the non-virtual path, the function pointer is actually a
770 // function pointer.
771 CGF.EmitBlock(FnNonVirtual);
772 llvm::Value *NonVirtualFn =
773 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
775 // Check the function pointer if CFI on member function pointers is enabled.
776 if (ShouldEmitCFICheck) {
777 CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
778 if (RD->hasDefinition()) {
779 CodeGenFunction::SanitizerScope SanScope(&CGF);
781 llvm::Constant *StaticData[] = {
782 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
783 CheckSourceLocation,
784 CheckTypeDesc,
787 llvm::Value *Bit = Builder.getFalse();
788 llvm::Value *CastedNonVirtualFn =
789 Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
790 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
791 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
792 getContext().getMemberPointerType(
793 MPT->getPointeeType(),
794 getContext().getRecordType(Base).getTypePtr()));
795 llvm::Value *TypeId =
796 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
798 llvm::Value *TypeTest =
799 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
800 {CastedNonVirtualFn, TypeId});
801 Bit = Builder.CreateOr(Bit, TypeTest);
804 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
805 SanitizerHandler::CFICheckFail, StaticData,
806 {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
808 FnNonVirtual = Builder.GetInsertBlock();
812 // We're done.
813 CGF.EmitBlock(FnEnd);
814 llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
815 CalleePtr->addIncoming(VirtualFn, FnVirtual);
816 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
818 CGCallee Callee(FPT, CalleePtr);
819 return Callee;
822 /// Compute an l-value by applying the given pointer-to-member to a
823 /// base object.
824 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
825 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
826 const MemberPointerType *MPT) {
827 assert(MemPtr->getType() == CGM.PtrDiffTy);
829 CGBuilderTy &Builder = CGF.Builder;
831 // Cast to char*.
832 Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
834 // Apply the offset, which we assume is non-null.
835 llvm::Value *Addr = Builder.CreateInBoundsGEP(
836 Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");
838 // Cast the address to the appropriate pointer type, adopting the
839 // address space of the base pointer.
840 llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
841 ->getPointerTo(Base.getAddressSpace());
842 return Builder.CreateBitCast(Addr, PType);
845 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
846 /// conversion.
848 /// Bitcast conversions are always a no-op under Itanium.
850 /// Obligatory offset/adjustment diagram:
851 /// <-- offset --> <-- adjustment -->
852 /// |--------------------------|----------------------|--------------------|
853 /// ^Derived address point ^Base address point ^Member address point
855 /// So when converting a base member pointer to a derived member pointer,
856 /// we add the offset to the adjustment because the address point has
857 /// decreased; and conversely, when converting a derived MP to a base MP
858 /// we subtract the offset from the adjustment because the address point
859 /// has increased.
861 /// The standard forbids (at compile time) conversion to and from
862 /// virtual bases, which is why we don't have to consider them here.
864 /// The standard forbids (at run time) casting a derived MP to a base
865 /// MP when the derived MP does not point to a member of the base.
866 /// This is why -1 is a reasonable choice for null data member
867 /// pointers.
868 llvm::Value *
869 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
870 const CastExpr *E,
871 llvm::Value *src) {
872 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
873 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
874 E->getCastKind() == CK_ReinterpretMemberPointer);
876 // Under Itanium, reinterprets don't require any additional processing.
877 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
879 // Use constant emission if we can.
880 if (isa<llvm::Constant>(src))
881 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
883 llvm::Constant *adj = getMemberPointerAdjustment(E);
884 if (!adj) return src;
886 CGBuilderTy &Builder = CGF.Builder;
887 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
889 const MemberPointerType *destTy =
890 E->getType()->castAs<MemberPointerType>();
892 // For member data pointers, this is just a matter of adding the
893 // offset if the source is non-null.
894 if (destTy->isMemberDataPointer()) {
895 llvm::Value *dst;
896 if (isDerivedToBase)
897 dst = Builder.CreateNSWSub(src, adj, "adj");
898 else
899 dst = Builder.CreateNSWAdd(src, adj, "adj");
901 // Null check.
902 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
903 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
904 return Builder.CreateSelect(isNull, src, dst);
907 // The this-adjustment is left-shifted by 1 on ARM.
908 if (UseARMMethodPtrABI) {
909 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
910 offset <<= 1;
911 adj = llvm::ConstantInt::get(adj->getType(), offset);
914 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
915 llvm::Value *dstAdj;
916 if (isDerivedToBase)
917 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
918 else
919 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
921 return Builder.CreateInsertValue(src, dstAdj, 1);
924 llvm::Constant *
925 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
926 llvm::Constant *src) {
927 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
928 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
929 E->getCastKind() == CK_ReinterpretMemberPointer);
931 // Under Itanium, reinterprets don't require any additional processing.
932 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
934 // If the adjustment is trivial, we don't need to do anything.
935 llvm::Constant *adj = getMemberPointerAdjustment(E);
936 if (!adj) return src;
938 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
940 const MemberPointerType *destTy =
941 E->getType()->castAs<MemberPointerType>();
943 // For member data pointers, this is just a matter of adding the
944 // offset if the source is non-null.
945 if (destTy->isMemberDataPointer()) {
946 // null maps to null.
947 if (src->isAllOnesValue()) return src;
949 if (isDerivedToBase)
950 return llvm::ConstantExpr::getNSWSub(src, adj);
951 else
952 return llvm::ConstantExpr::getNSWAdd(src, adj);
955 // The this-adjustment is left-shifted by 1 on ARM.
956 if (UseARMMethodPtrABI) {
957 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
958 offset <<= 1;
959 adj = llvm::ConstantInt::get(adj->getType(), offset);
962 llvm::Constant *srcAdj = src->getAggregateElement(1);
963 llvm::Constant *dstAdj;
964 if (isDerivedToBase)
965 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
966 else
967 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
969 llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
970 assert(res != nullptr && "Folding must succeed");
971 return res;
974 llvm::Constant *
975 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
976 // Itanium C++ ABI 2.3:
977 // A NULL pointer is represented as -1.
978 if (MPT->isMemberDataPointer())
979 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
981 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
982 llvm::Constant *Values[2] = { Zero, Zero };
983 return llvm::ConstantStruct::getAnon(Values);
986 llvm::Constant *
987 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
988 CharUnits offset) {
989 // Itanium C++ ABI 2.3:
990 // A pointer to data member is an offset from the base address of
991 // the class object containing it, represented as a ptrdiff_t
992 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
995 llvm::Constant *
996 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
997 return BuildMemberPointer(MD, CharUnits::Zero());
1000 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
1001 CharUnits ThisAdjustment) {
1002 assert(MD->isInstance() && "Member function must not be static!");
1004 CodeGenTypes &Types = CGM.getTypes();
1006 // Get the function pointer (or index if this is a virtual function).
1007 llvm::Constant *MemPtr[2];
1008 if (MD->isVirtual()) {
1009 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
1010 uint64_t VTableOffset;
1011 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1012 // Multiply by 4-byte relative offsets.
1013 VTableOffset = Index * 4;
1014 } else {
1015 const ASTContext &Context = getContext();
1016 CharUnits PointerWidth = Context.toCharUnitsFromBits(
1017 Context.getTargetInfo().getPointerWidth(0));
1018 VTableOffset = Index * PointerWidth.getQuantity();
1021 if (UseARMMethodPtrABI) {
1022 // ARM C++ ABI 3.2.1:
1023 // This ABI specifies that adj contains twice the this
1024 // adjustment, plus 1 if the member function is virtual. The
1025 // least significant bit of adj then makes exactly the same
1026 // discrimination as the least significant bit of ptr does for
1027 // Itanium.
1028 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
1029 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1030 2 * ThisAdjustment.getQuantity() + 1);
1031 } else {
1032 // Itanium C++ ABI 2.3:
1033 // For a virtual function, [the pointer field] is 1 plus the
1034 // virtual table offset (in bytes) of the function,
1035 // represented as a ptrdiff_t.
1036 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
1037 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1038 ThisAdjustment.getQuantity());
1040 } else {
1041 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1042 llvm::Type *Ty;
1043 // Check whether the function has a computable LLVM signature.
1044 if (Types.isFuncTypeConvertible(FPT)) {
1045 // The function has a computable LLVM signature; use the correct type.
1046 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
1047 } else {
1048 // Use an arbitrary non-function type to tell GetAddrOfFunction that the
1049 // function type is incomplete.
1050 Ty = CGM.PtrDiffTy;
1052 llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
1054 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
1055 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1056 (UseARMMethodPtrABI ? 2 : 1) *
1057 ThisAdjustment.getQuantity());
1060 return llvm::ConstantStruct::getAnon(MemPtr);
1063 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1064 QualType MPType) {
1065 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1066 const ValueDecl *MPD = MP.getMemberPointerDecl();
1067 if (!MPD)
1068 return EmitNullMemberPointer(MPT);
1070 CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1072 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1073 return BuildMemberPointer(MD, ThisAdjustment);
1075 CharUnits FieldOffset =
1076 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1077 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1080 /// The comparison algorithm is pretty easy: the member pointers are
1081 /// the same if they're either bitwise identical *or* both null.
1083 /// ARM is different here only because null-ness is more complicated.
1084 llvm::Value *
1085 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
1086 llvm::Value *L,
1087 llvm::Value *R,
1088 const MemberPointerType *MPT,
1089 bool Inequality) {
1090 CGBuilderTy &Builder = CGF.Builder;
1092 llvm::ICmpInst::Predicate Eq;
1093 llvm::Instruction::BinaryOps And, Or;
1094 if (Inequality) {
1095 Eq = llvm::ICmpInst::ICMP_NE;
1096 And = llvm::Instruction::Or;
1097 Or = llvm::Instruction::And;
1098 } else {
1099 Eq = llvm::ICmpInst::ICMP_EQ;
1100 And = llvm::Instruction::And;
1101 Or = llvm::Instruction::Or;
1104 // Member data pointers are easy because there's a unique null
1105 // value, so it just comes down to bitwise equality.
1106 if (MPT->isMemberDataPointer())
1107 return Builder.CreateICmp(Eq, L, R);
1109 // For member function pointers, the tautologies are more complex.
1110 // The Itanium tautology is:
1111 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
1112 // The ARM tautology is:
1113 // (L == R) <==> (L.ptr == R.ptr &&
1114 // (L.adj == R.adj ||
1115 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
1116 // The inequality tautologies have exactly the same structure, except
1117 // applying De Morgan's laws.
1119 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
1120 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
1122 // This condition tests whether L.ptr == R.ptr. This must always be
1123 // true for equality to hold.
1124 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
1126 // This condition, together with the assumption that L.ptr == R.ptr,
1127 // tests whether the pointers are both null. ARM imposes an extra
1128 // condition.
1129 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
1130 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
1132 // This condition tests whether L.adj == R.adj. If this isn't
1133 // true, the pointers are unequal unless they're both null.
1134 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
1135 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
1136 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
1138 // Null member function pointers on ARM clear the low bit of Adj,
1139 // so the zero condition has to check that neither low bit is set.
1140 if (UseARMMethodPtrABI) {
1141 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
1143 // Compute (l.adj | r.adj) & 1 and test it against zero.
1144 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
1145 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
1146 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
1147 "cmp.or.adj");
1148 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
1151 // Tie together all our conditions.
1152 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
1153 Result = Builder.CreateBinOp(And, PtrEq, Result,
1154 Inequality ? "memptr.ne" : "memptr.eq");
1155 return Result;
1158 llvm::Value *
1159 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1160 llvm::Value *MemPtr,
1161 const MemberPointerType *MPT) {
1162 CGBuilderTy &Builder = CGF.Builder;
1164 /// For member data pointers, this is just a check against -1.
1165 if (MPT->isMemberDataPointer()) {
1166 assert(MemPtr->getType() == CGM.PtrDiffTy);
1167 llvm::Value *NegativeOne =
1168 llvm::Constant::getAllOnesValue(MemPtr->getType());
1169 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1172 // In Itanium, a member function pointer is not null if 'ptr' is not null.
1173 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1175 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1176 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1178 // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1179 // (the virtual bit) is set.
1180 if (UseARMMethodPtrABI) {
1181 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1182 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1183 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1184 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1185 "memptr.isvirtual");
1186 Result = Builder.CreateOr(Result, IsVirtual);
1189 return Result;
1192 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1193 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1194 if (!RD)
1195 return false;
1197 // If C++ prohibits us from making a copy, return by address.
1198 if (!RD->canPassInRegisters()) {
1199 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1200 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1201 return true;
1203 return false;
1206 /// The Itanium ABI requires non-zero initialization only for data
1207 /// member pointers, for which '0' is a valid offset.
1208 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1209 return MPT->isMemberFunctionPointer();
1212 /// The Itanium ABI always places an offset to the complete object
1213 /// at entry -2 in the vtable.
1214 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1215 const CXXDeleteExpr *DE,
1216 Address Ptr,
1217 QualType ElementType,
1218 const CXXDestructorDecl *Dtor) {
1219 bool UseGlobalDelete = DE->isGlobalDelete();
1220 if (UseGlobalDelete) {
1221 // Derive the complete-object pointer, which is what we need
1222 // to pass to the deallocation function.
1224 // Grab the vtable pointer as an intptr_t*.
1225 auto *ClassDecl =
1226 cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
1227 llvm::Value *VTable =
1228 CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1230 // Track back to entry -2 and pull out the offset there.
1231 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1232 CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
1233 llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr, CGF.getPointerAlign());
1235 // Apply the offset.
1236 llvm::Value *CompletePtr =
1237 CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1238 CompletePtr =
1239 CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
1241 // If we're supposed to call the global delete, make sure we do so
1242 // even if the destructor throws.
1243 CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1244 ElementType);
1247 // FIXME: Provide a source location here even though there's no
1248 // CXXMemberCallExpr for dtor call.
1249 CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1250 EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
1252 if (UseGlobalDelete)
1253 CGF.PopCleanupBlock();
1256 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1257 // void __cxa_rethrow();
1259 llvm::FunctionType *FTy =
1260 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1262 llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1264 if (isNoReturn)
1265 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1266 else
1267 CGF.EmitRuntimeCallOrInvoke(Fn);
1270 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1271 // void *__cxa_allocate_exception(size_t thrown_size);
1273 llvm::FunctionType *FTy =
1274 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1276 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1279 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1280 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1281 // void (*dest) (void *));
1283 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1284 llvm::FunctionType *FTy =
1285 llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1287 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1290 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1291 QualType ThrowType = E->getSubExpr()->getType();
1292 // Now allocate the exception object.
1293 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1294 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1296 llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1297 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1298 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1300 CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1301 CGF.EmitAnyExprToExn(
1302 E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
1304 // Now throw the exception.
1305 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1306 /*ForEH=*/true);
1308 // The address of the destructor. If the exception type has a
1309 // trivial destructor (or isn't a record), we just pass null.
1310 llvm::Constant *Dtor = nullptr;
1311 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1312 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1313 if (!Record->hasTrivialDestructor()) {
1314 CXXDestructorDecl *DtorD = Record->getDestructor();
1315 Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1316 Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1319 if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1321 llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1322 CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1325 static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1326 // void *__dynamic_cast(const void *sub,
1327 // const abi::__class_type_info *src,
1328 // const abi::__class_type_info *dst,
1329 // std::ptrdiff_t src2dst_offset);
1331 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1332 llvm::Type *PtrDiffTy =
1333 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1335 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1337 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1339 // Mark the function as nounwind readonly.
1340 llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1341 llvm::Attribute::ReadOnly };
1342 llvm::AttributeList Attrs = llvm::AttributeList::get(
1343 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1345 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1348 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1349 // void __cxa_bad_cast();
1350 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1351 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1354 /// Compute the src2dst_offset hint as described in the
1355 /// Itanium C++ ABI [2.9.7]
1356 static CharUnits computeOffsetHint(ASTContext &Context,
1357 const CXXRecordDecl *Src,
1358 const CXXRecordDecl *Dst) {
1359 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1360 /*DetectVirtual=*/false);
1362 // If Dst is not derived from Src we can skip the whole computation below and
1363 // return that Src is not a public base of Dst. Record all inheritance paths.
1364 if (!Dst->isDerivedFrom(Src, Paths))
1365 return CharUnits::fromQuantity(-2ULL);
1367 unsigned NumPublicPaths = 0;
1368 CharUnits Offset;
1370 // Now walk all possible inheritance paths.
1371 for (const CXXBasePath &Path : Paths) {
1372 if (Path.Access != AS_public) // Ignore non-public inheritance.
1373 continue;
1375 ++NumPublicPaths;
1377 for (const CXXBasePathElement &PathElement : Path) {
1378 // If the path contains a virtual base class we can't give any hint.
1379 // -1: no hint.
1380 if (PathElement.Base->isVirtual())
1381 return CharUnits::fromQuantity(-1ULL);
1383 if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1384 continue;
1386 // Accumulate the base class offsets.
1387 const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1388 Offset += L.getBaseClassOffset(
1389 PathElement.Base->getType()->getAsCXXRecordDecl());
1393 // -2: Src is not a public base of Dst.
1394 if (NumPublicPaths == 0)
1395 return CharUnits::fromQuantity(-2ULL);
1397 // -3: Src is a multiple public base type but never a virtual base type.
1398 if (NumPublicPaths > 1)
1399 return CharUnits::fromQuantity(-3ULL);
1401 // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1402 // Return the offset of Src from the origin of Dst.
1403 return Offset;
1406 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1407 // void __cxa_bad_typeid();
1408 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1410 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1413 bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1414 QualType SrcRecordTy) {
1415 return IsDeref;
1418 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1419 llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1420 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1421 Call->setDoesNotReturn();
1422 CGF.Builder.CreateUnreachable();
1425 llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
1426 QualType SrcRecordTy,
1427 Address ThisPtr,
1428 llvm::Type *StdTypeInfoPtrTy) {
1429 auto *ClassDecl =
1430 cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1431 llvm::Value *Value =
1432 CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
1434 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1435 // Load the type info.
1436 Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
1437 Value = CGF.Builder.CreateCall(
1438 CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1439 {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
1441 // Setup to dereference again since this is a proxy we accessed.
1442 Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
1443 } else {
1444 // Load the type info.
1445 Value =
1446 CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
1448 return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
1449 CGF.getPointerAlign());
1452 bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1453 QualType SrcRecordTy) {
1454 return SrcIsPtr;
1457 llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
1458 CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
1459 QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
1460 llvm::Type *PtrDiffLTy =
1461 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1462 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1464 llvm::Value *SrcRTTI =
1465 CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1466 llvm::Value *DestRTTI =
1467 CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1469 // Compute the offset hint.
1470 const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1471 const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1472 llvm::Value *OffsetHint = llvm::ConstantInt::get(
1473 PtrDiffLTy,
1474 computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
1476 // Emit the call to __dynamic_cast.
1477 llvm::Value *Value = ThisAddr.getPointer();
1478 Value = CGF.EmitCastToVoidPtr(Value);
1480 llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
1481 Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
1482 Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1484 /// C++ [expr.dynamic.cast]p9:
1485 /// A failed cast to reference type throws std::bad_cast
1486 if (DestTy->isReferenceType()) {
1487 llvm::BasicBlock *BadCastBlock =
1488 CGF.createBasicBlock("dynamic_cast.bad_cast");
1490 llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1491 CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1493 CGF.EmitBlock(BadCastBlock);
1494 EmitBadCastCall(CGF);
1497 return Value;
1500 llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
1501 Address ThisAddr,
1502 QualType SrcRecordTy,
1503 QualType DestTy) {
1504 llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1505 auto *ClassDecl =
1506 cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
1507 llvm::Value *OffsetToTop;
1508 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1509 // Get the vtable pointer.
1510 llvm::Value *VTable =
1511 CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);
1513 // Get the offset-to-top from the vtable.
1514 OffsetToTop =
1515 CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
1516 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1517 CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
1518 } else {
1519 llvm::Type *PtrDiffLTy =
1520 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1522 // Get the vtable pointer.
1523 llvm::Value *VTable =
1524 CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);
1526 // Get the offset-to-top from the vtable.
1527 OffsetToTop =
1528 CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
1529 OffsetToTop = CGF.Builder.CreateAlignedLoad(
1530 PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
1532 // Finally, add the offset to the pointer.
1533 llvm::Value *Value = ThisAddr.getPointer();
1534 Value = CGF.EmitCastToVoidPtr(Value);
1535 Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
1536 return CGF.Builder.CreateBitCast(Value, DestLTy);
1539 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1540 llvm::FunctionCallee Fn = getBadCastFn(CGF);
1541 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1542 Call->setDoesNotReturn();
1543 CGF.Builder.CreateUnreachable();
1544 return true;
1547 llvm::Value *
1548 ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
1549 Address This,
1550 const CXXRecordDecl *ClassDecl,
1551 const CXXRecordDecl *BaseClassDecl) {
1552 llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
1553 CharUnits VBaseOffsetOffset =
1554 CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
1555 BaseClassDecl);
1556 llvm::Value *VBaseOffsetPtr =
1557 CGF.Builder.CreateConstGEP1_64(
1558 CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
1559 "vbase.offset.ptr");
1561 llvm::Value *VBaseOffset;
1562 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1563 VBaseOffsetPtr =
1564 CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
1565 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1566 CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
1567 "vbase.offset");
1568 } else {
1569 VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
1570 CGM.PtrDiffTy->getPointerTo());
1571 VBaseOffset = CGF.Builder.CreateAlignedLoad(
1572 CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
1574 return VBaseOffset;
1577 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1578 // Just make sure we're in sync with TargetCXXABI.
1579 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1581 // The constructor used for constructing this as a base class;
1582 // ignores virtual bases.
1583 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1585 // The constructor used for constructing this as a complete class;
1586 // constructs the virtual bases, then calls the base constructor.
1587 if (!D->getParent()->isAbstract()) {
1588 // We don't need to emit the complete ctor if the class is abstract.
1589 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1593 CGCXXABI::AddedStructorArgCounts
1594 ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1595 SmallVectorImpl<CanQualType> &ArgTys) {
1596 ASTContext &Context = getContext();
1598 // All parameters are already in place except VTT, which goes after 'this'.
1599 // These are Clang types, so we don't need to worry about sret yet.
1601 // Check if we need to add a VTT parameter (which has type void **).
1602 if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1603 : GD.getDtorType() == Dtor_Base) &&
1604 cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1605 ArgTys.insert(ArgTys.begin() + 1,
1606 Context.getPointerType(Context.VoidPtrTy));
1607 return AddedStructorArgCounts::prefix(1);
1609 return AddedStructorArgCounts{};
1612 void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1613 // The destructor used for destructing this as a base class; ignores
1614 // virtual bases.
1615 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1617 // The destructor used for destructing this as a most-derived class;
1618 // call the base destructor and then destructs any virtual bases.
1619 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1621 // The destructor in a virtual table is always a 'deleting'
1622 // destructor, which calls the complete destructor and then uses the
1623 // appropriate operator delete.
1624 if (D->isVirtual())
1625 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1628 void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
1629 QualType &ResTy,
1630 FunctionArgList &Params) {
1631 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
1632 assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));
1634 // Check if we need a VTT parameter as well.
1635 if (NeedsVTTParameter(CGF.CurGD)) {
1636 ASTContext &Context = getContext();
1638 // FIXME: avoid the fake decl
1639 QualType T = Context.getPointerType(Context.VoidPtrTy);
1640 auto *VTTDecl = ImplicitParamDecl::Create(
1641 Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
1642 T, ImplicitParamDecl::CXXVTT);
1643 Params.insert(Params.begin() + 1, VTTDecl);
1644 getStructorImplicitParamDecl(CGF) = VTTDecl;
1648 void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1649 // Naked functions have no prolog.
1650 if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1651 return;
1653 /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1654 /// adjustments are required, because they are all handled by thunks.
1655 setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1657 /// Initialize the 'vtt' slot if needed.
1658 if (getStructorImplicitParamDecl(CGF)) {
1659 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1660 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1663 /// If this is a function that the ABI specifies returns 'this', initialize
1664 /// the return slot to 'this' at the start of the function.
1666 /// Unlike the setting of return types, this is done within the ABI
1667 /// implementation instead of by clients of CGCXXABI because:
1668 /// 1) getThisValue is currently protected
1669 /// 2) in theory, an ABI could implement 'this' returns some other way;
1670 /// HasThisReturn only specifies a contract, not the implementation
1671 if (HasThisReturn(CGF.CurGD))
1672 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1675 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1676 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1677 bool ForVirtualBase, bool Delegating) {
1678 if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1679 return AddedStructorArgs{};
1681 // Insert the implicit 'vtt' argument as the second argument.
1682 llvm::Value *VTT =
1683 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1684 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1685 return AddedStructorArgs::prefix({{VTT, VTTTy}});
1688 llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1689 CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1690 bool ForVirtualBase, bool Delegating) {
1691 GlobalDecl GD(DD, Type);
1692 return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1695 void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1696 const CXXDestructorDecl *DD,
1697 CXXDtorType Type, bool ForVirtualBase,
1698 bool Delegating, Address This,
1699 QualType ThisTy) {
1700 GlobalDecl GD(DD, Type);
1701 llvm::Value *VTT =
1702 getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1703 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1705 CGCallee Callee;
1706 if (getContext().getLangOpts().AppleKext &&
1707 Type != Dtor_Base && DD->isVirtual())
1708 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1709 else
1710 Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1712 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
1713 nullptr);
1716 void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
1717 const CXXRecordDecl *RD) {
1718 llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
1719 if (VTable->hasInitializer())
1720 return;
1722 ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
1723 const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
1724 llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
1725 llvm::Constant *RTTI =
1726 CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
1728 // Create and set the initializer.
1729 ConstantInitBuilder builder(CGM);
1730 auto components = builder.beginStruct();
1731 CGVT.createVTableInitializer(components, VTLayout, RTTI,
1732 llvm::GlobalValue::isLocalLinkage(Linkage));
1733 components.finishAndSetAsInitializer(VTable);
1735 // Set the correct linkage.
1736 VTable->setLinkage(Linkage);
1738 if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
1739 VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));
1741 // Set the right visibility.
1742 CGM.setGVProperties(VTable, RD);
1744 // If this is the magic class __cxxabiv1::__fundamental_type_info,
1745 // we will emit the typeinfo for the fundamental types. This is the
1746 // same behaviour as GCC.
1747 const DeclContext *DC = RD->getDeclContext();
1748 if (RD->getIdentifier() &&
1749 RD->getIdentifier()->isStr("__fundamental_type_info") &&
1750 isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
1751 cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
1752 DC->getParent()->isTranslationUnit())
1753 EmitFundamentalRTTIDescriptors(RD);
1755 // Always emit type metadata on non-available_externally definitions, and on
1756 // available_externally definitions if we are performing whole program
1757 // devirtualization. For WPD we need the type metadata on all vtable
1758 // definitions to ensure we associate derived classes with base classes
1759 // defined in headers but with a strong definition only in a shared library.
1760 if (!VTable->isDeclarationForLinker() ||
1761 CGM.getCodeGenOpts().WholeProgramVTables) {
1762 CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
1763 // For available_externally definitions, add the vtable to
1764 // @llvm.compiler.used so that it isn't deleted before whole program
1765 // analysis.
1766 if (VTable->isDeclarationForLinker()) {
1767 assert(CGM.getCodeGenOpts().WholeProgramVTables);
1768 CGM.addCompilerUsedGlobal(VTable);
1772 if (VTContext.isRelativeLayout()) {
1773 CGVT.RemoveHwasanMetadata(VTable);
1774 if (!VTable->isDSOLocal())
1775 CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
1779 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1780 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1781 if (Vptr.NearestVBase == nullptr)
1782 return false;
1783 return NeedsVTTParameter(CGF.CurGD);
1786 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1787 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1788 const CXXRecordDecl *NearestVBase) {
1790 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1791 NeedsVTTParameter(CGF.CurGD)) {
1792 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1793 NearestVBase);
1795 return getVTableAddressPoint(Base, VTableClass);
1798 llvm::Constant *
1799 ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1800 const CXXRecordDecl *VTableClass) {
1801 llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1803 // Find the appropriate vtable within the vtable group, and the address point
1804 // within that vtable.
1805 VTableLayout::AddressPointLocation AddressPoint =
1806 CGM.getItaniumVTableContext()
1807 .getVTableLayout(VTableClass)
1808 .getAddressPoint(Base);
1809 llvm::Value *Indices[] = {
1810 llvm::ConstantInt::get(CGM.Int32Ty, 0),
1811 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1812 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1815 return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1816 Indices, /*InBounds=*/true,
1817 /*InRangeIndex=*/1);
1820 // Check whether all the non-inline virtual methods for the class have the
1821 // specified attribute.
1822 template <typename T>
1823 static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1824 bool FoundNonInlineVirtualMethodWithAttr = false;
1825 for (const auto *D : RD->noload_decls()) {
1826 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1827 if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1828 FD->doesThisDeclarationHaveABody())
1829 continue;
1830 if (!D->hasAttr<T>())
1831 return false;
1832 FoundNonInlineVirtualMethodWithAttr = true;
1836 // We didn't find any non-inline virtual methods missing the attribute. We
1837 // will return true when we found at least one non-inline virtual with the
1838 // attribute. (This lets our caller know that the attribute needs to be
1839 // propagated up to the vtable.)
1840 return FoundNonInlineVirtualMethodWithAttr;
1843 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1844 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1845 const CXXRecordDecl *NearestVBase) {
1846 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1847 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1849 // Get the secondary vpointer index.
1850 uint64_t VirtualPointerIndex =
1851 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1853 /// Load the VTT.
1854 llvm::Value *VTT = CGF.LoadCXXVTT();
1855 if (VirtualPointerIndex)
1856 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
1857 CGF.VoidPtrTy, VTT, VirtualPointerIndex);
1859 // And load the address point from the VTT.
1860 return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
1861 CGF.getPointerAlign());
1864 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1865 BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1866 return getVTableAddressPoint(Base, VTableClass);
1869 llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
1870 CharUnits VPtrOffset) {
1871 assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");
1873 llvm::GlobalVariable *&VTable = VTables[RD];
1874 if (VTable)
1875 return VTable;
1877 // Queue up this vtable for possible deferred emission.
1878 CGM.addDeferredVTable(RD);
1880 SmallString<256> Name;
1881 llvm::raw_svector_ostream Out(Name);
1882 getMangleContext().mangleCXXVTable(RD, Out);
1884 const VTableLayout &VTLayout =
1885 CGM.getItaniumVTableContext().getVTableLayout(RD);
1886 llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
1888 // Use pointer alignment for the vtable. Otherwise we would align them based
1889 // on the size of the initializer which doesn't make sense as only single
1890 // values are read.
1891 unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
1892 ? 32
1893 : CGM.getTarget().getPointerAlign(0);
1895 VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
1896 Name, VTableType, llvm::GlobalValue::ExternalLinkage,
1897 getContext().toCharUnitsFromBits(PAlign).getQuantity());
1898 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1900 // In MS C++ if you have a class with virtual functions in which you are using
1901 // selective member import/export, then all virtual functions must be exported
1902 // unless they are inline, otherwise a link error will result. To match this
1903 // behavior, for such classes, we dllimport the vtable if it is defined
1904 // externally and all the non-inline virtual methods are marked dllimport, and
1905 // we dllexport the vtable if it is defined in this TU and all the non-inline
1906 // virtual methods are marked dllexport.
1907 if (CGM.getTarget().hasPS4DLLImportExport()) {
1908 if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
1909 if (CGM.getVTables().isVTableExternal(RD)) {
1910 if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
1911 VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
1912 } else {
1913 if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
1914 VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
1918 CGM.setGVProperties(VTable, RD);
1920 return VTable;
1923 CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
1924 GlobalDecl GD,
1925 Address This,
1926 llvm::Type *Ty,
1927 SourceLocation Loc) {
1928 llvm::Type *TyPtr = Ty->getPointerTo();
1929 auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
1930 llvm::Value *VTable = CGF.GetVTablePtr(
1931 This, TyPtr->getPointerTo(), MethodDecl->getParent());
1933 uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
1934 llvm::Value *VFunc;
1935 if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
1936 VFunc = CGF.EmitVTableTypeCheckedLoad(
1937 MethodDecl->getParent(), VTable, TyPtr,
1938 VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
1939 } else {
1940 CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
1942 llvm::Value *VFuncLoad;
1943 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1944 VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
1945 llvm::Value *Load = CGF.Builder.CreateCall(
1946 CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
1947 {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
1948 VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
1949 } else {
1950 VTable =
1951 CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
1952 llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1953 TyPtr, VTable, VTableIndex, "vfn");
1954 VFuncLoad =
1955 CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
1956 CGF.getPointerAlign());
1959 // Add !invariant.load md to virtual function load to indicate that
1960 // function didn't change inside vtable.
1961 // It's safe to add it without -fstrict-vtable-pointers, but it would not
1962 // help in devirtualization because it will only matter if we will have 2
1963 // the same virtual function loads from the same vtable load, which won't
1964 // happen without enabled devirtualization with -fstrict-vtable-pointers.
1965 if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1966 CGM.getCodeGenOpts().StrictVTablePointers) {
1967 if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
1968 VFuncLoadInstr->setMetadata(
1969 llvm::LLVMContext::MD_invariant_load,
1970 llvm::MDNode::get(CGM.getLLVMContext(),
1971 llvm::ArrayRef<llvm::Metadata *>()));
1974 VFunc = VFuncLoad;
1977 CGCallee Callee(GD, VFunc);
1978 return Callee;
1981 llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1982 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1983 Address This, DeleteOrMemberCallExpr E) {
1984 auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
1985 auto *D = E.dyn_cast<const CXXDeleteExpr *>();
1986 assert((CE != nullptr) ^ (D != nullptr));
1987 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
1988 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
1990 GlobalDecl GD(Dtor, DtorType);
1991 const CGFunctionInfo *FInfo =
1992 &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
1993 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
1994 CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
1996 QualType ThisTy;
1997 if (CE) {
1998 ThisTy = CE->getObjectType();
1999 } else {
2000 ThisTy = D->getDestroyedType();
2003 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
2004 QualType(), nullptr);
2005 return nullptr;
2008 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2009 CodeGenVTables &VTables = CGM.getVTables();
2010 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2011 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2014 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2015 const CXXRecordDecl *RD) const {
2016 // We don't emit available_externally vtables if we are in -fapple-kext mode
2017 // because kext mode does not permit devirtualization.
2018 if (CGM.getLangOpts().AppleKext)
2019 return false;
2021 // If the vtable is hidden then it is not safe to emit an available_externally
2022 // copy of vtable.
2023 if (isVTableHidden(RD))
2024 return false;
2026 if (CGM.getCodeGenOpts().ForceEmitVTables)
2027 return true;
2029 // If we don't have any not emitted inline virtual function then we are safe
2030 // to emit an available_externally copy of vtable.
2031 // FIXME we can still emit a copy of the vtable if we
2032 // can emit definition of the inline functions.
2033 if (hasAnyUnusedVirtualInlineFunction(RD))
2034 return false;
2036 // For a class with virtual bases, we must also be able to speculatively
2037 // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2038 // the vtable" and "can emit the VTT". For a base subobject, this means we
2039 // need to be able to emit non-virtual base vtables.
2040 if (RD->getNumVBases()) {
2041 for (const auto &B : RD->bases()) {
2042 auto *BRD = B.getType()->getAsCXXRecordDecl();
2043 assert(BRD && "no class for base specifier");
2044 if (B.isVirtual() || !BRD->isDynamicClass())
2045 continue;
2046 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2047 return false;
2051 return true;
2054 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2055 if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2056 return false;
2058 // For a complete-object vtable (or more specifically, for the VTT), we need
2059 // to be able to speculatively emit the vtables of all dynamic virtual bases.
2060 for (const auto &B : RD->vbases()) {
2061 auto *BRD = B.getType()->getAsCXXRecordDecl();
2062 assert(BRD && "no class for base specifier");
2063 if (!BRD->isDynamicClass())
2064 continue;
2065 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2066 return false;
2069 return true;
2071 static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
2072 Address InitialPtr,
2073 int64_t NonVirtualAdjustment,
2074 int64_t VirtualAdjustment,
2075 bool IsReturnAdjustment) {
2076 if (!NonVirtualAdjustment && !VirtualAdjustment)
2077 return InitialPtr.getPointer();
2079 Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
2081 // In a base-to-derived cast, the non-virtual adjustment is applied first.
2082 if (NonVirtualAdjustment && !IsReturnAdjustment) {
2083 V = CGF.Builder.CreateConstInBoundsByteGEP(V,
2084 CharUnits::fromQuantity(NonVirtualAdjustment));
2087 // Perform the virtual adjustment if we have one.
2088 llvm::Value *ResultPtr;
2089 if (VirtualAdjustment) {
2090 Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
2091 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
2093 llvm::Value *Offset;
2094 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
2095 CGF.Int8Ty, VTablePtr, VirtualAdjustment);
2096 if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
2097 // Load the adjustment offset from the vtable as a 32-bit int.
2098 OffsetPtr =
2099 CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
2100 Offset =
2101 CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
2102 CharUnits::fromQuantity(4));
2103 } else {
2104 llvm::Type *PtrDiffTy =
2105 CGF.ConvertType(CGF.getContext().getPointerDiffType());
2107 OffsetPtr =
2108 CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
2110 // Load the adjustment offset from the vtable.
2111 Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
2112 CGF.getPointerAlign());
2114 // Adjust our pointer.
2115 ResultPtr = CGF.Builder.CreateInBoundsGEP(
2116 V.getElementType(), V.getPointer(), Offset);
2117 } else {
2118 ResultPtr = V.getPointer();
2121 // In a derived-to-base conversion, the non-virtual adjustment is
2122 // applied second.
2123 if (NonVirtualAdjustment && IsReturnAdjustment) {
2124 ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
2125 NonVirtualAdjustment);
2128 // Cast back to the original type.
2129 return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
2132 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
2133 Address This,
2134 const ThisAdjustment &TA) {
2135 return performTypeAdjustment(CGF, This, TA.NonVirtual,
2136 TA.Virtual.Itanium.VCallOffsetOffset,
2137 /*IsReturnAdjustment=*/false);
2140 llvm::Value *
2141 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2142 const ReturnAdjustment &RA) {
2143 return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
2144 RA.Virtual.Itanium.VBaseOffsetOffset,
2145 /*IsReturnAdjustment=*/true);
2148 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2149 RValue RV, QualType ResultType) {
2150 if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2151 return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2153 // Destructor thunks in the ARM ABI have indeterminate results.
2154 llvm::Type *T = CGF.ReturnValue.getElementType();
2155 RValue Undef = RValue::get(llvm::UndefValue::get(T));
2156 return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2159 /************************** Array allocation cookies **************************/
2161 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2162 // The array cookie is a size_t; pad that up to the element alignment.
2163 // The cookie is actually right-justified in that space.
2164 return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2165 CGM.getContext().getPreferredTypeAlignInChars(elementType));
2168 Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2169 Address NewPtr,
2170 llvm::Value *NumElements,
2171 const CXXNewExpr *expr,
2172 QualType ElementType) {
2173 assert(requiresArrayCookie(expr));
2175 unsigned AS = NewPtr.getAddressSpace();
2177 ASTContext &Ctx = getContext();
2178 CharUnits SizeSize = CGF.getSizeSize();
2180 // The size of the cookie.
2181 CharUnits CookieSize =
2182 std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
2183 assert(CookieSize == getArrayCookieSizeImpl(ElementType));
2185 // Compute an offset to the cookie.
2186 Address CookiePtr = NewPtr;
2187 CharUnits CookieOffset = CookieSize - SizeSize;
2188 if (!CookieOffset.isZero())
2189 CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
2191 // Write the number of elements into the appropriate slot.
2192 Address NumElementsPtr =
2193 CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
2194 llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
2196 // Handle the array cookie specially in ASan.
2197 if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
2198 (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
2199 CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
2200 // The store to the CookiePtr does not need to be instrumented.
2201 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
2202 llvm::FunctionType *FTy =
2203 llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
2204 llvm::FunctionCallee F =
2205 CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
2206 CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
2209 // Finally, compute a pointer to the actual data buffer by skipping
2210 // over the cookie completely.
2211 return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
2214 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2215 Address allocPtr,
2216 CharUnits cookieSize) {
2217 // The element size is right-justified in the cookie.
2218 Address numElementsPtr = allocPtr;
2219 CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2220 if (!numElementsOffset.isZero())
2221 numElementsPtr =
2222 CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2224 unsigned AS = allocPtr.getAddressSpace();
2225 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2226 if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2227 return CGF.Builder.CreateLoad(numElementsPtr);
2228 // In asan mode emit a function call instead of a regular load and let the
2229 // run-time deal with it: if the shadow is properly poisoned return the
2230 // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2231 // We can't simply ignore this load using nosanitize metadata because
2232 // the metadata may be lost.
2233 llvm::FunctionType *FTy =
2234 llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
2235 llvm::FunctionCallee F =
2236 CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2237 return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2240 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2241 // ARM says that the cookie is always:
2242 // struct array_cookie {
2243 // std::size_t element_size; // element_size != 0
2244 // std::size_t element_count;
2245 // };
2246 // But the base ABI doesn't give anything an alignment greater than
2247 // 8, so we can dismiss this as typical ABI-author blindness to
2248 // actual language complexity and round up to the element alignment.
2249 return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2250 CGM.getContext().getTypeAlignInChars(elementType));
2253 Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
2254 Address newPtr,
2255 llvm::Value *numElements,
2256 const CXXNewExpr *expr,
2257 QualType elementType) {
2258 assert(requiresArrayCookie(expr));
2260 // The cookie is always at the start of the buffer.
2261 Address cookie = newPtr;
2263 // The first element is the element size.
2264 cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
2265 llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
2266 getContext().getTypeSizeInChars(elementType).getQuantity());
2267 CGF.Builder.CreateStore(elementSize, cookie);
2269 // The second element is the element count.
2270 cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
2271 CGF.Builder.CreateStore(numElements, cookie);
2273 // Finally, compute a pointer to the actual data buffer by skipping
2274 // over the cookie completely.
2275 CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
2276 return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
2279 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2280 Address allocPtr,
2281 CharUnits cookieSize) {
2282 // The number of elements is at offset sizeof(size_t) relative to
2283 // the allocated pointer.
2284 Address numElementsPtr
2285 = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2287 numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2288 return CGF.Builder.CreateLoad(numElementsPtr);
2291 /*********************** Static local initialization **************************/
2293 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2294 llvm::PointerType *GuardPtrTy) {
2295 // int __cxa_guard_acquire(__guard *guard_object);
2296 llvm::FunctionType *FTy =
2297 llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2298 GuardPtrTy, /*isVarArg=*/false);
2299 return CGM.CreateRuntimeFunction(
2300 FTy, "__cxa_guard_acquire",
2301 llvm::AttributeList::get(CGM.getLLVMContext(),
2302 llvm::AttributeList::FunctionIndex,
2303 llvm::Attribute::NoUnwind));
2306 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2307 llvm::PointerType *GuardPtrTy) {
2308 // void __cxa_guard_release(__guard *guard_object);
2309 llvm::FunctionType *FTy =
2310 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2311 return CGM.CreateRuntimeFunction(
2312 FTy, "__cxa_guard_release",
2313 llvm::AttributeList::get(CGM.getLLVMContext(),
2314 llvm::AttributeList::FunctionIndex,
2315 llvm::Attribute::NoUnwind));
2318 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2319 llvm::PointerType *GuardPtrTy) {
2320 // void __cxa_guard_abort(__guard *guard_object);
2321 llvm::FunctionType *FTy =
2322 llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2323 return CGM.CreateRuntimeFunction(
2324 FTy, "__cxa_guard_abort",
2325 llvm::AttributeList::get(CGM.getLLVMContext(),
2326 llvm::AttributeList::FunctionIndex,
2327 llvm::Attribute::NoUnwind));
2330 namespace {
2331 struct CallGuardAbort final : EHScopeStack::Cleanup {
2332 llvm::GlobalVariable *Guard;
2333 CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2335 void Emit(CodeGenFunction &CGF, Flags flags) override {
2336 CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2337 Guard);
2342 /// The ARM code here follows the Itanium code closely enough that we
2343 /// just special-case it at particular places.
2344 void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
2345 const VarDecl &D,
2346 llvm::GlobalVariable *var,
2347 bool shouldPerformInit) {
2348 CGBuilderTy &Builder = CGF.Builder;
2350 // Inline variables that weren't instantiated from variable templates have
2351 // partially-ordered initialization within their translation unit.
2352 bool NonTemplateInline =
2353 D.isInline() &&
2354 !isTemplateInstantiation(D.getTemplateSpecializationKind());
2356 // We only need to use thread-safe statics for local non-TLS variables and
2357 // inline variables; other global initialization is always single-threaded
2358 // or (through lazy dynamic loading in multiple threads) unsequenced.
2359 bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
2360 (D.isLocalVarDecl() || NonTemplateInline) &&
2361 !D.getTLSKind();
2363 // If we have a global variable with internal linkage and thread-safe statics
2364 // are disabled, we can just let the guard variable be of type i8.
2365 bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
2367 llvm::IntegerType *guardTy;
2368 CharUnits guardAlignment;
2369 if (useInt8GuardVariable) {
2370 guardTy = CGF.Int8Ty;
2371 guardAlignment = CharUnits::One();
2372 } else {
2373 // Guard variables are 64 bits in the generic ABI and size width on ARM
2374 // (i.e. 32-bit on AArch32, 64-bit on AArch64).
2375 if (UseARMGuardVarABI) {
2376 guardTy = CGF.SizeTy;
2377 guardAlignment = CGF.getSizeAlign();
2378 } else {
2379 guardTy = CGF.Int64Ty;
2380 guardAlignment = CharUnits::fromQuantity(
2381 CGM.getDataLayout().getABITypeAlignment(guardTy));
2384 llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
2385 CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
2387 // Create the guard variable if we don't already have it (as we
2388 // might if we're double-emitting this function body).
2389 llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
2390 if (!guard) {
2391 // Mangle the name for the guard.
2392 SmallString<256> guardName;
2394 llvm::raw_svector_ostream out(guardName);
2395 getMangleContext().mangleStaticGuardVariable(&D, out);
2398 // Create the guard variable with a zero-initializer.
2399 // Just absorb linkage and visibility from the guarded variable.
2400 guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
2401 false, var->getLinkage(),
2402 llvm::ConstantInt::get(guardTy, 0),
2403 guardName.str());
2404 guard->setDSOLocal(var->isDSOLocal());
2405 guard->setVisibility(var->getVisibility());
2406 // If the variable is thread-local, so is its guard variable.
2407 guard->setThreadLocalMode(var->getThreadLocalMode());
2408 guard->setAlignment(guardAlignment.getAsAlign());
2410 // The ABI says: "It is suggested that it be emitted in the same COMDAT
2411 // group as the associated data object." In practice, this doesn't work for
2412 // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
2413 llvm::Comdat *C = var->getComdat();
2414 if (!D.isLocalVarDecl() && C &&
2415 (CGM.getTarget().getTriple().isOSBinFormatELF() ||
2416 CGM.getTarget().getTriple().isOSBinFormatWasm())) {
2417 guard->setComdat(C);
2418 } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
2419 guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
2422 CGM.setStaticLocalDeclGuardAddress(&D, guard);
2425 Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);
2427 // Test whether the variable has completed initialization.
2429 // Itanium C++ ABI 3.3.2:
2430 // The following is pseudo-code showing how these functions can be used:
2431 // if (obj_guard.first_byte == 0) {
2432 // if ( __cxa_guard_acquire (&obj_guard) ) {
2433 // try {
2434 // ... initialize the object ...;
2435 // } catch (...) {
2436 // __cxa_guard_abort (&obj_guard);
2437 // throw;
2438 // }
2439 // ... queue object destructor with __cxa_atexit() ...;
2440 // __cxa_guard_release (&obj_guard);
2441 // }
2442 // }
2444 // Load the first byte of the guard variable.
2445 llvm::LoadInst *LI =
2446 Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2448 // Itanium ABI:
2449 // An implementation supporting thread-safety on multiprocessor
2450 // systems must also guarantee that references to the initialized
2451 // object do not occur before the load of the initialization flag.
2453 // In LLVM, we do this by marking the load Acquire.
2454 if (threadsafe)
2455 LI->setAtomic(llvm::AtomicOrdering::Acquire);
2457 // For ARM, we should only check the first bit, rather than the entire byte:
2459 // ARM C++ ABI 3.2.3.1:
2460 // To support the potential use of initialization guard variables
2461 // as semaphores that are the target of ARM SWP and LDREX/STREX
2462 // synchronizing instructions we define a static initialization
2463 // guard variable to be a 4-byte aligned, 4-byte word with the
2464 // following inline access protocol.
2465 // #define INITIALIZED 1
2466 // if ((obj_guard & INITIALIZED) != INITIALIZED) {
2467 // if (__cxa_guard_acquire(&obj_guard))
2468 // ...
2469 // }
2471 // and similarly for ARM64:
2473 // ARM64 C++ ABI 3.2.2:
2474 // This ABI instead only specifies the value bit 0 of the static guard
2475 // variable; all other bits are platform defined. Bit 0 shall be 0 when the
2476 // variable is not initialized and 1 when it is.
2477 llvm::Value *V =
2478 (UseARMGuardVarABI && !useInt8GuardVariable)
2479 ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
2480 : LI;
2481 llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
2483 llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
2484 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
2486 // Check if the first byte of the guard variable is zero.
2487 CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
2488 CodeGenFunction::GuardKind::VariableGuard, &D);
2490 CGF.EmitBlock(InitCheckBlock);
2492 // Variables used when coping with thread-safe statics and exceptions.
2493 if (threadsafe) {
2494 // Call __cxa_guard_acquire.
2495 llvm::Value *V
2496 = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
2498 llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
2500 Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
2501 InitBlock, EndBlock);
2503 // Call __cxa_guard_abort along the exceptional edge.
2504 CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
2506 CGF.EmitBlock(InitBlock);
2509 // Emit the initializer and add a global destructor if appropriate.
2510 CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
2512 if (threadsafe) {
2513 // Pop the guard-abort cleanup if we pushed one.
2514 CGF.PopCleanupBlock();
2516 // Call __cxa_guard_release. This cannot throw.
2517 CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
2518 guardAddr.getPointer());
2519 } else {
2520 // Store 1 into the first byte of the guard variable after initialization is
2521 // complete.
2522 Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
2523 Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
2526 CGF.EmitBlock(EndBlock);
/// Register a global destructor using __cxa_atexit.
///
/// \param dtor the destructor to run; invoked with a single void* argument.
/// \param addr the object being destroyed, passed back to \p dtor; may be
///             null (see the comment at the null check below).
/// \param TLS  whether to register with the thread-local variant
///             (__cxa_thread_atexit / _tlv_atexit) instead of __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  // AIX uses the sinit/sterm mechanism instead (see useSinitAndSterm).
  assert(!CGF.getTarget().getTriple().isOSAIX() &&
         "unexpected call to emitGlobalDtorWithCXAAtExit");
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  // Pick the runtime entry point; Darwin spells the TLS flavor _tlv_atexit.
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.  Go ahead and cast it to the
  // right prototype.
  llvm::Type *dtorTy =
      llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrInt8PtrTy =
      AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
    fn->setDoesNotThrow();

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here is
    // okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);

  llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
                             cast<llvm::Constant>(dtor.getCallee()), dtorTy),
                         llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
                         handle};
  // Registration itself must not unwind.
  CGF.EmitNounwindRuntimeCall(atexit, args);
}
2584 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2585 StringRef FnName) {
2586 // Create a function that registers/unregisters destructors that have the same
2587 // priority.
2588 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2589 llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2590 FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2592 return GlobalInitOrCleanupFn;
/// For each priority bucket of atexit-registered destructors, emit an
/// sterm-style cleanup function __GLOBAL_cleanup_<prio> that calls unatexit()
/// on every destructor and, when unatexit reports the destructor was still
/// registered (returns 0), runs it directly.
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    // The cleanup function has no meaningful source location.
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
    llvm::Type *dtorTy = dtorFuncTy->getPointerTo();

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC.  Go ahead and cast it to the
      // right prototype.
      llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(V, "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock("destruct.call");
      // The fall-through block either starts the next unatexit call or ends
      // the function, depending on whether more destructors remain.
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

      CGF.EmitBlock(DestructCallBlock);

      // Emit the call to casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
      // Make sure the call and the callee agree on calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    // Run the cleanup function as a global destructor at this priority.
    AddGlobalDtor(GlobalCleanupFn, Priority);
  }
}
/// For each priority bucket, emit an sinit-style function __GLOBAL_init_<prio>
/// that registers the bucket's destructors with __cxa_atexit (or atexit), and
/// install it as a global constructor at that priority.
void CodeGenModule::registerGlobalDtorsWithAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
    llvm::Function *GlobalInitFn =
        createGlobalInitOrCleanupFn(*this, GlobalInitFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    // The registration function has no meaningful source location.
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit) {
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
      } else {
        // Get the destructor function type, void(*)(void).
        llvm::Type *dtorTy =
            llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();

        // We're assuming that the destructor function is something we can
        // reasonably call with the correct CC.  Go ahead and cast it to the
        // right prototype.
        CGF.registerGlobalDtorWithAtExit(
            llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
      }
    }

    CGF.FinishFunction();
    AddGlobalCtor(GlobalInitFn, Priority);
  }

  // Targets using sinit/sterm also need the matching cleanup functions that
  // unregister (and possibly run) these destructors at unload time.
  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}
2700 /// Register a global destructor as best as we know how.
2701 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2702 llvm::FunctionCallee dtor,
2703 llvm::Constant *addr) {
2704 if (D.isNoDestroy(CGM.getContext()))
2705 return;
2707 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2708 // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2709 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2710 // We can always use __cxa_thread_atexit.
2711 if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2712 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2714 // In Apple kexts, we want to add a global destructor entry.
2715 // FIXME: shouldn't this be guarded by some variable?
2716 if (CGM.getLangOpts().AppleKext) {
2717 // Generate a global destructor entry.
2718 return CGM.AddCXXDtorEntry(dtor, addr);
2721 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2724 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2725 CodeGen::CodeGenModule &CGM) {
2726 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2727 // Darwin prefers to have references to thread local variables to go through
2728 // the thread wrapper instead of directly referencing the backing variable.
2729 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2730 CGM.getTarget().getTriple().isOSDarwin();
2733 /// Get the appropriate linkage for the wrapper function. This is essentially
2734 /// the weak form of the variable's linkage; every translation unit which needs
2735 /// the wrapper emits a copy, and we want the linker to merge them.
2736 static llvm::GlobalValue::LinkageTypes
2737 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2738 llvm::GlobalValue::LinkageTypes VarLinkage =
2739 CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2741 // For internal linkage variables, we don't need an external or weak wrapper.
2742 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2743 return VarLinkage;
2745 // If the thread wrapper is replaceable, give it appropriate linkage.
2746 if (isThreadWrapperReplaceable(VD, CGM))
2747 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2748 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2749 return VarLinkage;
2750 return llvm::GlobalValue::WeakODRLinkage;
/// Get or create the declaration of the thread_local wrapper function for
/// \p VD.  The wrapper returns a pointer to the variable (for references, to
/// the referenced object); its body is emitted later in
/// EmitThreadLocalInitFuncs.
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  // The wrapper's return type is a pointer to the (non-reference) value type.
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  // Weak wrappers get a COMDAT so the linker keeps only one copy.
  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // Replaceable wrappers use the CXX_FAST_TLS convention and cannot unwind.
  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }

  // Remember the wrapper so EmitThreadLocalInitFuncs can emit its body.
  ThreadWrappers.push_back({VD, Wrapper});
  return Wrapper;
}
/// Emit __tls_init (the guarded, ordered TLS initialization function), the
/// per-variable thread_local init functions/aliases, and the bodies of all
/// thread wrapper functions recorded in ThreadWrappers.
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // Template instantiations are initialized unordered, keyed by variable.
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     SourceLocation(),
                                                     /*TLS=*/true);
    // __tls_guard: one thread-local byte recording whether this thread has
    // already run the ordered initializers.
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper.  This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      // Unordered (template-instantiation) variables use their own init
      // function rather than the shared __tls_init.
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(
          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    llvm::LLVMContext &Context = CGM.getModule().getContext();

    // The linker on AIX is not happy with missing weak symbols.  However,
    // other TUs will not know whether the initialization routine exists
    // so create an empty, init function to satisfy the linker.
    // This is needed whenever a thread wrapper function is not used, and
    // also when the symbol is weak.
    if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
        isEmittedWithConstantInitializer(VD, true) &&
        !mayNeedDestruction(VD)) {
      // Init should be null.  If it were non-null, then the logic above would
      // either be defining the function to be an alias or declaring the
      // function with the expectation that the definition of the variable
      // is elsewhere.
      assert(Init == nullptr && "Expected Init to be null.");

      llvm::Function *Func = llvm::Function::Create(
          InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
                                    cast<llvm::Function>(Func),
                                    /*IsThunk=*/false);
      // Create a function body that just returns
      llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
      CGBuilderTy Builder(CGM, Entry);
      Builder.CreateRetVoid();
    }

    // Emit the wrapper body: run dynamic initialization if needed, then
    // return the address of the variable for this thread.
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else if (CGM.getTriple().isOSAIX()) {
      // On AIX, except if constinit and also neither of class type or of
      // (possibly multi-dimensional) array of class type, thread_local vars
      // will have init routines regardless of whether they are
      // const-initialized.  Since the routine is guaranteed to exist, we can
      // unconditionally call it without testing for its existance.  This
      // avoids potentially unresolved weak symbols which the AIX linker
      // isn't happy with.
      Builder.CreateCall(InitFnTy, Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Builder.CreateThreadLocalAddress(Var);

    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Var->getValueType(), Val, Align);
    }
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");

    Builder.CreateRet(Val);
  }
}
3008 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3009 const VarDecl *VD,
3010 QualType LValType) {
3011 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3012 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3014 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3015 CallVal->setCallingConv(Wrapper->getCallingConv());
3017 LValue LV;
3018 if (VD->getType()->isReferenceType())
3019 LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3020 else
3021 LV = CGF.MakeAddrLValue(CallVal, LValType,
3022 CGF.getContext().getDeclAlign(VD));
3023 // FIXME: need setObjCGCLValueClass?
3024 return LV;
3027 /// Return whether the given global decl needs a VTT parameter, which it does
3028 /// if it's a base constructor or destructor with virtual bases.
3029 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3030 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3032 // We don't have any virtual bases, just return early.
3033 if (!MD->getParent()->getNumVBases())
3034 return false;
3036 // Check if we have a base constructor.
3037 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3038 return true;
3040 // Check if we have a base destructor.
3041 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3042 return true;
3044 return false;
namespace {
/// ItaniumRTTIBuilder - Builds Itanium-ABI RTTI descriptors (the structures
/// backing std::type_info) one at a time via BuildTypeInfo; the descriptor
/// under construction is accumulated in Fields.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
}
3148 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3149 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3150 SmallString<256> Name;
3151 llvm::raw_svector_ostream Out(Name);
3152 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3154 // We know that the mangled name of the type starts at index 4 of the
3155 // mangled name of the typename, so we can just index into it in order to
3156 // get the mangled name of the type.
3157 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3158 Name.substr(4));
3159 auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3161 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3162 Name, Init->getType(), Linkage, Align.getQuantity());
3164 GV->setInitializer(Init);
3166 return GV;
/// Return an i8* reference to the RTTI descriptor for \p Ty that is provided
/// externally (by another TU or the runtime library): declare the symbol here
/// if it is not already present, but never define it.
llvm::Constant *
ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
  // Mangle the RTTI name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  // Look for an existing global.
  llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);

  if (!GV) {
    // Create a new global variable.
    // Note for the future: If we would ever like to do deferred emission of
    // RTTI, check if emitting vtables opportunistically need any adjustment.
    GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
                                  /*isConstant=*/true,
                                  llvm::GlobalValue::ExternalLinkage, nullptr,
                                  Name);
    const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
    CGM.setGVProperties(GV, RD);
    // Import the typeinfo symbol when all non-inline virtual methods are
    // imported.
    if (CGM.getTarget().hasPS4DLLImportExport()) {
      if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
        GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
        CGM.setDSOLocal(GV);
      }
    }
  }

  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
/// info for that type is defined in the standard library.
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
  // Itanium C++ ABI 2.9.2:
  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
  //   the run-time support library. Specifically, the run-time support
  //   library should contain type_info objects for the types X, X* and
  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
  //   long, unsigned long, long long, unsigned long long, float, double,
  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
  //   half-precision floating point types.
  //
  // GCC also emits RTTI for __int128.
  // FIXME: We do not emit RTTI information for decimal types here.

  // Types added here must also be added to EmitFundamentalRTTIDescriptors.
  switch (Ty->getKind()) {
  // Fundamental types whose type_info objects the runtime provides.
  case BuiltinType::Void:
  case BuiltinType::NullPtr:
  case BuiltinType::Bool:
  case BuiltinType::WChar_S:
  case BuiltinType::WChar_U:
  case BuiltinType::Char_U:
  case BuiltinType::Char_S:
  case BuiltinType::UChar:
  case BuiltinType::SChar:
  case BuiltinType::Short:
  case BuiltinType::UShort:
  case BuiltinType::Int:
  case BuiltinType::UInt:
  case BuiltinType::Long:
  case BuiltinType::ULong:
  case BuiltinType::LongLong:
  case BuiltinType::ULongLong:
  case BuiltinType::Half:
  case BuiltinType::Float:
  case BuiltinType::Double:
  case BuiltinType::LongDouble:
  case BuiltinType::Float16:
  case BuiltinType::Float128:
  case BuiltinType::Ibm128:
  case BuiltinType::Char8:
  case BuiltinType::Char16:
  case BuiltinType::Char32:
  case BuiltinType::Int128:
  case BuiltinType::UInt128:
    return true;

  // Extension/target-specific types: the TU must emit type_info itself.
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
  case BuiltinType::OCLSampler:
  case BuiltinType::OCLEvent:
  case BuiltinType::OCLClkEvent:
  case BuiltinType::OCLQueue:
  case BuiltinType::OCLReserveID:
#define SVE_TYPE(Name, Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
  case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
  case BuiltinType::ShortAccum:
  case BuiltinType::Accum:
  case BuiltinType::LongAccum:
  case BuiltinType::UShortAccum:
  case BuiltinType::UAccum:
  case BuiltinType::ULongAccum:
  case BuiltinType::ShortFract:
  case BuiltinType::Fract:
  case BuiltinType::LongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::UFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatShortAccum:
  case BuiltinType::SatAccum:
  case BuiltinType::SatLongAccum:
  case BuiltinType::SatUShortAccum:
  case BuiltinType::SatUAccum:
  case BuiltinType::SatULongAccum:
  case BuiltinType::SatShortFract:
  case BuiltinType::SatFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::SatUFract:
  case BuiltinType::SatULongFract:
  case BuiltinType::BFloat16:
    return false;

  // Placeholder types should never survive to RTTI emission.
  case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
  case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
    llvm_unreachable("asking for RRTI for a placeholder type!");

  case BuiltinType::ObjCId:
  case BuiltinType::ObjCClass:
  case BuiltinType::ObjCSel:
    llvm_unreachable("FIXME: Objective-C types are unsupported!");
  }

  llvm_unreachable("Invalid BuiltinType Kind!");
}
3314 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3315 QualType PointeeTy = PointerTy->getPointeeType();
3316 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3317 if (!BuiltinTy)
3318 return false;
3320 // Check the qualifiers.
3321 Qualifiers Quals = PointeeTy.getQualifiers();
3322 Quals.removeConst();
3324 if (!Quals.empty())
3325 return false;
3327 return TypeInfoIsInStandardLibrary(BuiltinTy);
3330 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3331 /// information for the given type exists in the standard library.
3332 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3333 // Type info for builtin types is defined in the standard library.
3334 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3335 return TypeInfoIsInStandardLibrary(BuiltinTy);
3337 // Type info for some pointer types to builtin types is defined in the
3338 // standard library.
3339 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3340 return TypeInfoIsInStandardLibrary(PointerTy);
3342 return false;
3345 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3346 /// the given type exists somewhere else, and that we should not emit the type
3347 /// information in this translation unit. Assumes that it is not a
3348 /// standard-library type.
3349 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3350 QualType Ty) {
3351 ASTContext &Context = CGM.getContext();
3353 // If RTTI is disabled, assume it might be disabled in the
3354 // translation unit that defines any potential key function, too.
3355 if (!Context.getLangOpts().RTTI) return false;
3357 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3358 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3359 if (!RD->hasDefinition())
3360 return false;
3362 if (!RD->isDynamicClass())
3363 return false;
3365 // FIXME: this may need to be reconsidered if the key function
3366 // changes.
3367 // N.B. We must always emit the RTTI data ourselves if there exists a key
3368 // function.
3369 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3371 // Don't import the RTTI but emit it locally.
3372 if (CGM.getTriple().isWindowsGNUEnvironment())
3373 return false;
3375 if (CGM.getVTables().isVTableExternal(RD)) {
3376 if (CGM.getTarget().hasPS4DLLImportExport())
3377 return true;
3379 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3380 ? false
3381 : true;
3383 if (IsDLLImport)
3384 return true;
3387 return false;
3390 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3391 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3392 return !RecordTy->getDecl()->isCompleteDefinition();
/// ContainsIncompleteClassType - Returns whether the given type contains an
/// incomplete class type. This is true if
///
///   * The given type is an incomplete class type.
///   * The given type is a pointer type whose pointee type contains an
///     incomplete class type.
///   * The given type is a member pointer type whose class is an incomplete
///     class type.
///   * The given type is a member pointer type whose pointee type contains an
///     incomplete class type.
static bool ContainsIncompleteClassType(QualType Ty) {
  // Direct case: the type itself is an incomplete class.
  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
    if (IsIncompleteClassType(RecordTy))
      return true;
  }

  // Recurse through (arbitrarily deep) pointer indirection.
  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
    return ContainsIncompleteClassType(PointerTy->getPointeeType());

  if (const MemberPointerType *MemberPointerTy =
      dyn_cast<MemberPointerType>(Ty)) {
    // Check if the class type is incomplete.
    const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
    if (IsIncompleteClassType(ClassType))
      return true;

    // The pointee itself may also (transitively) involve an incomplete class.
    return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
  }

  return false;
}
3428 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3429 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3430 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
3431 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3432 // Check the number of bases.
3433 if (RD->getNumBases() != 1)
3434 return false;
3436 // Get the base.
3437 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3439 // Check that the base is not virtual.
3440 if (Base->isVirtual())
3441 return false;
3443 // Check that the base is public.
3444 if (Base->getAccessSpecifier() != AS_public)
3445 return false;
3447 // Check that the class is dynamic iff the base is.
3448 auto *BaseDecl =
3449 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3450 if (!BaseDecl->isEmpty() &&
3451 BaseDecl->isDynamicClass() != RD->isDynamicClass())
3452 return false;
3454 return true;
/// BuildVTablePointer - Push the first field of the type_info object: the
/// address point of the vtable for the matching C++ runtime RTTI class
/// (abi::__fundamental_type_info, abi::__pointer_type_info, etc.), selected
/// by the type class of \p Ty.
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
    "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  const char *VTableName = nullptr;

  switch (Ty->getTypeClass()) {
  // Expand every non-canonical and dependent type class into an unreachable
  // case: callers always hand us a canonical, non-dependent type.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::Builtin:
  case Type::BitInt:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());

    // Incomplete or base-less classes use the plain __class_type_info;
    // otherwise pick the single- or multiple-inheritance variant.
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    [[fallthrough]];

  case Type::ObjCInterface:
    // Interfaces with a superclass look like single inheritance; root
    // classes are plain __class_type_info.
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;
  }

  llvm::Constant *VTable = nullptr;

  // Check if the alias exists. If it doesn't, then get or create the global.
  if (CGM.getItaniumVTableContext().isRelativeLayout())
    VTable = CGM.getModule().getNamedAlias(VTableName);
  if (!VTable)
    VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);

  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));

  llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  // The vtable address point is 2.
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // The vtable address point is 8 bytes after its start:
    // 4 for the offset to top + 4 for the relative offset to rtti.
    llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
    VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
    VTable =
        llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
  } else {
    llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
                                                          Two);
  }
  VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);

  Fields.push_back(VTable);
}
/// Return the linkage that the type info and type info name constants
/// should have for the given type.
static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
                                                             QualType Ty) {
  // Itanium C++ ABI 2.9.5p7:
  //   In addition, it and all of the intermediate abi::__pointer_type_info
  //   structs in the chain down to the abi::__class_type_info for the
  //   incomplete class type must be prevented from resolving to the
  //   corresponding type_info structs for the complete class type, possibly
  //   by making them local static objects. Finally, a dummy class RTTI is
  //   generated for the incomplete type that will not resolve to the final
  //   complete class RTTI (because the latter need not exist), possibly by
  //   making it a local static object.
  if (ContainsIncompleteClassType(Ty))
    return llvm::GlobalValue::InternalLinkage;

  // Otherwise map the language linkage of the type to an LLVM linkage.
  switch (Ty->getLinkage()) {
  case NoLinkage:
  case InternalLinkage:
  case UniqueExternalLinkage:
    return llvm::GlobalValue::InternalLinkage;

  case VisibleNoLinkage:
  case ModuleInternalLinkage:
  case ModuleLinkage:
  case ExternalLinkage:
    // RTTI is not enabled, which means that this type info struct is going
    // to be used for exception handling. Give it linkonce_odr linkage.
    if (!CGM.getLangOpts().RTTI)
      return llvm::GlobalValue::LinkOnceODRLinkage;

    if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
      if (RD->hasAttr<WeakAttr>())
        return llvm::GlobalValue::WeakODRLinkage;
      if (CGM.getTriple().isWindowsItaniumEnvironment())
        if (RD->hasAttr<DLLImportAttr>() &&
            ShouldUseExternalRTTIDescriptor(CGM, Ty))
          return llvm::GlobalValue::ExternalLinkage;
      // MinGW always uses LinkOnceODRLinkage for type info.
      if (RD->isDynamicClass() &&
          !CGM.getContext()
               .getTargetInfo()
               .getTriple()
               .isWindowsGNUEnvironment())
        return CGM.getVTableLinkage(RD);
    }

    return llvm::GlobalValue::LinkOnceODRLinkage;
  }

  llvm_unreachable("Invalid linkage!");
}
/// BuildTypeInfo - Compute linkage, visibility and DLL storage class for the
/// type_info of \p Ty, then delegate to the four-argument overload to emit
/// (or reuse) the descriptor.  Returns the descriptor cast to i8*.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
  // We want to operate on the canonical type.
  Ty = Ty.getCanonicalType();

  // Check if we've already emitted an RTTI descriptor for this type.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);

  llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
  if (OldGV && !OldGV->isDeclaration()) {
    assert(!OldGV->hasAvailableExternallyLinkage() &&
           "available_externally typeinfos not yet implemented");

    return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
  }

  // Check if there is already an external RTTI descriptor for this type.
  if (IsStandardLibraryRTTIDescriptor(Ty) ||
      ShouldUseExternalRTTIDescriptor(CGM, Ty))
    return GetAddrOfExternalRTTIDescriptor(Ty);

  // Emit the standard library with external linkage.
  llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);

  // Give the type_info object and name the formal visibility of the
  // type itself.
  llvm::GlobalValue::VisibilityTypes llvmVisibility;
  if (llvm::GlobalValue::isLocalLinkage(Linkage))
    // If the linkage is local, only default visibility makes sense.
    llvmVisibility = llvm::GlobalValue::DefaultVisibility;
  else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
           ItaniumCXXABI::RUK_NonUniqueHidden)
    llvmVisibility = llvm::GlobalValue::HiddenVisibility;
  else
    llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());

  // Export the descriptor on Windows-Itanium for dllexport'd classes, or
  // when the target maps default visibility to dllexport.
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      llvm::GlobalValue::DefaultStorageClass;
  if (auto RD = Ty->getAsCXXRecordDecl()) {
    if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
         RD->hasAttr<DLLExportAttr>()) ||
        (CGM.shouldMapVisibilityToDLLExport(RD) &&
         !llvm::GlobalValue::isLocalLinkage(Linkage) &&
         llvmVisibility == llvm::GlobalValue::DefaultVisibility))
      DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
  }
  return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
}
/// BuildTypeInfo - Emit the type_info descriptor for \p Ty with the given
/// linkage, visibility and DLL storage class.  The descriptor layout is:
/// vtable pointer, type-name pointer, then per-kind extra fields (Itanium
/// C++ ABI 2.9.5).  Returns the new global cast to i8*.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
  // Add the vtable pointer.
  BuildVTablePointer(cast<Type>(Ty));

  // And the name.
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
  llvm::Constant *TypeNameField;

  // If we're supposed to demote the visibility, be sure to set a flag
  // to use a string comparison for type_info comparisons.
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
      CXXABI.classifyRTTIUniqueness(Ty, Linkage);
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
    // The flag is the sign bit, which on ARM64 is defined to be clear
    // for global pointers.  This is very ARM64-specific.
    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
    llvm::Constant *flag =
        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
    TypeNameField =
        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
  } else {
    TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
  }
  Fields.push_back(TypeNameField);

  // Append the per-kind extra fields (if any) mandated by the ABI.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  // GCC treats vector types as fundamental types.
  case Type::Builtin:
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::BlockPointer:
    // Itanium C++ ABI 2.9.5p4:
    //   abi::__fundamental_type_info adds no data members to std::type_info.
    break;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    break;

  case Type::BitInt:
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__array_type_info adds no data members to std::type_info.
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__function_type_info adds no data members to std::type_info.
    break;

  case Type::Enum:
    // Itanium C++ ABI 2.9.5p5:
    //   abi::__enum_type_info adds no data members to std::type_info.
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
        cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      // We don't need to emit any fields.
      break;
    }

    if (CanUseSingleInheritance(RD))
      BuildSIClassTypeInfo(RD);
    else
      BuildVMIClassTypeInfo(RD);

    break;
  }

  case Type::ObjCObject:
  case Type::ObjCInterface:
    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
    break;

  case Type::ObjCObjectPointer:
    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    break;

  case Type::Pointer:
    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
    break;

  case Type::MemberPointer:
    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
    break;

  case Type::Atomic:
    // No fields, at least for the moment.
    break;
  }

  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
  llvm::Module &M = CGM.getModule();
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(M, Init->getType(),
                               /*isConstant=*/true, Linkage, Init, Name);

  // Export the typeinfo in the same circumstances as the vtable is exported.
  auto GVDLLStorageClass = DLLStorageClass;
  if (CGM.getTarget().hasPS4DLLImportExport()) {
    if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
      if (RD->hasAttr<DLLExportAttr>() ||
          CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
        GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
      }
    }
  }

  // If there's already an old global variable, replace it with the new one.
  if (OldGV) {
    GV->takeName(OldGV);
    llvm::Constant *NewPtr =
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtr);
    OldGV->eraseFromParent();
  }

  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(M.getOrInsertComdat(GV->getName()));

  CharUnits Align =
      CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
  GV->setAlignment(Align.getAsAlign());

  // The Itanium ABI specifies that type_info objects must be globally
  // unique, with one exception: if the type is an incomplete class
  // type or a (possibly indirect) pointer to one.  That exception
  // affects the general case of comparing type_info objects produced
  // by the typeid operator, which is why the comparison operators on
  // std::type_info generally use the type_info name pointers instead
  // of the object addresses.  However, the language's built-in uses
  // of RTTI generally require class types to be complete, even when
  // manipulating pointers to those class types.  This allows the
  // implementation of dynamic_cast to rely on address equality tests,
  // which is much faster.

  // All of this is to say that it's important that both the type_info
  // object and the type_info name be uniqued when weakly emitted.

  TypeName->setVisibility(Visibility);
  CGM.setDSOLocal(TypeName);

  GV->setVisibility(Visibility);
  CGM.setDSOLocal(GV);

  TypeName->setDLLStorageClass(DLLStorageClass);
  GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
                             ? GVDLLStorageClass
                             : DLLStorageClass);

  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);

  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
3896 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3897 /// for the given Objective-C object type.
3898 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3899 // Drop qualifiers.
3900 const Type *T = OT->getBaseType().getTypePtr();
3901 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3903 // The builtin types are abi::__class_type_infos and don't require
3904 // extra fields.
3905 if (isa<BuiltinType>(T)) return;
3907 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3908 ObjCInterfaceDecl *Super = Class->getSuperClass();
3910 // Root classes are also __class_type_info.
3911 if (!Super) return;
3913 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3915 // Everything else is single inheritance.
3916 llvm::Constant *BaseTypeInfo =
3917 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3918 Fields.push_back(BaseTypeInfo);
3921 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3922 /// inheritance, according to the Itanium C++ ABI, 2.95p6b.
3923 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3924 // Itanium C++ ABI 2.9.5p6b:
3925 // It adds to abi::__class_type_info a single member pointing to the
3926 // type_info structure for the base type,
3927 llvm::Constant *BaseTypeInfo =
3928 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3929 Fields.push_back(BaseTypeInfo);
namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy.  Used to detect repeated and diamond-shaped
  /// inheritance while computing __vmi_class_type_info flags.
  struct SeenBases {
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
}
/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
/// abi::__vmi_class_type_info for the subhierarchy rooted at \p Base,
/// recording each base in \p Bases so that repetition across the whole
/// hierarchy is detected.
///
static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
                                             SeenBases &Bases) {

  unsigned Flags = 0;

  auto *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());

  if (Base->isVirtual()) {
    // Mark the virtual base as seen.
    if (!Bases.VirtualBases.insert(BaseDecl).second) {
      // If this virtual base has been seen before, then the class is diamond
      // shaped.
      Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
    } else {
      // First virtual occurrence: repetition only if it was already seen as
      // a non-virtual base.
      if (Bases.NonVirtualBases.count(BaseDecl))
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    }
  } else {
    // Mark the non-virtual base as seen.
    if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
      // If this non-virtual base has been seen before, then the class has non-
      // diamond shaped repeated inheritance.
      Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    } else {
      // First non-virtual occurrence: repetition only if it was already seen
      // as a virtual base.
      if (Bases.VirtualBases.count(BaseDecl))
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    }
  }

  // Walk all bases.
  for (const auto &I : BaseDecl->bases())
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);

  return Flags;
}
3981 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3982 unsigned Flags = 0;
3983 SeenBases Bases;
3985 // Walk all bases.
3986 for (const auto &I : RD->bases())
3987 Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3989 return Flags;
/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
/// classes with bases that do not satisfy the abi::__si_class_type_info
/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
  llvm::Type *UnsignedIntLTy =
    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);

  // Itanium C++ ABI 2.9.5p6c:
  //   __flags is a word with flags describing details about the class
  //   structure, which may be referenced by using the __flags_masks
  //   enumeration. These flags refer to both direct and indirect bases.
  unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));

  // Itanium C++ ABI 2.9.5p6c:
  //   __base_count is a word with the number of direct proper base class
  //   descriptions that follow.
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));

  if (!RD->getNumBases())
    return;

  // Now add the base class descriptions.

  // Itanium C++ ABI 2.9.5p6c:
  //   __base_info[] is an array of base class descriptions -- one for every
  //   direct proper base. Each description is of the type:
  //
  //   struct abi::__base_class_type_info {
  //   public:
  //     const __class_type_info *__base_type;
  //     long __offset_flags;
  //
  //     enum __offset_flags_masks {
  //       __virtual_mask = 0x1,
  //       __public_mask = 0x2,
  //       __offset_shift = 8
  //     };
  //   };

  // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
  // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
  // LLP64 platforms.
  // FIXME: Consider updating libc++abi to match, and extend this logic to all
  // LLP64 platforms.
  QualType OffsetFlagsTy = CGM.getContext().LongTy;
  const TargetInfo &TI = CGM.getContext().getTargetInfo();
  if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
    OffsetFlagsTy = CGM.getContext().LongLongTy;
  llvm::Type *OffsetFlagsLTy =
      CGM.getTypes().ConvertType(OffsetFlagsTy);

  for (const auto &Base : RD->bases()) {
    // The __base_type member points to the RTTI for the base type.
    Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));

    auto *BaseDecl =
        cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());

    int64_t OffsetFlags = 0;

    // All but the lower 8 bits of __offset_flags are a signed offset.
    // For a non-virtual base, this is the offset in the object of the base
    // subobject. For a virtual base, this is the offset in the virtual table of
    // the virtual base offset for the virtual base referenced (negative).
    CharUnits Offset;
    if (Base.isVirtual())
      Offset =
        CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
    else {
      const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
      Offset = Layout.getBaseClassOffset(BaseDecl);
    }

    OffsetFlags = uint64_t(Offset.getQuantity()) << 8;

    // The low-order byte of __offset_flags contains flags, as given by the
    // masks from the enumeration __offset_flags_masks.
    if (Base.isVirtual())
      OffsetFlags |= BCTI_Virtual;
    if (Base.getAccessSpecifier() == AS_public)
      OffsetFlags |= BCTI_Public;

    Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
  }
}
4079 /// Compute the flags for a __pbase_type_info, and remove the corresponding
4080 /// pieces from \p Type.
4081 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4082 unsigned Flags = 0;
4084 if (Type.isConstQualified())
4085 Flags |= ItaniumRTTIBuilder::PTI_Const;
4086 if (Type.isVolatileQualified())
4087 Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4088 if (Type.isRestrictQualified())
4089 Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4090 Type = Type.getUnqualifiedType();
4092 // Itanium C++ ABI 2.9.5p7:
4093 // When the abi::__pbase_type_info is for a direct or indirect pointer to an
4094 // incomplete class type, the incomplete target type flag is set.
4095 if (ContainsIncompleteClassType(Type))
4096 Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4098 if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4099 if (Proto->isNothrow()) {
4100 Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4101 Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4105 return Flags;
4108 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4109 /// used for pointer types.
4110 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4111 // Itanium C++ ABI 2.9.5p7:
4112 // __flags is a flag word describing the cv-qualification and other
4113 // attributes of the type pointed to
4114 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4116 llvm::Type *UnsignedIntLTy =
4117 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4118 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4120 // Itanium C++ ABI 2.9.5p7:
4121 // __pointee is a pointer to the std::type_info derivation for the
4122 // unqualified type being pointed to.
4123 llvm::Constant *PointeeTypeInfo =
4124 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4125 Fields.push_back(PointeeTypeInfo);
4128 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4129 /// struct, used for member pointer types.
4130 void
4131 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4132 QualType PointeeTy = Ty->getPointeeType();
4134 // Itanium C++ ABI 2.9.5p7:
4135 // __flags is a flag word describing the cv-qualification and other
4136 // attributes of the type pointed to.
4137 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4139 const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4140 if (IsIncompleteClassType(ClassType))
4141 Flags |= PTI_ContainingClassIncomplete;
4143 llvm::Type *UnsignedIntLTy =
4144 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4145 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4147 // Itanium C++ ABI 2.9.5p7:
4148 // __pointee is a pointer to the std::type_info derivation for the
4149 // unqualified type being pointed to.
4150 llvm::Constant *PointeeTypeInfo =
4151 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4152 Fields.push_back(PointeeTypeInfo);
4154 // Itanium C++ ABI 2.9.5p9:
4155 // __context is a pointer to an abi::__class_type_info corresponding to the
4156 // class type containing the member pointed to
4157 // (e.g., the "A" in "int A::*").
4158 Fields.push_back(
4159 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4162 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4163 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
/// Emit the standard-library set of fundamental-type RTTI descriptors (for
/// each fundamental type T: T, T*, and const T*), with external linkage and
/// the visibility/DLL storage derived from \p RD (the class that triggered
/// the emission, e.g. the runtime's __fundamental_type_info).
void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
  // Types added here must also be added to TypeInfoIsInStandardLibrary.
  QualType FundamentalTypes[] = {
      getContext().VoidTy,             getContext().NullPtrTy,
      getContext().BoolTy,             getContext().WCharTy,
      getContext().CharTy,             getContext().UnsignedCharTy,
      getContext().SignedCharTy,       getContext().ShortTy,
      getContext().UnsignedShortTy,    getContext().IntTy,
      getContext().UnsignedIntTy,      getContext().LongTy,
      getContext().UnsignedLongTy,     getContext().LongLongTy,
      getContext().UnsignedLongLongTy, getContext().Int128Ty,
      getContext().UnsignedInt128Ty,   getContext().HalfTy,
      getContext().FloatTy,            getContext().DoubleTy,
      getContext().LongDoubleTy,       getContext().Float128Ty,
      getContext().Char8Ty,            getContext().Char16Ty,
      getContext().Char32Ty
  };
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
          ? llvm::GlobalValue::DLLExportStorageClass
          : llvm::GlobalValue::DefaultStorageClass;
  llvm::GlobalValue::VisibilityTypes Visibility =
      CodeGenModule::GetLLVMVisibility(RD->getVisibility());
  for (const QualType &FundamentalType : FundamentalTypes) {
    // Emit T, T* and const T* descriptors for each fundamental type.
    QualType PointerType = getContext().getPointerType(FundamentalType);
    QualType PointerTypeConst = getContext().getPointerType(
        FundamentalType.withConst());
    for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
      ItaniumRTTIBuilder(*this).BuildTypeInfo(
          Type, llvm::GlobalValue::ExternalLinkage,
          Visibility, DLLStorageClass);
  }
}
/// What sort of uniqueness rules should we use for the RTTI for the
/// given type?  Non-unique RTTI forces string-based type_info comparison
/// (see the sign-bit flag set on the name pointer in BuildTypeInfo).
ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
    QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
  if (shouldRTTIBeUnique())
    return RUK_Unique;

  // It's only necessary for linkonce_odr or weak_odr linkage.
  if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
      Linkage != llvm::GlobalValue::WeakODRLinkage)
    return RUK_Unique;

  // It's only necessary with default visibility.
  if (CanTy->getVisibility() != DefaultVisibility)
    return RUK_Unique;

  // If we're not required to publish this symbol, hide it.
  if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
    return RUK_NonUniqueHidden;

  // If we're required to publish this symbol, as we might be under an
  // explicit instantiation, leave it with default visibility but
  // enable string-comparisons.
  assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
  return RUK_NonUniqueVisible;
}
4227 // Find out how to codegen the complete destructor and constructor
4228 namespace {
4229 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
4231 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4232 const CXXMethodDecl *MD) {
4233 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4234 return StructorCodegen::Emit;
4236 // The complete and base structors are not equivalent if there are any virtual
4237 // bases, so emit separate functions.
4238 if (MD->getParent()->getNumVBases())
4239 return StructorCodegen::Emit;
4241 GlobalDecl AliasDecl;
4242 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4243 AliasDecl = GlobalDecl(DD, Dtor_Complete);
4244 } else {
4245 const auto *CD = cast<CXXConstructorDecl>(MD);
4246 AliasDecl = GlobalDecl(CD, Ctor_Complete);
4248 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4250 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4251 return StructorCodegen::RAUW;
4253 // FIXME: Should we allow available_externally aliases?
4254 if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4255 return StructorCodegen::RAUW;
4257 if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4258 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4259 if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4260 CGM.getTarget().getTriple().isOSBinFormatWasm())
4261 return StructorCodegen::COMDAT;
4262 return StructorCodegen::Emit;
4265 return StructorCodegen::Alias;
4268 static void emitConstructorDestructorAlias(CodeGenModule &CGM,
4269 GlobalDecl AliasDecl,
4270 GlobalDecl TargetDecl) {
4271 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4273 StringRef MangledName = CGM.getMangledName(AliasDecl);
4274 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
4275 if (Entry && !Entry->isDeclaration())
4276 return;
4278 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));
4280 // Create the alias with no name.
4281 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);
4283 // Constructors and destructors are always unnamed_addr.
4284 Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4286 // Switch any previous uses to the alias.
4287 if (Entry) {
4288 assert(Entry->getType() == Aliasee->getType() &&
4289 "declaration exists with different type");
4290 Alias->takeName(Entry);
4291 Entry->replaceAllUsesWith(Alias);
4292 Entry->eraseFromParent();
4293 } else {
4294 Alias->setName(MangledName);
4297 // Finally, set up the alias with its proper name and attributes.
4298 CGM.SetCommonAttributes(AliasDecl, Alias);
4301 void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
4302 auto *MD = cast<CXXMethodDecl>(GD.getDecl());
4303 auto *CD = dyn_cast<CXXConstructorDecl>(MD);
4304 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
4306 StructorCodegen CGType = getCodegenToUse(CGM, MD);
4308 if (CD ? GD.getCtorType() == Ctor_Complete
4309 : GD.getDtorType() == Dtor_Complete) {
4310 GlobalDecl BaseDecl;
4311 if (CD)
4312 BaseDecl = GD.getWithCtorType(Ctor_Base);
4313 else
4314 BaseDecl = GD.getWithDtorType(Dtor_Base);
4316 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
4317 emitConstructorDestructorAlias(CGM, GD, BaseDecl);
4318 return;
4321 if (CGType == StructorCodegen::RAUW) {
4322 StringRef MangledName = CGM.getMangledName(GD);
4323 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
4324 CGM.addReplacement(MangledName, Aliasee);
4325 return;
4329 // The base destructor is equivalent to the base destructor of its
4330 // base class if there is exactly one non-virtual base class with a
4331 // non-trivial destructor, there are no fields with a non-trivial
4332 // destructor, and the body of the destructor is trivial.
4333 if (DD && GD.getDtorType() == Dtor_Base &&
4334 CGType != StructorCodegen::COMDAT &&
4335 !CGM.TryEmitBaseDestructorAsAlias(DD))
4336 return;
4338 // FIXME: The deleting destructor is equivalent to the selected operator
4339 // delete if:
4340 // * either the delete is a destroying operator delete or the destructor
4341 // would be trivial if it weren't virtual,
4342 // * the conversion from the 'this' parameter to the first parameter of the
4343 // destructor is equivalent to a bitcast,
4344 // * the destructor does not have an implicit "this" return, and
4345 // * the operator delete has the same calling convention and IR function type
4346 // as the destructor.
4347 // In such cases we should try to emit the deleting dtor as an alias to the
4348 // selected 'operator delete'.
4350 llvm::Function *Fn = CGM.codegenCXXStructor(GD);
4352 if (CGType == StructorCodegen::COMDAT) {
4353 SmallString<256> Buffer;
4354 llvm::raw_svector_ostream Out(Buffer);
4355 if (DD)
4356 getMangleContext().mangleCXXDtorComdat(DD, Out);
4357 else
4358 getMangleContext().mangleCXXCtorComdat(CD, Out);
4359 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
4360 Fn->setComdat(C);
4361 } else {
4362 CGM.maybeSetTrivialComdat(*MD, *Fn);
4366 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4367 // void *__cxa_begin_catch(void*);
4368 llvm::FunctionType *FTy = llvm::FunctionType::get(
4369 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4371 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4374 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4375 // void __cxa_end_catch();
4376 llvm::FunctionType *FTy =
4377 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4379 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4382 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4383 // void *__cxa_get_exception_ptr(void*);
4384 llvm::FunctionType *FTy = llvm::FunctionType::get(
4385 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4387 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4390 namespace {
4391 /// A cleanup to call __cxa_end_catch. In many cases, the caught
4392 /// exception type lets us state definitively that the thrown exception
4393 /// type does not have a destructor. In particular:
4394 /// - Catch-alls tell us nothing, so we have to conservatively
4395 /// assume that the thrown exception might have a destructor.
4396 /// - Catches by reference behave according to their base types.
4397 /// - Catches of non-record types will only trigger for exceptions
4398 /// of non-record types, which never have destructors.
4399 /// - Catches of record types can trigger for arbitrary subclasses
4400 /// of the caught type, so we have to assume the actual thrown
4401 /// exception type might have a throwing destructor, even if the
4402 /// caught type's destructor is trivial or nothrow.
4403 struct CallEndCatch final : EHScopeStack::Cleanup {
4404 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4405 bool MightThrow;
4407 void Emit(CodeGenFunction &CGF, Flags flags) override {
4408 if (!MightThrow) {
4409 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4410 return;
4413 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4418 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4419 /// __cxa_end_catch.
4421 /// \param EndMightThrow - true if __cxa_end_catch might throw
4422 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4423 llvm::Value *Exn,
4424 bool EndMightThrow) {
4425 llvm::CallInst *call =
4426 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4428 CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4430 return call;
4433 /// A "special initializer" callback for initializing a catch
4434 /// parameter during catch initialization.
4435 static void InitCatchParam(CodeGenFunction &CGF,
4436 const VarDecl &CatchParam,
4437 Address ParamAddr,
4438 SourceLocation Loc) {
4439 // Load the exception from where the landing pad saved it.
4440 llvm::Value *Exn = CGF.getExceptionFromSlot();
4442 CanQualType CatchType =
4443 CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
4444 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
4446 // If we're catching by reference, we can just cast the object
4447 // pointer to the appropriate pointer.
4448 if (isa<ReferenceType>(CatchType)) {
4449 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
4450 bool EndCatchMightThrow = CaughtType->isRecordType();
4452 // __cxa_begin_catch returns the adjusted object pointer.
4453 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
4455 // We have no way to tell the personality function that we're
4456 // catching by reference, so if we're catching a pointer,
4457 // __cxa_begin_catch will actually return that pointer by value.
4458 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
4459 QualType PointeeType = PT->getPointeeType();
4461 // When catching by reference, generally we should just ignore
4462 // this by-value pointer and use the exception object instead.
4463 if (!PointeeType->isRecordType()) {
4465 // Exn points to the struct _Unwind_Exception header, which
4466 // we have to skip past in order to reach the exception data.
4467 unsigned HeaderSize =
4468 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
4469 AdjustedExn =
4470 CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);
4472 // However, if we're catching a pointer-to-record type that won't
4473 // work, because the personality function might have adjusted
4474 // the pointer. There's actually no way for us to fully satisfy
4475 // the language/ABI contract here: we can't use Exn because it
4476 // might have the wrong adjustment, but we can't use the by-value
4477 // pointer because it's off by a level of abstraction.
4479 // The current solution is to dump the adjusted pointer into an
4480 // alloca, which breaks language semantics (because changing the
4481 // pointer doesn't change the exception) but at least works.
4482 // The better solution would be to filter out non-exact matches
4483 // and rethrow them, but this is tricky because the rethrow
4484 // really needs to be catchable by other sites at this landing
4485 // pad. The best solution is to fix the personality function.
4486 } else {
4487 // Pull the pointer for the reference type off.
4488 llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);
4490 // Create the temporary and write the adjusted pointer into it.
4491 Address ExnPtrTmp =
4492 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
4493 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4494 CGF.Builder.CreateStore(Casted, ExnPtrTmp);
4496 // Bind the reference to the temporary.
4497 AdjustedExn = ExnPtrTmp.getPointer();
4501 llvm::Value *ExnCast =
4502 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
4503 CGF.Builder.CreateStore(ExnCast, ParamAddr);
4504 return;
4507 // Scalars and complexes.
4508 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
4509 if (TEK != TEK_Aggregate) {
4510 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
4512 // If the catch type is a pointer type, __cxa_begin_catch returns
4513 // the pointer by value.
4514 if (CatchType->hasPointerRepresentation()) {
4515 llvm::Value *CastExn =
4516 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
4518 switch (CatchType.getQualifiers().getObjCLifetime()) {
4519 case Qualifiers::OCL_Strong:
4520 CastExn = CGF.EmitARCRetainNonBlock(CastExn);
4521 [[fallthrough]];
4523 case Qualifiers::OCL_None:
4524 case Qualifiers::OCL_ExplicitNone:
4525 case Qualifiers::OCL_Autoreleasing:
4526 CGF.Builder.CreateStore(CastExn, ParamAddr);
4527 return;
4529 case Qualifiers::OCL_Weak:
4530 CGF.EmitARCInitWeak(ParamAddr, CastExn);
4531 return;
4533 llvm_unreachable("bad ownership qualifier!");
4536 // Otherwise, it returns a pointer into the exception object.
4538 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4539 llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
4541 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
4542 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
4543 switch (TEK) {
4544 case TEK_Complex:
4545 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
4546 /*init*/ true);
4547 return;
4548 case TEK_Scalar: {
4549 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
4550 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
4551 return;
4553 case TEK_Aggregate:
4554 llvm_unreachable("evaluation kind filtered out!");
4556 llvm_unreachable("bad evaluation kind");
4559 assert(isa<RecordType>(CatchType) && "unexpected catch type!");
4560 auto catchRD = CatchType->getAsCXXRecordDecl();
4561 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
4563 llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
4565 // Check for a copy expression. If we don't have a copy expression,
4566 // that means a trivial copy is okay.
4567 const Expr *copyExpr = CatchParam.getInit();
4568 if (!copyExpr) {
4569 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
4570 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4571 LLVMCatchTy, caughtExnAlignment);
4572 LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
4573 LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
4574 CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
4575 return;
4578 // We have to call __cxa_get_exception_ptr to get the adjusted
4579 // pointer before copying.
4580 llvm::CallInst *rawAdjustedExn =
4581 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);
4583 // Cast that to the appropriate type.
4584 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
4585 LLVMCatchTy, caughtExnAlignment);
4587 // The copy expression is defined in terms of an OpaqueValueExpr.
4588 // Find it and map it to the adjusted expression.
4589 CodeGenFunction::OpaqueValueMapping
4590 opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
4591 CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
4593 // Call the copy ctor in a terminate scope.
4594 CGF.EHStack.pushTerminate();
4596 // Perform the copy construction.
4597 CGF.EmitAggExpr(copyExpr,
4598 AggValueSlot::forAddr(ParamAddr, Qualifiers(),
4599 AggValueSlot::IsNotDestructed,
4600 AggValueSlot::DoesNotNeedGCBarriers,
4601 AggValueSlot::IsNotAliased,
4602 AggValueSlot::DoesNotOverlap));
4604 // Leave the terminate scope.
4605 CGF.EHStack.popTerminate();
4607 // Undo the opaque value mapping.
4608 opaque.pop();
4610 // Finally we can call __cxa_begin_catch.
4611 CallBeginCatch(CGF, Exn, true);
4614 /// Begins a catch statement by initializing the catch variable and
4615 /// calling __cxa_begin_catch.
4616 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4617 const CXXCatchStmt *S) {
4618 // We have to be very careful with the ordering of cleanups here:
4619 // C++ [except.throw]p4:
4620 // The destruction [of the exception temporary] occurs
4621 // immediately after the destruction of the object declared in
4622 // the exception-declaration in the handler.
4624 // So the precise ordering is:
4625 // 1. Construct catch variable.
4626 // 2. __cxa_begin_catch
4627 // 3. Enter __cxa_end_catch cleanup
4628 // 4. Enter dtor cleanup
4630 // We do this by using a slightly abnormal initialization process.
4631 // Delegation sequence:
4632 // - ExitCXXTryStmt opens a RunCleanupsScope
4633 // - EmitAutoVarAlloca creates the variable and debug info
4634 // - InitCatchParam initializes the variable from the exception
4635 // - CallBeginCatch calls __cxa_begin_catch
4636 // - CallBeginCatch enters the __cxa_end_catch cleanup
4637 // - EmitAutoVarCleanups enters the variable destructor cleanup
4638 // - EmitCXXTryStmt emits the code for the catch body
4639 // - EmitCXXTryStmt close the RunCleanupsScope
4641 VarDecl *CatchParam = S->getExceptionDecl();
4642 if (!CatchParam) {
4643 llvm::Value *Exn = CGF.getExceptionFromSlot();
4644 CallBeginCatch(CGF, Exn, true);
4645 return;
4648 // Emit the local.
4649 CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4650 InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4651 CGF.EmitAutoVarCleanups(var);
4654 /// Get or define the following function:
4655 /// void @__clang_call_terminate(i8* %exn) nounwind noreturn
4656 /// This code is used only in C++.
4657 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4658 llvm::FunctionType *fnTy =
4659 llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4660 llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4661 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4662 llvm::Function *fn =
4663 cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4664 if (fn->empty()) {
4665 fn->setDoesNotThrow();
4666 fn->setDoesNotReturn();
4668 // What we really want is to massively penalize inlining without
4669 // forbidding it completely. The difference between that and
4670 // 'noinline' is negligible.
4671 fn->addFnAttr(llvm::Attribute::NoInline);
4673 // Allow this function to be shared across translation units, but
4674 // we don't want it to turn into an exported symbol.
4675 fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4676 fn->setVisibility(llvm::Function::HiddenVisibility);
4677 if (CGM.supportsCOMDAT())
4678 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4680 // Set up the function.
4681 llvm::BasicBlock *entry =
4682 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4683 CGBuilderTy builder(CGM, entry);
4685 // Pull the exception pointer out of the parameter list.
4686 llvm::Value *exn = &*fn->arg_begin();
4688 // Call __cxa_begin_catch(exn).
4689 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4690 catchCall->setDoesNotThrow();
4691 catchCall->setCallingConv(CGM.getRuntimeCC());
4693 // Call std::terminate().
4694 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4695 termCall->setDoesNotThrow();
4696 termCall->setDoesNotReturn();
4697 termCall->setCallingConv(CGM.getRuntimeCC());
4699 // std::terminate cannot return.
4700 builder.CreateUnreachable();
4702 return fnRef;
4705 llvm::CallInst *
4706 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4707 llvm::Value *Exn) {
4708 // In C++, we want to call __cxa_begin_catch() before terminating.
4709 if (Exn) {
4710 assert(CGF.CGM.getLangOpts().CPlusPlus);
4711 return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4713 return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4716 std::pair<llvm::Value *, const CXXRecordDecl *>
4717 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4718 const CXXRecordDecl *RD) {
4719 return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4722 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4723 const CXXCatchStmt *C) {
4724 if (CGF.getTarget().hasFeature("exception-handling"))
4725 CGF.EHStack.pushCleanup<CatchRetScope>(
4726 NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4727 ItaniumCXXABI::emitBeginCatch(CGF, C);
4730 llvm::CallInst *
4731 WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4732 llvm::Value *Exn) {
4733 // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
4734 // the violating exception to mark it handled, but it is currently hard to do
4735 // with wasm EH instruction structure with catch/catch_all, we just call
4736 // std::terminate and ignore the violating exception as in CGCXXABI.
4737 // TODO Consider code transformation that makes calling __clang_call_terminate
4738 // possible.
4739 return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
4742 /// Register a global destructor as best as we know how.
4743 void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4744 llvm::FunctionCallee Dtor,
4745 llvm::Constant *Addr) {
4746 if (D.getTLSKind() != VarDecl::TLS_None) {
4747 // atexit routine expects "int(*)(int,...)"
4748 llvm::FunctionType *FTy =
4749 llvm::FunctionType::get(CGM.IntTy, CGM.IntTy, true);
4750 llvm::PointerType *FpTy = FTy->getPointerTo();
4752 // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
4753 llvm::FunctionType *AtExitTy =
4754 llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, FpTy}, true);
4756 // Fetch the actual function.
4757 llvm::FunctionCallee AtExit =
4758 CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
4760 // Create __dtor function for the var decl.
4761 llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
4763 // Register above __dtor with atexit().
4764 // First param is flags and must be 0, second param is function ptr
4765 llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
4766 CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
4768 // Cannot unregister TLS __dtor so done
4769 return;
4772 // Create __dtor function for the var decl.
4773 llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
4775 // Register above __dtor with atexit().
4776 CGF.registerGlobalDtorWithAtExit(DtorStub);
4778 // Emit __finalize function to unregister __dtor and (as appropriate) call
4779 // __dtor.
4780 emitCXXStermFinalizer(D, DtorStub, Addr);
4783 void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
4784 llvm::Constant *addr) {
4785 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
4786 SmallString<256> FnName;
4788 llvm::raw_svector_ostream Out(FnName);
4789 getMangleContext().mangleDynamicStermFinalizer(&D, Out);
4792 // Create the finalization action associated with a variable.
4793 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
4794 llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
4795 FTy, FnName.str(), FI, D.getLocation());
4797 CodeGenFunction CGF(CGM);
4799 CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
4800 FunctionArgList(), D.getLocation(),
4801 D.getInit()->getExprLoc());
4803 // The unatexit subroutine unregisters __dtor functions that were previously
4804 // registered by the atexit subroutine. If the referenced function is found,
4805 // the unatexit returns a value of 0, meaning that the cleanup is still
4806 // pending (and we should call the __dtor function).
4807 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);
4809 llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");
4811 llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
4812 llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");
4814 // Check if unatexit returns a value of 0. If it does, jump to
4815 // DestructCallBlock, otherwise jump to EndBlock directly.
4816 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);
4818 CGF.EmitBlock(DestructCallBlock);
4820 // Emit the call to dtorStub.
4821 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);
4823 // Make sure the call and the callee agree on calling convention.
4824 CI->setCallingConv(dtorStub->getCallingConv());
4826 CGF.EmitBlock(EndBlock);
4828 CGF.FinishFunction();
4830 if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
4831 CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
4832 IPA->getPriority());
4833 } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
4834 getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
4835 // According to C++ [basic.start.init]p2, class template static data
4836 // members (i.e., implicitly or explicitly instantiated specializations)
4837 // have unordered initialization. As a consequence, we can put them into
4838 // their own llvm.global_dtors entry.
4839 CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
4840 } else {
4841 CGM.AddCXXStermFinalizerEntry(StermFinalizer);