//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "EHScopeStack.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Sema/Sema.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"

using namespace clang;
using namespace CodeGen;

static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
              "Clang max alignment greater than what LLVM supports?");

void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::TopLevelStmt:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::Captured:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
  case Decl::UnresolvedUsingIfExists:
  case Decl::HLSLBuffer:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Record:    // struct/union/class X;
  case Decl::CXXRecord: // struct/union/class X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<RecordDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
    return;
  case Decl::Enum: // enum X;
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<EnumDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
    return;
  case Decl::Function:     // void X();
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::MSGuid:       // __declspec(uuid("..."))
  case Decl::UnnamedGlobalConstant:
  case Decl::TemplateParamObject:
  case Decl::OMPThreadPrivate:
  case Decl::OMPAllocate:
  case Decl::OMPCapturedExpr:
  case Decl::OMPRequires:
  case Decl::Empty:
  case Decl::Concept:
  case Decl::ImplicitConceptSpecialization:
  case Decl::LifetimeExtendedTemporary:
  case Decl::RequiresExprBody:
    // None of these decls require codegen support.
    return;

  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using: // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingEnum: // using enum X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
    return;
  case Decl::UsingPack:
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
    if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
      for (auto *B : DD->bindings())
        if (auto *HD = B->getHoldingVar())
          EmitVarDecl(*HD);
    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::OMPDeclareMapper:
    return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);

  case Decl::Typedef:     // typedef int X;
  case Decl::TypeAlias: { // using X = int; [C++0x]
    QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitAndRetainType(Ty);
    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
    return;
  }
  }
}

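// Illustrative sketch (not from the original source): declarations reach this
// switch via declaration statements inside function bodies, e.g.
//   void f() {
//     using Alias = int;  // Decl::TypeAlias: debug info and VLA sizes only
//     int x = 0;          // Decl::Var: dispatched to EmitVarDecl
//   }
// File-scope declarations are emitted by CodeGenModule instead.
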
/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  // Some function-scope variables do not have static storage but still
  // need to be emitted like a static variable, e.g. a function-scope
  // variable in constant address space in OpenCL.
  if (D.getStorageDuration() != SD_Automatic) {
    // Static sampler variables are translated to function calls.
    if (D.getType()->isSamplerT())
      return;

    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.

    return EmitStaticVarDecl(D, Linkage);
  }

  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}

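// Illustrative sketch (not from the original source) of the dispatch above:
//   void f() {
//     extern int e;      // external storage: skipped, emitted lazily on use
//     static int s = 1;  // non-automatic duration: EmitStaticVarDecl
//     int a = 2;         // local storage: EmitAutoVarDecl
//   }
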
static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
  assert(!D.isExternallyVisible() && "name shouldn't matter");
  std::string ContextName;
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    ContextName = std::string(CGM.getMangledName(FD));
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    ContextName = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  ContextName += "." + D.getNameAsString();
  return ContextName;
}

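// Illustrative (not from the original source): for `static int x;` inside a C
// function f(), this produces the pretty name "f.x"; in C++ the mangled name
// (e.g. "_ZZ1fvE1x" under the Itanium ABI) is returned instead.
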
llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = std::string(getMangledName(&D));
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // OpenCL variables in local address space and CUDA shared
  // variables cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() == LangAS::opencl_local ||
      D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
    Init = llvm::UndefValue::get(LTy);
  else
    Init = EmitNullConstant(Ty);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);
  getTargetCodeGenInfo().setTargetAttributes(cast<Decl>(&D), GV, *this);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS, ExpectedAS,
        llvm::PointerType::get(getLLVMContext(),
                               getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl()) {
    // Disable emission of the parent function for the OpenMP device codegen.
    CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
    (void)GetAddrOfGlobal(GD);
  }

  return Addr;
}

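// Illustrative (not from the original source): a C++ constructor containing a
// static local is emitted in both its complete and base variants; the
// StaticLocalDeclMap lookup at the top of this function ensures both variants
// share a single global instead of creating two.
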
/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (D.hasFlexibleArrayInit(getContext()))
      CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

#ifndef NDEBUG
  CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) +
                      D.getFlexibleArrayInitChars(getContext());
  CharUnits CstSize = CharUnits::fromQuantity(
      CGM.getDataLayout().getTypeAllocSize(Init->getType()));
  assert(VarSize == CstSize && "Emitted constant has unexpected size");
#endif

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer. (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getValueType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(
        CGM.getModule(), Init->getType(), OldGV->isConstant(),
        OldGV->getLinkage(), Init, "",
        /*InsertBefore*/ OldGV, OldGV->getThreadLocalMode(),
        OldGV->getType()->getPointerAddressSpace());
    GV->setVisibility(OldGV->getVisibility());
    GV->setDSOLocal(OldGV->isDSOLocal());
    GV->setComdat(OldGV->getComdat());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    OldGV->replaceAllUsesWith(GV);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  bool NeedsDtor =
      D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;

  GV->setConstant(
      D.getType().isConstantStorage(getContext(), true, !NeedsDtor));
  GV->setInitializer(Init);

  emitter.finalize(GV);

  if (NeedsDtor && HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}

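// Illustrative (not from the original source): for a static local union such
// as `static union { float f; int i; } u = {1.0f};` the emitted constant may
// use a different LLVM type than the one the global was created with, which
// takes the rewrite-and-replaceAllUsesWith path above.
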
void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                        llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration. This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  llvm::Type *elemTy = ConvertTypeForMem(D.getType());
  setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
      cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such variable may have when it gets here is
  // a no-op and should not be emitted.
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar)
    var = AddInitializerToStaticVarDecl(D, var);

  var->setAlignment(alignment.getAsAlign());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
    var->addAttribute("relro-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<RetainAttr>())
    CGM.addUsedGlobal(var);
  else if (D.hasAttr<UsedAttr>())
    CGM.addUsedOrCompilerUsedGlobal(var);

  if (CGM.getCodeGenOpts().KeepPersistentStorageVariables)
    CGM.addUsedOrCompilerUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  llvm::Constant *castedAddr =
      llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobal(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}

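// Illustrative (not from the original source):
//   #pragma clang section bss = ".my_bss"
//   static int zeroed;  // the global receives "bss-section"=".my_bss"
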
namespace {
struct DestroyObject final : EHScopeStack::Cleanup {
  DestroyObject(Address addr, QualType type,
                CodeGenFunction::Destroyer *destroyer,
                bool useEHCleanupForArray)
      : addr(addr), type(type), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

  Address addr;
  QualType type;
  CodeGenFunction::Destroyer *destroyer;
  bool useEHCleanupForArray;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Don't use an EH cleanup recursively from an EH cleanup.
    bool useEHCleanupForArray =
        flags.isForNormalCleanup() && this->useEHCleanupForArray;

    CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
  }
};

template <class Derived>
struct DestroyNRVOVariable : EHScopeStack::Cleanup {
  DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
      : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}

  llvm::Value *NRVOFlag;
  Address Loc;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Along the exceptions path we always execute the dtor.
    bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

    llvm::BasicBlock *SkipDtorBB = nullptr;
    if (NRVO) {
      // If we exited via NRVO, we skip the destructor call.
      llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
      SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
      llvm::Value *DidNRVO =
          CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
      CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
      CGF.EmitBlock(RunDtorBB);
    }

    static_cast<Derived *>(this)->emitDestructorCall(CGF);

    if (NRVO) CGF.EmitBlock(SkipDtorBB);
  }

  virtual ~DestroyNRVOVariable() = default;
};

struct DestroyNRVOVariableCXX final
    : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
  DestroyNRVOVariableCXX(Address addr, QualType type,
                         const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
      : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
        Dtor(Dtor) {}

  const CXXDestructorDecl *Dtor;

  void emitDestructorCall(CodeGenFunction &CGF) {
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false, Loc, Ty);
  }
};

struct DestroyNRVOVariableC final
    : DestroyNRVOVariable<DestroyNRVOVariableC> {
  DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
      : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}

  void emitDestructorCall(CodeGenFunction &CGF) {
    CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
  }
};

struct CallStackRestore final : EHScopeStack::Cleanup {
  Address Stack;
  CallStackRestore(Address Stack) : Stack(Stack) {}
  bool isRedundantBeforeReturn() override { return true; }
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    llvm::Value *V = CGF.Builder.CreateLoad(Stack);
    CGF.Builder.CreateStackRestore(V);
  }
};

struct KmpcAllocFree final : EHScopeStack::Cleanup {
  std::pair<llvm::Value *, llvm::Value *> AddrSizePair;
  KmpcAllocFree(const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair)
      : AddrSizePair(AddrSizePair) {}
  void Emit(CodeGenFunction &CGF, Flags EmissionFlags) override {
    auto &RT = CGF.CGM.getOpenMPRuntime();
    RT.getKmpcFreeShared(CGF, AddrSizePair);
  }
};

struct ExtendGCLifetime final : EHScopeStack::Cleanup {
  const VarDecl &Var;
  ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Compute the address of the local variable, in case it's a
    // byref or something.
    DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                    Var.getType(), VK_LValue, SourceLocation());
    llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                              SourceLocation());
    CGF.EmitExtendGCLifetime(value);
  }
};

struct CallCleanupFunction final : EHScopeStack::Cleanup {
  llvm::Constant *CleanupFn;
  const CGFunctionInfo &FnInfo;
  const VarDecl &Var;

  CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                      const VarDecl *Var)
      : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                    Var.getType(), VK_LValue, SourceLocation());
    // Compute the address of the local variable, in case it's a byref
    // or something.
    llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);

    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    //    void f(void* arg);
    //    __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = FnInfo.arg_begin()->type;
    llvm::Value *Arg =
        CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

    CallArgList Args;
    Args.add(RValue::get(Arg),
             CGF.getContext().getPointerType(Var.getType()));
    auto Callee = CGCallee::forDirect(CleanupFn);
    CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
  }
};
} // end anonymous namespace

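// Illustrative (not from the original source): for an NRVO'd local with a
// nontrivial destructor, DestroyNRVOVariable::Emit expands to roughly
//   %nrvo.val = load i1, ptr %nrvo
//   br i1 %nrvo.val, label %nrvo.skipdtor, label %nrvo.unused
// so the destructor only runs when the object was not constructed directly in
// the return slot.
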
/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    CodeGenFunction::Destroyer *destroyer =
        (var.hasAttr<ObjCPreciseLifetimeAttr>()
             ? CodeGenFunction::destroyARCStrongPrecise
             : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanup*/ true);
    break;
  }
}

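// Illustrative (not from the original source), under ARC:
//   __strong id obj = [Foo new];  // pushes an objc_release cleanup
//   __weak id ref = obj;          // pushes an objc_destroyWeak cleanup,
//                                 // on both normal and EH paths
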
static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
  if (const Expr *e = dyn_cast<Expr>(s)) {
    // Skip the most common kinds of expressions that make
    // hierarchy-walking expensive.
    s = e = e->IgnoreParenCasts();

    if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
      return (ref->getDecl() == &var);
    if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
      const BlockDecl *block = be->getBlockDecl();
      for (const auto &I : block->captures()) {
        if (I.getVariable() == &var)
          return true;
      }
    }
  }

  for (const Stmt *SubStmt : s->children())
    // SubStmt might be null; as in missing decl or conditional of an if-stmt.
    if (SubStmt && isAccessedBy(var, SubStmt))
      return true;

  return false;
}

static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
  if (!decl) return false;
  if (!isa<VarDecl>(decl)) return false;
  const VarDecl *var = cast<VarDecl>(decl);
  return isAccessedBy(*var, e);
}

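// Illustrative (not from the original source): this predicate is what makes an
// initializer that reads the variable itself, e.g.
//   __strong id x = makeDelegate(x);
// take the zero-initialize-then-assign path in EmitScalarInit below.
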
static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress();
      if (needsCast) {
        srcAddr =
            srcAddr.withElementType(destLV.getAddress().getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->isLValue()) {
        CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
      } else {
        assert(srcExpr->isXValue());
        CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}

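// Illustrative (not from the original source), in Objective-C++:
//   __weak id b = a;             // 'a' is __weak: lowered to objc_copyWeak
//   __weak id c = std::move(a);  // xvalue source: lowered to objc_moveWeak
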
static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
}

void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
                                           SourceLocation Loc) {
  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
    return;

  auto Nullability = LHS.getType()->getNullability();
  if (!Nullability || *Nullability != NullabilityKind::NonNull)
    return;

  // Check if the right hand side of the assignment is nonnull, if the left
  // hand side must be nonnull.
  SanitizerScope SanScope(this);
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
      llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
      llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
  EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
            SanitizerHandler::TypeMismatch, StaticData, RHS);
}

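// Illustrative (not from the original source), with
// -fsanitize=nullability-assign:
//   id _Nonnull p;
//   p = maybeNil();  // emits an is-not-null check on the RHS before the store
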
void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    llvm::Value *value = EmitScalarExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
    CodeGenFunction::RunCleanupsScope Scope(*this);
    return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
  }

  // We have to maintain the illusion that the variable is
  // zero-initialized. If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_Strong: {
    if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
      value = EmitARCRetainScalarExpr(init);
      break;
    }
    // If D is pseudo-strong, treat it like __unsafe_unretained here. This means
    // that we omit the retain, and causes non-autoreleased return values to be
    // immediately released.
    [[fallthrough]];
  }

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this. It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier. We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}

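// Illustrative (not from the original source):
//   __strong id x = [self wrap:x];  // initializer reads 'x'
// Here 'x' is zero-initialized first, the initializer runs, and the old
// (null) value is released after the assigning store, preserving the illusion
// of zero-initialization described above.
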
/// Decide whether we can emit the non-zero parts of the specified initializer
/// with equal or fewer than NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
                                               unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}

/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
/// the scalar stores that would be required.
static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
                                        llvm::Constant *Init, Address Loc,
                                        bool isVolatile, CGBuilderTy &Builder,
                                        bool IsAutoInit) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterBZero for zero or undef value.");

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    auto *I = Builder.CreateStore(Init, Loc, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);

      // If necessary, get a pointer to the element and emit it.
      if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
        emitStoresForInitAfterBZero(
            CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
            Builder, IsAutoInit);
    }
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));

    // If necessary, get a pointer to the element and emit it.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterBZero(CGM, Elt,
                                  Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
                                  isVolatile, Builder, IsAutoInit);
  }
}

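// Illustrative (not from the original source):
//   int big[1024] = {[0] = 1, [512] = 2};
// becomes a 4096-byte memset to zero followed by two scalar stores, instead of
// a memcpy from a 4 KiB constant global.
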
/// Decide whether we should use bzero plus some stores to initialize a local
/// variable instead of using a memcpy from a constant global. It is beneficial
/// to use bzero if the global is all zeros, or mostly zeros and large.
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
                                                 uint64_t GlobalSize) {
  // If a global is all zeros, always use a bzero.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
  // do it if it will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size? Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}

/// Decide whether we should use memset to initialize a local variable instead
/// of using a memcpy from a constant global. Assumes we've already decided to
/// not use bzero.
/// FIXME We could be more clever, as we are for bzero above, and generate
///       memset followed by stores. It's unclear that's worth the effort.
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
                                                uint64_t GlobalSize,
                                                const llvm::DataLayout &DL) {
  uint64_t SizeLimit = 32;
  if (GlobalSize <= SizeLimit)
    return nullptr;
  return llvm::isBytewiseValue(Init, DL);
}

/// Decide whether we want to split a constant structure or array store into a
/// sequence of its fields' stores. This may cost us code size and compilation
/// speed, but plays better with store optimizations.
static bool shouldSplitConstantStore(CodeGenModule &CGM,
                                     uint64_t GlobalByteSize) {
  // Don't break things that occupy more than one cacheline.
  uint64_t ByteSizeLimit = 64;
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return false;
  if (GlobalByteSize <= ByteSizeLimit)
    return true;
  return false;
}

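// Illustrative (not from the original source): a large local whose initializer
// repeats a single byte, e.g. 256 chars all equal to 1, makes isBytewiseValue
// return the i8 1 constant, so the whole initialization is a single
// memset(buf, 1, 256).
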
enum class IsPattern { No, Yes };

/// Generate a constant filled with either a pattern or zeroes.
static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Type *Ty) {
  if (isPattern == IsPattern::Yes)
    return initializationPatternFor(CGM, Ty);
  else
    return llvm::Constant::getNullValue(Ty);
}

static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant);

/// Helper function for constWithPadding() to deal with padding in structures.
static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
                                              IsPattern isPattern,
                                              llvm::StructType *STy,
                                              llvm::Constant *constant) {
  const llvm::DataLayout &DL = CGM.getDataLayout();
  const llvm::StructLayout *Layout = DL.getStructLayout(STy);
  llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
  unsigned SizeSoFar = 0;
  SmallVector<llvm::Constant *, 8> Values;
  bool NestedIntact = true;
  for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
    unsigned CurOff = Layout->getElementOffset(i);
    if (SizeSoFar < CurOff) {
      assert(!STy->isPacked());
      auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
      Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
    }
    llvm::Constant *CurOp;
    if (constant->isZeroValue())
      CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
    else
      CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
    auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
    if (CurOp != NewOp)
      NestedIntact = false;
    Values.push_back(NewOp);
    SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
  }
  unsigned TotalSize = Layout->getSizeInBytes();
  if (SizeSoFar < TotalSize) {
    auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
    Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
  }
  if (NestedIntact && Values.size() == STy->getNumElements())
    return constant;
  return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
}

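// Illustrative (not from the original source): for
//   struct S { char c; int i; };  // 3 padding bytes after 'c'
// pattern initialization rebuilds the constant as { i8, [3 x i8], i32 } so the
// padding bytes receive the pattern (or zero) as well.
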
/// Replace all padding bytes in a given constant with either a pattern byte or
/// 0x00.
static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant) {
  llvm::Type *OrigTy = constant->getType();
  if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
    return constStructWithPadding(CGM, isPattern, STy, constant);
  if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
    llvm::SmallVector<llvm::Constant *, 8> Values;
    uint64_t Size = ArrayTy->getNumElements();
    if (!Size)
      return constant;
    llvm::Type *ElemTy = ArrayTy->getElementType();
    bool ZeroInitializer = constant->isNullValue();
    llvm::Constant *OpValue, *PaddedOp;
    if (ZeroInitializer) {
      OpValue = llvm::Constant::getNullValue(ElemTy);
      PaddedOp = constWithPadding(CGM, isPattern, OpValue);
    }
    for (unsigned Op = 0; Op != Size; ++Op) {
      if (!ZeroInitializer) {
        OpValue = constant->getAggregateElement(Op);
        PaddedOp = constWithPadding(CGM, isPattern, OpValue);
      }
      Values.push_back(PaddedOp);
    }
    auto *NewElemTy = Values[0]->getType();
    if (NewElemTy == ElemTy)
      return constant;
    auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
    return llvm::ConstantArray::get(NewArrayTy, Values);
  }
  // FIXME: Add handling for tail padding in vectors. Vectors don't
  // have padding between or inside elements, but the total amount of
  // data can be less than the allocated size.
  return constant;
}

Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
                                               llvm::Constant *Constant,
                                               CharUnits Align) {
  auto FunctionName = [&](const DeclContext *DC) -> std::string {
    if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
      if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
        return CC->getNameAsString();
      if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
        return CD->getNameAsString();
      return std::string(getMangledName(FD));
    } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
      return OM->getNameAsString();
    } else if (isa<BlockDecl>(DC)) {
      return "<block>";
    } else if (isa<CapturedDecl>(DC)) {
      return "<captured>";
    } else
      llvm_unreachable("expected a function or method");
  };

  // Form a simple per-variable cache of these values in case we find we
  // want to reuse them.
  llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
  if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
    auto *Ty = Constant->getType();
    bool isConstant = true;
    llvm::GlobalVariable *InsertBefore = nullptr;
    unsigned AS =
        getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
    std::string Name;
    if (D.hasGlobalStorage())
      Name = getMangledName(&D).str() + ".const";
    else if (const DeclContext *DC = D.getParentFunctionOrMethod())
      Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
    else
      llvm_unreachable("local variable has no parent function or method");
    llvm::GlobalVariable *GV = new llvm::GlobalVariable(
        getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
        Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
    GV->setAlignment(Align.getAsAlign());
    GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CacheEntry = GV;
  } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
    CacheEntry->setAlignment(Align.getAsAlign());
  }

  return Address(CacheEntry, CacheEntry->getValueType(), Align);
}

static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
                                                const VarDecl &D,
                                                CGBuilderTy &Builder,
                                                llvm::Constant *Constant,
                                                CharUnits Align) {
  Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
  return SrcPtr.withElementType(CGM.Int8Ty);
}

static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder,
                                  llvm::Constant *constant, bool IsAutoInit) {
  auto *Ty = constant->getType();
  uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
  if (!ConstantSize)
    return;

  bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
                          Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
  if (canDoSingleStore) {
    auto *I = Builder.CreateStore(constant, Loc, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);

  // If the initializer is all or mostly the same, codegen with bzero / memset
  // then do a few stores afterward.
  if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
    auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
                                   SizeVal, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");

    bool valueAlreadyCorrect =
        constant->isNullValue() || isa<llvm::UndefValue>(constant);
    if (!valueAlreadyCorrect) {
      Loc = Loc.withElementType(Ty);
      emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
                                  IsAutoInit);
    }
    return;
  }

  // If the initializer is a repeated byte pattern, use memset.
  llvm::Value *Pattern =
      shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
  if (Pattern) {
    uint64_t Value = 0x00;
    if (!isa<llvm::UndefValue>(Pattern)) {
      const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
      assert(AP.getBitWidth() <= 8);
      Value = AP.getLimitedValue();
    }
    auto *I = Builder.CreateMemSet(
        Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  // If the initializer is small or trivialAutoVarInit is set, use a handful of
  // stores.
  bool IsTrivialAutoVarInitPattern =
      CGM.getContext().getLangOpts().getTrivialAutoVarInit() ==
      LangOptions::TrivialAutoVarInitKind::Pattern;
  if (shouldSplitConstantStore(CGM, ConstantSize)) {
    if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
      if (STy == Loc.getElementType() ||
          (STy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
        const llvm::StructLayout *Layout =
            CGM.getDataLayout().getStructLayout(STy);
        for (unsigned i = 0; i != constant->getNumOperands(); i++) {
          CharUnits CurOff =
              CharUnits::fromQuantity(Layout->getElementOffset(i));
          Address EltPtr = Builder.CreateConstInBoundsByteGEP(
              Loc.withElementType(CGM.Int8Ty), CurOff);
          emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
                                constant->getAggregateElement(i), IsAutoInit);
        }
        return;
      }
    } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
      if (ATy == Loc.getElementType() ||
          (ATy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
        for (unsigned i = 0; i != ATy->getNumElements(); i++) {
          Address EltPtr = Builder.CreateConstGEP(
              Loc.withElementType(ATy->getElementType()), i);
          emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
                                constant->getAggregateElement(i), IsAutoInit);
        }
        return;
      }
    }
  }

  // Copy from a global.
  auto *I =
      Builder.CreateMemCpy(Loc,
                           createUnnamedGlobalForMemcpyFrom(
                               CGM, D, Builder, constant, Loc.getAlignment()),
                           SizeVal, isVolatile);
  if (IsAutoInit)
    I->addAnnotationMetadata("auto-init");
}

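// Illustrative summary (not from the original source) of the strategy order
// implemented above: single scalar store, then bzero plus fix-up stores, then
// repeated-byte memset, then per-field stores, and finally a memcpy from an
// unnamed constant global.
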
static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant =
      constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
                        /*IsAutoInit=*/true);
}

static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
                                     Address Loc, bool isVolatile,
                                     CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant = constWithPadding(
      CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
  assert(!isa<llvm::UndefValue>(constant));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
                        /*IsAutoInit=*/true);
}

static bool containsUndef(llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return true;
  if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
    for (llvm::Use &Op : constant->operands())
      if (containsUndef(cast<llvm::Constant>(Op)))
        return true;
  return false;
}

static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
                                    llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return patternOrZeroFor(CGM, isPattern, Ty);
  if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
    return constant;
  if (!containsUndef(constant))
    return constant;
  llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
  for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
    auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
    Values[Op] = replaceUndef(CGM, isPattern, OpValue);
  }
  if (Ty->isStructTy())
    return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
  if (Ty->isArrayTy())
    return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
  assert(Ty->isVectorTy());
  return llvm::ConstantVector::get(Values);
}

/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
  AutoVarEmission emission = EmitAutoVarAlloca(D);
  EmitAutoVarInit(emission);
  EmitAutoVarCleanups(emission);
}

/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
                                                llvm::Value *Addr) {
  if (!ShouldEmitLifetimeMarkers)
    return nullptr;

  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  llvm::Value *SizeV = llvm::ConstantInt::get(
      Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
  C->setDoesNotThrow();
  return SizeV;
}

void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
  C->setDoesNotThrow();
}

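// Illustrative (not from the original source): for a 4-byte local %x this
// emits
//   call void @llvm.lifetime.start.p0(i64 4, ptr %x)
// and the matching @llvm.lifetime.end.p0 call is pushed as a cleanup to run
// when the variable's scope ends.
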
void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
    CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
  // For each dimension stores its QualType and corresponding
  // size-expression Value.
  SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
  SmallVector<const IdentifierInfo *, 4> VLAExprNames;

  // Break down the array into individual dimensions.
  QualType Type1D = D.getType();
  while (getContext().getAsVariableArrayType(Type1D)) {
    auto VlaSize = getVLAElements1D(Type1D);
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
    else {
      // Generate a locally unique name for the size expression.
      Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
      SmallString<12> Buffer;
      StringRef NameRef = Name.toStringRef(Buffer);
      auto &Ident = getContext().Idents.getOwn(NameRef);
      VLAExprNames.push_back(&Ident);
      auto SizeExprAddr =
          CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
      Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
      Dimensions.emplace_back(SizeExprAddr.getPointer(),
                              Type1D.getUnqualifiedType());
    }
    Type1D = VlaSize.Type;
  }

  if (!EmitDebugInfo)
    return;

  // Register each dimension's size-expression with a DILocalVariable,
  // so that it can be used by CGDebugInfo when instantiating a DISubrange
  // to describe this array.
  unsigned NameIdx = 0;
  for (auto &VlaSize : Dimensions) {
    llvm::Metadata *MD;
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      MD = llvm::ConstantAsMetadata::get(C);
    else {
      // Create an artificial VarDecl to generate debug info for.
      const IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
      auto QT = getContext().getIntTypeForBitwidth(
          SizeTy->getScalarSizeInBits(), false);
      auto *ArtificialDecl = VarDecl::Create(
          getContext(), const_cast<DeclContext *>(D.getDeclContext()),
          D.getLocation(), D.getLocation(), NameIdent, QT,
          getContext().CreateTypeSourceInfo(QT), SC_Auto);
      ArtificialDecl->setImplicit();

      MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
                                         Builder);
    }
    assert(MD && "No Size expression debug node created");
    DI->registerVLASizeExpression(VlaSize.Type, MD);
  }
}

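// Illustrative (not from the original source): for
//   void f(int n) { int a[n]; }
// the value of 'n' is stored to an alloca named "__vla_expr0" and registered
// here so the debugger can describe the bound of 'a' through that variable.
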
1441 /// EmitAutoVarAlloca - Emit the alloca and debug information for a
1442 /// local variable. Does not emit initialization or destruction.
1443 CodeGenFunction::AutoVarEmission
1444 CodeGenFunction::EmitAutoVarAlloca(const VarDecl
&D
) {
1445 QualType Ty
= D
.getType();
1447 Ty
.getAddressSpace() == LangAS::Default
||
1448 (Ty
.getAddressSpace() == LangAS::opencl_private
&& getLangOpts().OpenCL
));
1450 AutoVarEmission
emission(D
);
1452 bool isEscapingByRef
= D
.isEscapingByref();
1453 emission
.IsEscapingByRef
= isEscapingByRef
;
1455 CharUnits alignment
= getContext().getDeclAlign(&D
);
1457 // If the type is variably-modified, emit all the VLA sizes for it.
1458 if (Ty
->isVariablyModifiedType())
1459 EmitVariablyModifiedType(Ty
);
1461 auto *DI
= getDebugInfo();
1462 bool EmitDebugInfo
= DI
&& CGM
.getCodeGenOpts().hasReducedDebugInfo();
1464 Address address
= Address::invalid();
1465 RawAddress AllocaAddr
= RawAddress::invalid();
1466 Address OpenMPLocalAddr
= Address::invalid();
1467 if (CGM
.getLangOpts().OpenMPIRBuilder
)
1468 OpenMPLocalAddr
= OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D
);
1471 getLangOpts().OpenMP
1472 ? CGM
.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D
)
1473 : Address::invalid();
1475 bool NRVO
= getLangOpts().ElideConstructors
&& D
.isNRVOVariable();
1477 if (getLangOpts().OpenMP
&& OpenMPLocalAddr
.isValid()) {
1478 address
= OpenMPLocalAddr
;
1479 AllocaAddr
= OpenMPLocalAddr
;
1480 } else if (Ty
->isConstantSizeType()) {
1481 // If this value is an array or struct with a statically determinable
1482 // constant initializer, there are optimizations we can do.
1484 // TODO: We should constant-evaluate the initializer of any variable,
1485 // as long as it is initialized by a constant expression. Currently,
1486 // isConstantInitializer produces wrong answers for structs with
1487 // reference or bitfield members, and a few other cases, and checking
1488 // for POD-ness protects us from some of these.
1489 if (D
.getInit() && (Ty
->isArrayType() || Ty
->isRecordType()) &&
1491 ((Ty
.isPODType(getContext()) ||
1492 getContext().getBaseElementType(Ty
)->isObjCObjectPointerType()) &&
1493 D
.getInit()->isConstantInitializer(getContext(), false)))) {
1495 // If the variable's a const type, and it's neither an NRVO
1496 // candidate nor a __block variable and has no mutable members,
1497 // emit it as a global instead.
1498 // Exception is if a variable is located in non-constant address space
1501 D
.needsDestruction(getContext()) == QualType::DK_cxx_destructor
;
1502 if ((!getLangOpts().OpenCL
||
1503 Ty
.getAddressSpace() == LangAS::opencl_constant
) &&
1504 (CGM
.getCodeGenOpts().MergeAllConstants
&& !NRVO
&&
1506 Ty
.isConstantStorage(getContext(), true, !NeedsDtor
))) {
1507 EmitStaticVarDecl(D
, llvm::GlobalValue::InternalLinkage
);
1509 // Signal this condition to later callbacks.
1510 emission
.Addr
= Address::invalid();
1511 assert(emission
.wasEmittedAsGlobal());
1515 // Otherwise, tell the initialization code that we're in this case.
1516 emission
.IsConstantAggregate
= true;
1519 // A normal fixed sized variable becomes an alloca in the entry block,
1521 // - it's an NRVO variable.
1522 // - we are compiling OpenMP and it's an OpenMP local variable.
1524 // The named return value optimization: allocate this variable in the
1525 // return slot, so that we can elide the copy when returning this
1526 // variable (C++0x [class.copy]p34).
1527 address
= ReturnValue
;
1529 RawAddress(ReturnValue
.emitRawPointer(*this),
1530 ReturnValue
.getElementType(), ReturnValue
.getAlignment());
1533 if (const RecordType
*RecordTy
= Ty
->getAs
<RecordType
>()) {
1534 const auto *RD
= RecordTy
->getDecl();
1535 const auto *CXXRD
= dyn_cast
<CXXRecordDecl
>(RD
);
1536 if ((CXXRD
&& !CXXRD
->hasTrivialDestructor()) ||
1537 RD
->isNonTrivialToPrimitiveDestroy()) {
1538 // Create a flag that is used to indicate when the NRVO was applied
1539 // to this variable. Set it to zero to indicate that NRVO was not
1541 llvm::Value
*Zero
= Builder
.getFalse();
1542 RawAddress NRVOFlag
=
1543 CreateTempAlloca(Zero
->getType(), CharUnits::One(), "nrvo");
1544 EnsureInsertPoint();
1545 Builder
.CreateStore(Zero
, NRVOFlag
);
1547 // Record the NRVO flag for this variable.
1548 NRVOFlags
[&D
] = NRVOFlag
.getPointer();
1549 emission
.NRVOFlag
= NRVOFlag
.getPointer();
1553 CharUnits allocaAlignment
;
1554 llvm::Type
*allocaTy
;
1555 if (isEscapingByRef
) {
1556 auto &byrefInfo
= getBlockByrefInfo(&D
);
1557 allocaTy
= byrefInfo
.Type
;
1558 allocaAlignment
= byrefInfo
.ByrefAlignment
;
1560 allocaTy
= ConvertTypeForMem(Ty
);
1561 allocaAlignment
= alignment
;
1564 // Create the alloca. Note that we set the name separately from
1565 // building the instruction so that it's there even in no-asserts
1567 address
= CreateTempAlloca(allocaTy
, allocaAlignment
, D
.getName(),
1568 /*ArraySize=*/nullptr, &AllocaAddr
);
      // Don't emit lifetime markers for MSVC catch parameters. The lifetime
      // of the catch parameter starts in the catchpad instruction, and we
      // can't insert code in those basic blocks.
      bool IsMSCatchParam =
          D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();

      // Emit a lifetime intrinsic if meaningful. There's no point in doing
      // this if we don't have a valid insertion point.
      if (HaveInsertPoint() && !IsMSCatchParam) {
        // If there's a jump into the lifetime of this variable, its lifetime
        // gets broken up into several regions in IR, which requires more work
        // to handle correctly. For now, just omit the intrinsics; this is a
        // rare case, and it's better to just be conservatively correct.
        //
        // We have to do this in all language modes if there's a jump past the
        // declaration. We also have to do it in C if there's a jump to an
        // earlier point in the current block because non-VLA lifetimes begin
        // as soon as the containing block is entered, not when its variables
        // actually come into scope; suppressing the lifetime annotations
        // completely in this case is unnecessarily pessimistic, but again,
        // this is rare.
        if (!Bypasses.IsBypassed(&D) &&
            !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
          llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
          emission.SizeForLifetimeMarkers =
              EmitLifetimeStart(Size, AllocaAddr.getPointer());
        }
      } else {
        assert(!emission.useLifetimeMarkers());
      }
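      // For illustration (not part of this file): with markers emitted, a
      // fixed-size local is bracketed in the resulting IR roughly as follows
      // (sizes, names, and intrinsic spellings vary by target and LLVM
      // version):
      //
      //   %x = alloca i32, align 4
      //   call void @llvm.lifetime.start.p0(i64 4, ptr %x)
      //   ...
      //   call void @llvm.lifetime.end.p0(i64 4, ptr %x)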
    }
  } else {
    EnsureInsertPoint();

    // Delayed globalization for variable length declarations. This ensures
    // that the expression representing the length has been emitted and can be
    // used by the definition of the VLA. Since this is an escaped declaration,
    // in OpenMP we have to use a call to __kmpc_alloc_shared(). The matching
    // deallocation call to __kmpc_free_shared() is emitted later.
    bool VarAllocated = false;
    if (getLangOpts().OpenMPIsTargetDevice) {
      auto &RT = CGM.getOpenMPRuntime();
      if (RT.isDelayedVariableLengthDecl(*this, &D)) {
        // Emit a call to __kmpc_alloc_shared() instead of the alloca.
        std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
            RT.getKmpcAllocShared(*this, &D);

        // Save the address of the allocation:
        LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
                                     CGM.getContext().getDeclAlign(&D),
                                     AlignmentSource::Decl);
        address = Base.getAddress();

        // Push a cleanup block to emit the call to __kmpc_free_shared in the
        // appropriate location at the end of the scope of the
        // __kmpc_alloc_shared functions:
        pushKmpcAllocFree(NormalCleanup, AddrSizePair);

        // Mark the variable as allocated:
        VarAllocated = true;
      }
    }

    if (!VarAllocated) {
      if (!DidCallStackSave) {
        // Save the stack.
        Address Stack =
            CreateDefaultAlignTempAlloca(AllocaInt8PtrTy, "saved_stack");

        llvm::Value *V = Builder.CreateStackSave();
        assert(V->getType() == AllocaInt8PtrTy);
        Builder.CreateStore(V, Stack);

        DidCallStackSave = true;

        // Push a cleanup block and restore the stack there.
        // FIXME: in general circumstances, this should be an EH cleanup.
        pushStackRestore(NormalCleanup, Stack);
      }

      auto VlaSize = getVLASize(Ty);
      llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);

      // Allocate memory for the array.
      address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
                                 &AllocaAddr);
    }

    // If we have debug info enabled, properly describe the VLA dimensions for
    // this type by registering the vla size expression for each of the
    // dimensions.
    EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
  }
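  // For illustration (not part of this file): for C code like
  //
  //   void f(int n) { int vla[n]; ... }
  //
  // the path above emits, roughly (intrinsic spellings vary by LLVM version):
  //
  //   %sp  = call ptr @llvm.stacksave()
  //   %vla = alloca i32, i64 %n
  //   ...
  //   call void @llvm.stackrestore(ptr %sp)   ; from the pushed cleanup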
  setAddrOfLocalVar(&D, address);
  emission.Addr = address;
  emission.AllocaAddr = AllocaAddr;

  // Emit debug info for local var declaration.
  if (EmitDebugInfo && HaveInsertPoint()) {
    Address DebugAddr = address;
    bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
    DI->setLocation(D.getLocation());

    // If NRVO, use a pointer to the return address.
    if (UsePointerValue) {
      DebugAddr = ReturnValuePointer;
      AllocaAddr = ReturnValuePointer;
    }
    (void)DI->EmitDeclareOfAutoVariable(&D, AllocaAddr.getPointer(), Builder,
                                        UsePointerValue);
  }

  if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
    EmitVarAnnotations(&D, address.emitRawPointer(*this));

  // Make sure we call @llvm.lifetime.end.
  if (emission.useLifetimeMarkers())
    EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                         emission.getOriginalAllocatedAddress(),
                                         emission.getSizeForLifetimeMarkers());

  return emission;
}
static bool isCapturedBy(const VarDecl &, const Expr *);

/// Determines whether the given __block variable is potentially
/// captured by the given statement.
static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
  if (const Expr *E = dyn_cast<Expr>(S))
    return isCapturedBy(Var, E);
  for (const Stmt *SubStmt : S->children())
    if (isCapturedBy(Var, SubStmt))
      return true;
  return false;
}
/// Determines whether the given __block variable is potentially
/// captured by the given expression.
static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
  // Skip the most common kinds of expressions that make
  // hierarchy-walking expensive.
  E = E->IgnoreParenCasts();

  if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
    const BlockDecl *Block = BE->getBlockDecl();
    for (const auto &I : Block->captures()) {
      if (I.getVariable() == &Var)
        return true;
    }

    // No need to walk into the subexpressions.
    return false;
  }

  if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
    const CompoundStmt *CS = SE->getSubStmt();
    for (const auto *BI : CS->body())
      if (const auto *BIE = dyn_cast<Expr>(BI)) {
        if (isCapturedBy(Var, BIE))
          return true;
      } else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
        // Special-case declarations.
        for (const auto *I : DS->decls()) {
          if (const auto *VD = dyn_cast<VarDecl>(I)) {
            const Expr *Init = VD->getInit();
            if (Init && isCapturedBy(Var, Init))
              return true;
          }
        }
      } else
        // FIXME: Make the safe assumption that arbitrary statements cause
        // capturing. Later, provide code to poke into statements for capture
        // analysis.
        return true;
    return false;
  }

  for (const Stmt *SubStmt : E->children())
    if (isCapturedBy(Var, SubStmt))
      return true;

  return false;
}
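// For illustration (not part of this file): in the hypothetical snippet
//
//   __block int x = ^{ return x; }();
//
// the initializer's block captures 'x' itself, so isCapturedBy returns true
// and the caller must emit the initializer before moving the variable into
// its byref storage.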
/// Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
  if (!Init)
    return true;

  if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
    if (CXXConstructorDecl *Constructor = Construct->getConstructor())
      if (Constructor->isTrivial() &&
          Constructor->isDefaultConstructor() &&
          !Construct->requiresZeroInitialization())
        return true;

  return false;
}
void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
                                                      const VarDecl &D,
                                                      Address Loc) {
  auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
  auto trivialAutoVarInitMaxSize =
      getContext().getLangOpts().TrivialAutoVarInitMaxSize;
  CharUnits Size = getContext().getTypeSizeInChars(type);
  bool isVolatile = type.isVolatileQualified();
  if (!Size.isZero()) {
    // We skip auto-init variables by their alloc size. Take this as an
    // example: "struct Foo {int x; char buff[1024];}". Assume the max-size
    // flag is 1023. All Foo-typed variables will be skipped. Ideally, we
    // would only skip the buff array and still auto-init x in this example.
    // TODO: Improve the size filtering to filter by member size.
    auto allocSize = CGM.getDataLayout().getTypeAllocSize(Loc.getElementType());
    switch (trivialAutoVarInit) {
    case LangOptions::TrivialAutoVarInitKind::Uninitialized:
      llvm_unreachable("Uninitialized handled by caller");
    case LangOptions::TrivialAutoVarInitKind::Zero:
      if (CGM.stopAutoInit())
        return;
      if (trivialAutoVarInitMaxSize > 0 &&
          allocSize > trivialAutoVarInitMaxSize)
        return;
      emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
      break;
    case LangOptions::TrivialAutoVarInitKind::Pattern:
      if (CGM.stopAutoInit())
        return;
      if (trivialAutoVarInitMaxSize > 0 &&
          allocSize > trivialAutoVarInitMaxSize)
        return;
      emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
      break;
    }
    return;
  }
  // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
  // them, so emit a memcpy with the VLA size to initialize each element.
  // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
  // will catch that code, but there exists code which generates zero-sized
  // VLAs. Be nice and initialize whatever they requested.
  const auto *VlaType = getContext().getAsVariableArrayType(type);
  if (!VlaType)
    return;
  auto VlaSize = getVLASize(VlaType);
  auto SizeVal = VlaSize.NumElts;
  CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
  switch (trivialAutoVarInit) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    llvm_unreachable("Uninitialized handled by caller");

  case LangOptions::TrivialAutoVarInitKind::Zero: {
    if (CGM.stopAutoInit())
      return;
    if (!EltSize.isOne())
      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
    auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0),
                                   SizeVal, isVolatile);
    I->addAnnotationMetadata("auto-init");
    break;
  }

  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    if (CGM.stopAutoInit())
      return;
    llvm::Type *ElTy = Loc.getElementType();
    llvm::Constant *Constant = constWithPadding(
        CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
    CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
    llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
    llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
    llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
    llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
        SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
        "vla.iszerosized");
    Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
    EmitBlock(SetupBB);
    if (!EltSize.isOne())
      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
    llvm::Value *BaseSizeInChars =
        llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
    Address Begin = Loc.withElementType(Int8Ty);
    llvm::Value *End = Builder.CreateInBoundsGEP(Begin.getElementType(),
                                                 Begin.emitRawPointer(*this),
                                                 SizeVal, "vla.end");
    llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
    EmitBlock(LoopBB);
    llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
    Cur->addIncoming(Begin.emitRawPointer(*this), OriginBB);
    CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
    auto *I =
        Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign),
                             createUnnamedGlobalForMemcpyFrom(
                                 CGM, D, Builder, Constant, ConstantAlign),
                             BaseSizeInChars, isVolatile);
    I->addAnnotationMetadata("auto-init");
    llvm::Value *Next =
        Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
    llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
    Builder.CreateCondBr(Done, ContBB, LoopBB);
    Cur->addIncoming(Next, LoopBB);
    EmitBlock(ContBB);
    break;
  }
  }
}
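// For illustration (not part of this file): under
// -ftrivial-auto-var-init=zero an uninitialized local like "int x;" receives
// an explicit zero store, and under =pattern it is filled with a repeating
// byte pattern (0xAA on many targets); in both cases the emitted store is
// tagged with the "auto-init" annotation metadata used above.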
void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  const VarDecl &D = *emission.Variable;
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
  QualType type = D.getType();

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!Init || !ContainsLabel(Init)) return;
    EnsureInsertPoint();
  }

  // Initialize the structure of a __block variable.
  if (emission.IsEscapingByRef)
    emitByrefStructureInit(emission);

  // Initialize the variable here if it doesn't have an initializer and it is
  // a C struct that is non-trivial to initialize or an array containing such
  // a struct.
  if (!Init &&
      type.isNonTrivialToPrimitiveDefaultInitialize() ==
          QualType::PDIK_Struct) {
    LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
    if (emission.IsEscapingByRef)
      drillIntoBlockVariable(*this, Dst, &D);
    defaultInitNonTrivialCStructVar(Dst);
    return;
  }

  // Check whether this is a byref variable that's potentially
  // captured and moved by its own initializer. If so, we'll need to
  // emit the initializer first, then copy into the variable.
  bool capturedByInit =
      Init && emission.IsEscapingByRef && isCapturedBy(D, Init);

  bool locIsByrefHeader = !capturedByInit;
  const Address Loc =
      locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;

  // Note: constexpr already initializes everything correctly.
  LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
      (D.isConstexpr()
           ? LangOptions::TrivialAutoVarInitKind::Uninitialized
           : (D.getAttr<UninitializedAttr>()
                  ? LangOptions::TrivialAutoVarInitKind::Uninitialized
                  : getContext().getLangOpts().getTrivialAutoVarInit()));
  auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
    if (trivialAutoVarInit ==
        LangOptions::TrivialAutoVarInitKind::Uninitialized)
      return;

    // Only initialize a __block's storage: we always initialize the header.
    if (emission.IsEscapingByRef && !locIsByrefHeader)
      Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);

    return emitZeroOrPatternForAutoVarInit(type, D, Loc);
  };

  if (isTrivialInitializer(Init))
    return initializeWhatIsTechnicallyUninitialized(Loc);
  llvm::Constant *constant = nullptr;
  if (emission.IsConstantAggregate ||
      D.mightBeUsableInConstantExpressions(getContext())) {
    assert(!capturedByInit && "constant init contains a capturing block?");
    constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
    if (constant && !constant->isZeroValue() &&
        (trivialAutoVarInit !=
         LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
      IsPattern isPattern =
          (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
              ? IsPattern::Yes
              : IsPattern::No;
      // C guarantees that brace-init with fewer initializers than members in
      // the aggregate will initialize the rest of the aggregate as-if it were
      // static initialization. In turn static initialization guarantees that
      // padding is initialized to zero bits. We could instead pattern-init if
      // D has any ImplicitValueInitExpr, but that seems to be unintuitive
      // behavior.
      constant = constWithPadding(CGM, IsPattern::No,
                                  replaceUndef(CGM, isPattern, constant));
    }

    if (D.getType()->isBitIntType() &&
        CGM.getTypes().typeRequiresSplitIntoByteArray(D.getType())) {
      // Constants for long _BitInt types are split into individual bytes.
      // Try to fold these back into an integer constant so it can be stored
      // properly.
      llvm::Type *LoadType = CGM.getTypes().convertTypeForLoadStore(
          D.getType(), constant->getType());
      constant = llvm::ConstantFoldLoadFromConst(
          constant, LoadType, llvm::APInt::getZero(32), CGM.getDataLayout());
    }
  }

  if (!constant) {
    if (trivialAutoVarInit !=
        LangOptions::TrivialAutoVarInitKind::Uninitialized) {
      // At this point, we know D has an Init expression, but isn't a constant.
      // - If D is not a scalar, auto-var-init conservatively (members may be
      //   left uninitialized by constructor Init expressions for example).
      // - If D is a scalar, we only need to auto-var-init if there is a
      //   self-reference. Otherwise, the Init expression should be sufficient.
      //   It may be that the Init expression uses other uninitialized memory,
      //   but auto-var-init here would not help, as auto-init would get
      //   overwritten by Init.
      if (!D.getType()->isScalarType() || capturedByInit ||
          isAccessedBy(D, Init)) {
        initializeWhatIsTechnicallyUninitialized(Loc);
      }
    }
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitExprAsInit(Init, &D, lv, capturedByInit);
  }

  if (!emission.IsConstantAggregate) {
    // For simple scalar/complex initialization, store the value directly.
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitStoreThroughLValue(RValue::get(constant), lv, true);
  }

  emitStoresForConstant(CGM, D, Loc.withElementType(CGM.Int8Ty),
                        type.isVolatileQualified(), Builder, constant,
                        /*IsAutoInit=*/false);
}
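// For illustration (not part of this file): for a local such as
//
//   int arr[4] = {1, 2, 3, 4};
//
// the initializer is a constant aggregate, so the path above typically
// materializes it as an unnamed constant global and copies it into the
// alloca (often via memcpy) instead of emitting element-by-element stores.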
/// Emit an expression as an initializer for an object (variable, field, etc.)
/// at the given location. The expression is not necessarily the normal
/// initializer for the object, and the address is not necessarily
/// its normal location.
///
/// \param init the initializing expression
/// \param D the object to act as if we're initializing
/// \param lvalue the lvalue to initialize
/// \param capturedByInit true if \p D is a __block variable
///   whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  QualType type = D->getType();

  if (type->isReferenceType()) {
    RValue rvalue = EmitReferenceBindingToExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreThroughLValue(rvalue, lvalue, true);
    return;
  }
  switch (getEvaluationKind(type)) {
  case TEK_Scalar:
    EmitScalarInit(init, D, lvalue, capturedByInit);
    return;
  case TEK_Complex: {
    ComplexPairTy complex = EmitComplexExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreOfComplex(complex, lvalue, /*init*/ true);
    return;
  }
  case TEK_Aggregate:
    if (type->isAtomicType()) {
      EmitAtomicInit(const_cast<Expr *>(init), lvalue);
    } else {
      AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
      if (isa<VarDecl>(D))
        Overlap = AggValueSlot::DoesNotOverlap;
      else if (auto *FD = dyn_cast<FieldDecl>(D))
        Overlap = getOverlapForFieldInit(FD);
      // TODO: how can we delay here if D is captured by its initializer?
      EmitAggExpr(init,
                  AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                          AggValueSlot::IsNotAliased, Overlap));
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
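// For illustration (not part of this file): the evaluation kinds handled
// above correspond to initializations like
//
//   int i = 42;               // TEK_Scalar    -> EmitScalarInit
//   _Complex double z = 1.0;  // TEK_Complex   -> EmitComplexExpr + store
//   struct S s = make_s();    // TEK_Aggregate -> EmitAggExpr into the slot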
/// Enter a destroy cleanup for the given local variable.
void CodeGenFunction::emitAutoVarTypeCleanup(
    const CodeGenFunction::AutoVarEmission &emission,
    QualType::DestructionKind dtorKind) {
  assert(dtorKind != QualType::DK_none);

  // Note that for __block variables, we want to destroy the
  // original stack object, not the possibly forwarded object.
  Address addr = emission.getObjectAddress(*this);

  const VarDecl *var = emission.Variable;
  QualType type = var->getType();

  CleanupKind cleanupKind = NormalAndEHCleanup;
  CodeGenFunction::Destroyer *destroyer = nullptr;

  switch (dtorKind) {
  case QualType::DK_none:
    llvm_unreachable("no cleanup for trivially-destructible variable");

  case QualType::DK_cxx_destructor:
    // If there's an NRVO flag on the emission, we need a different
    // cleanup.
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
      EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type,
                                                  dtor, emission.NRVOFlag);
      return;
    }
    break;

  case QualType::DK_objc_strong_lifetime:
    // Suppress cleanups for pseudo-strong variables.
    if (var->isARCPseudoStrong()) return;

    // Otherwise, consider whether to use an EH cleanup or not.
    cleanupKind = getARCCleanupKind();

    // Use the imprecise destroyer by default.
    if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
      destroyer = CodeGenFunction::destroyARCStrongImprecise;
    break;

  case QualType::DK_objc_weak_lifetime:
    break;

  case QualType::DK_nontrivial_c_struct:
    destroyer = CodeGenFunction::destroyNonTrivialCStruct;
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
                                                emission.NRVOFlag, type);
      return;
    }
    break;
  }

  // If we haven't chosen a more specific destroyer, use the default.
  if (!destroyer) destroyer = getDestroyer(dtorKind);

  // Use an EH cleanup in array destructors iff the destructor itself
  // is being pushed as an EH cleanup.
  bool useEHCleanup = (cleanupKind & EHCleanup);
  EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
                                     useEHCleanup);
}
void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  // If we don't have an insertion point, we're done. Sema prevents
  // us from jumping into any of these scopes anyway.
  if (!HaveInsertPoint()) return;

  const VarDecl &D = *emission.Variable;

  // Check the type for a cleanup.
  if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
    emitAutoVarTypeCleanup(emission, dtorKind);

  // In GC mode, honor objc_precise_lifetime.
  if (getLangOpts().getGC() != LangOptions::NonGC &&
      D.hasAttr<ObjCPreciseLifetimeAttr>()) {
    EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
  }

  // Handle the cleanup attribute.
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant *F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
    EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
  }

  // If this is a block variable, call _Block_object_destroy
  // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
  // mode.
  if (emission.IsEscapingByRef &&
      CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
    BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
    if (emission.Variable->getType().isObjCGCWeak())
      Flags |= BLOCK_FIELD_IS_WEAK;
    enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
                      /*LoadBlockVarAddr*/ false,
                      cxxDestructorCanThrow(emission.Variable->getType()));
  }
}
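// For illustration (not part of this file): the cleanup attribute handled
// above corresponds to user code such as
//
//   void closef(FILE **fp) { if (*fp) fclose(*fp); }
//   ...
//   __attribute__((cleanup(closef))) FILE *f = fopen(path, "r");
//
// which arranges for closef(&f) to run when 'f' goes out of scope.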
CodeGenFunction::Destroyer *
CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
  switch (kind) {
  case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
  case QualType::DK_cxx_destructor:
    return destroyCXXObject;
  case QualType::DK_objc_strong_lifetime:
    return destroyARCStrongPrecise;
  case QualType::DK_objc_weak_lifetime:
    return destroyARCWeak;
  case QualType::DK_nontrivial_c_struct:
    return destroyNonTrivialCStruct;
  }
  llvm_unreachable("Unknown DestructionKind");
}
/// pushEHDestroy - Push the standard destructor for the given type as
/// an EH-only cleanup.
void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
                                    Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");
  assert(needsEHCleanup(dtorKind));

  pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
}

/// pushDestroy - Push the standard destructor for the given type as
/// at least a normal cleanup.
void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
                                  Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");

  CleanupKind cleanupKind = getCleanupKind(dtorKind);
  pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
              cleanupKind & EHCleanup);
}

void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
                                  QualType type, Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
                                     destroyer, useEHCleanupForArray);
}
// Pushes a destroy and defers its deactivation until its
// CleanupDeactivationScope is exited.
void CodeGenFunction::pushDestroyAndDeferDeactivation(
    QualType::DestructionKind dtorKind, Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");

  CleanupKind cleanupKind = getCleanupKind(dtorKind);
  pushDestroyAndDeferDeactivation(
      cleanupKind, addr, type, getDestroyer(dtorKind), cleanupKind & EHCleanup);
}

void CodeGenFunction::pushDestroyAndDeferDeactivation(
    CleanupKind cleanupKind, Address addr, QualType type, Destroyer *destroyer,
    bool useEHCleanupForArray) {
  llvm::Instruction *DominatingIP =
      Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
  pushDestroy(cleanupKind, addr, type, destroyer, useEHCleanupForArray);
  DeferredDeactivationCleanupStack.push_back(
      {EHStack.stable_begin(), DominatingIP});
}
void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
  EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}

void CodeGenFunction::pushKmpcAllocFree(
    CleanupKind Kind, std::pair<llvm::Value *, llvm::Value *> AddrSizePair) {
  EHStack.pushCleanup<KmpcAllocFree>(Kind, AddrSizePair);
}
void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
                                                  Address addr, QualType type,
                                                  Destroyer *destroyer,
                                                  bool useEHCleanupForArray) {
  // If we're not in a conditional branch, we don't need to bother generating a
  // conditional cleanup.
  if (!isInConditionalBranch()) {
    // FIXME: When popping normal cleanups, we need to keep this EH cleanup
    // around in case a temporary's destructor throws an exception.

    // Add the cleanup to the EHStack. After the full-expr, this would be
    // deactivated before being popped from the stack.
    pushDestroyAndDeferDeactivation(cleanupKind, addr, type, destroyer,
                                    useEHCleanupForArray);

    // Since this is lifetime-extended, push it once again to the EHStack after
    // the full expression.
    return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
        cleanupKind, Address::invalid(), addr, type, destroyer,
        useEHCleanupForArray);
  }

  // Otherwise, we should only destroy the object if it's been initialized.

  using ConditionalCleanupType =
      EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
                                       Destroyer *, bool>;
  DominatingValue<Address>::saved_type SavedAddr = saveValueInCond(addr);

  // Remember to emit the cleanup if we branch out before the end of the
  // full-expression (e.g. through a stmt-expr or a coroutine suspension).
  AllocaTrackerRAII DeactivationAllocas(*this);
  Address ActiveFlagForDeactivation = createCleanupActiveFlag();

  pushCleanupAndDeferDeactivation<ConditionalCleanupType>(
      cleanupKind, SavedAddr, type, destroyer, useEHCleanupForArray);
  initFullExprCleanupWithFlag(ActiveFlagForDeactivation);
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
  // Erase the active flag if the cleanup was not emitted.
  cleanup.AddAuxAllocas(std::move(DeactivationAllocas).Take());

  // Since this is lifetime-extended, push it once again to the EHStack after
  // the full expression.
  // The previous active flag would always be 'false' due to forced deferred
  // deactivation. Use a separate flag for lifetime-extension to correctly
  // remember if this branch was taken and the object was initialized.
  Address ActiveFlagForLifetimeExt = createCleanupActiveFlag();
  pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
      cleanupKind, ActiveFlagForLifetimeExt, SavedAddr, type, destroyer,
      useEHCleanupForArray);
}
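// For illustration (not part of this file): the conditional path above
// matters for lifetime-extended temporaries selected by a branch, e.g.
//
//   const std::string &r = cond ? std::string("a") : std::string("b");
//
// Only the temporary that was actually constructed may be destroyed at the
// end of r's scope, so the deferred cleanup is guarded by an active flag.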
/// emitDestroy - Immediately perform the destruction of the given
/// object.
///
/// \param addr - the address of the object; a type*
/// \param type - the type of the object; if an array type, all
///   objects are destroyed in reverse order
/// \param destroyer - the function to call to destroy individual
///   elements
/// \param useEHCleanupForArray - whether an EH cleanup should be
///   used when destroying array elements, in case one of the
///   destructions throws an exception
void CodeGenFunction::emitDestroy(Address addr, QualType type,
                                  Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  const ArrayType *arrayType = getContext().getAsArrayType(type);
  if (!arrayType)
    return destroyer(*this, addr, type);

  llvm::Value *length = emitArrayLength(arrayType, type, addr);

  CharUnits elementAlign =
      addr.getAlignment()
          .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));

  // Normally we have to check whether the array is zero-length.
  bool checkZeroLength = true;

  // But if the array length is constant, we can suppress that.
  if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
    // ...and if it's constant zero, we can just skip the entire thing.
    if (constLength->isZero()) return;
    checkZeroLength = false;
  }

  llvm::Value *begin = addr.emitRawPointer(*this);
  llvm::Value *end =
      Builder.CreateInBoundsGEP(addr.getElementType(), begin, length);
  emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                   checkZeroLength, useEHCleanupForArray);
}
/// emitArrayDestroy - Destroys all the elements of the given array,
/// beginning from last to first. The array cannot be zero-length.
///
/// \param begin - a type* denoting the first element of the array
/// \param end - a type* denoting one past the end of the array
/// \param elementType - the element type of the array
/// \param destroyer - the function to call to destroy elements
/// \param useEHCleanup - whether to push an EH cleanup to destroy
///   the remaining elements in case the destruction of a single
///   element throws
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
                                       llvm::Value *end,
                                       QualType elementType,
                                       CharUnits elementAlign,
                                       Destroyer *destroyer,
                                       bool checkZeroLength,
                                       bool useEHCleanup) {
  assert(!elementType->isArrayType());

  // The basic structure here is a do-while loop, because we don't
  // need to check for the zero-element case.
  llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
  llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");

  if (checkZeroLength) {
    llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
                                                "arraydestroy.isempty");
    Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
  }

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  EmitBlock(bodyBB);
  llvm::PHINode *elementPast =
      Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
  elementPast->addIncoming(end, entryBB);

  // Shift the address back by one element.
  llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
  llvm::Type *llvmElementType = ConvertTypeForMem(elementType);
  llvm::Value *element = Builder.CreateInBoundsGEP(
      llvmElementType, elementPast, negativeOne, "arraydestroy.element");

  if (useEHCleanup)
    pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
                                   destroyer);

  // Perform the actual destruction there.
  destroyer(*this, Address(element, llvmElementType, elementAlign),
            elementType);

  if (useEHCleanup)
    PopCleanupBlock();

  // Check whether we've reached the end.
  llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
  Builder.CreateCondBr(done, doneBB, bodyBB);
  elementPast->addIncoming(element, Builder.GetInsertBlock());

  // Done.
  EmitBlock(doneBB);
}
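// For illustration (not part of this file): for "S a[3];" the loop emitted
// above destroys a[2], then a[1], then a[0], morally equivalent to
//
//   for (S *p = a + 3; p != a;)
//     (--p)->~S();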
/// Perform partial array destruction as if in an EH cleanup. Unlike
/// emitArrayDestroy, the element type here may still be an array type.
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
                                    llvm::Value *begin, llvm::Value *end,
                                    QualType type, CharUnits elementAlign,
                                    CodeGenFunction::Destroyer *destroyer) {
  llvm::Type *elemTy = CGF.ConvertTypeForMem(type);

  // If the element type is itself an array, drill down.
  unsigned arrayDepth = 0;
  while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
    // VLAs don't require a GEP index to walk into.
    if (!isa<VariableArrayType>(arrayType))
      arrayDepth++;
    type = arrayType->getElementType();
  }

  if (arrayDepth) {
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);

    SmallVector<llvm::Value *, 4> gepIndices(arrayDepth + 1, zero);
    begin = CGF.Builder.CreateInBoundsGEP(
        elemTy, begin, gepIndices, "pad.arraybegin");
    end = CGF.Builder.CreateInBoundsGEP(
        elemTy, end, gepIndices, "pad.arrayend");
  }

  // Destroy the array. We don't ever need an EH cleanup because we
  // assume that we're in an EH cleanup ourselves, so a throwing
  // destructor causes an immediate terminate.
  CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                       /*checkZeroLength*/ true, /*useEHCleanup*/ false);
}
namespace {
  /// RegularPartialArrayDestroy - a cleanup which performs a partial
  /// array destroy where the end pointer is regularly determined and
  /// does not need to be loaded from a local.
  class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    llvm::Value *ArrayEnd;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
                               QualType elementType, CharUnits elementAlign,
                               CodeGenFunction::Destroyer *destroyer)
      : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
        ElementType(elementType), Destroyer(destroyer),
        ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };

  /// IrregularPartialArrayDestroy - a cleanup which performs a
  /// partial array destroy where the end pointer is irregularly
  /// determined and must be loaded from a local.
  class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    Address ArrayEndPointer;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
                                 Address arrayEndPointer,
                                 QualType elementType,
                                 CharUnits elementAlign,
                                 CodeGenFunction::Destroyer *destroyer)
      : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
        ElementType(elementType), Destroyer(destroyer),
        ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
      emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };
} // end anonymous namespace
/// pushIrregularPartialArrayCleanup - Push a NormalAndEHCleanup to
/// destroy already-constructed elements of the given array. The cleanup may
/// be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                       Address arrayEndPointer,
                                                       QualType elementType,
                                                       CharUnits elementAlign,
                                                       Destroyer *destroyer) {
  pushFullExprCleanup<IrregularPartialArrayDestroy>(
      NormalAndEHCleanup, arrayBegin, arrayEndPointer, elementType,
      elementAlign, destroyer);
}

/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                     llvm::Value *arrayEnd,
                                                     QualType elementType,
                                                     CharUnits elementAlign,
                                                     Destroyer *destroyer) {
  pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
                                                  arrayBegin, arrayEnd,
                                                  elementType, elementAlign,
                                                  destroyer);
}
/// Lazily declare the @llvm.lifetime.start intrinsic.
llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
  if (LifetimeStartFn)
    return LifetimeStartFn;
  LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
      llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
  return LifetimeStartFn;
}

/// Lazily declare the @llvm.lifetime.end intrinsic.
llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
  if (LifetimeEndFn)
    return LifetimeEndFn;
  LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
      llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
  return LifetimeEndFn;
}
namespace {
/// A cleanup to perform a release of an object at the end of a
/// function. This is used to balance out the incoming +1 of a
/// ns_consumed argument when we can't reasonably do that just by
/// not doing the initial retain for a __block argument.
struct ConsumeARCParameter final : EHScopeStack::Cleanup {
  ConsumeARCParameter(llvm::Value *param,
                      ARCPreciseLifetime_t precise)
      : Param(param), Precise(precise) {}

  llvm::Value *Param;
  ARCPreciseLifetime_t Precise;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitARCRelease(Param, Precise);
  }
};
} // end anonymous namespace
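// For illustration (not part of this file): this cleanup balances an ARC
// parameter declared like the hypothetical
//
//   void take(__attribute__((ns_consumed)) id obj);
//
// where the caller hands over a +1 reference; if the callee never retained
// the value itself, it must release it on exit, which is what Emit() does.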
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
                                   unsigned ArgNo) {
  bool NoDebugInfo = false;
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");

  // Set the name of the parameter's initial value to make IR easier to
  // read. Don't modify the names of globals.
  if (!isa<llvm::GlobalValue>(Arg.getAnyValue()))
    Arg.getAnyValue()->setName(D.getName());

  QualType Ty = D.getType();

  // Use better IR generation for certain implicit parameters.
  if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
    // The only implicit argument a block has is its literal.
    // This may be passed as an inalloca'ed value on Windows x86.
    if (BlockInfo) {
      llvm::Value *V = Arg.isIndirect()
                           ? Builder.CreateLoad(Arg.getIndirectAddress())
                           : Arg.getDirectValue();
      setBlockContextParameter(IPD, ArgNo, V);
      return;
    }
    // Suppress debug info for ThreadPrivateVar parameters; otherwise it
    // hides the debug info of TLS variables.
    NoDebugInfo =
        (IPD->getParameterKind() == ImplicitParamKind::ThreadPrivateVar);
  }

  Address DeclPtr = Address::invalid();
  RawAddress AllocaPtr = Address::invalid();
  bool DoStore = false;
  bool IsScalar = hasScalarEvaluationKind(Ty);
  bool UseIndirectDebugAddress = false;

  // If we already have a pointer to the argument, reuse the input pointer.
  if (Arg.isIndirect()) {
    DeclPtr = Arg.getIndirectAddress();
    DeclPtr = DeclPtr.withElementType(ConvertTypeForMem(Ty));
    // Indirect argument is in alloca address space, which may be different
    // from the default address space.
    auto AllocaAS = CGM.getASTAllocaAddressSpace();
    auto *V = DeclPtr.emitRawPointer(*this);
    AllocaPtr = RawAddress(V, DeclPtr.getElementType(), DeclPtr.getAlignment());

    // For truly ABI indirect arguments -- those that are not `byval` -- store
    // the address of the argument on the stack to preserve debug information.
    ABIArgInfo ArgInfo = CurFnInfo->arguments()[ArgNo - 1].info;
    if (ArgInfo.isIndirect())
      UseIndirectDebugAddress = !ArgInfo.getIndirectByVal();
    if (UseIndirectDebugAddress) {
      auto PtrTy = getContext().getPointerType(Ty);
      AllocaPtr = CreateMemTemp(PtrTy, getContext().getTypeAlignInChars(PtrTy),
                                D.getName() + ".indirect_addr");
      EmitStoreOfScalar(V, AllocaPtr, /* Volatile */ false, PtrTy);
    }

    auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
    auto DestLangAS =
        getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
    if (SrcLangAS != DestLangAS) {
      assert(getContext().getTargetAddressSpace(SrcLangAS) ==
             CGM.getDataLayout().getAllocaAddrSpace());
      auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
      auto *T = llvm::PointerType::get(getLLVMContext(), DestAS);
      DeclPtr =
          DeclPtr.withPointer(getTargetHooks().performAddrSpaceCast(
                                  *this, V, SrcLangAS, DestLangAS, T, true),
                              DeclPtr.isKnownNonNull());
    }

    // Push a destructor cleanup for this parameter if the ABI requires it.
    // Don't push a cleanup in a thunk for a method that will also emit a
    // cleanup.
    if (Ty->isRecordType() && !CurFuncIsThunk &&
        Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
      if (QualType::DestructionKind DtorKind =
              D.needsDestruction(getContext())) {
        assert((DtorKind == QualType::DK_cxx_destructor ||
                DtorKind == QualType::DK_nontrivial_c_struct) &&
               "unexpected destructor type");
        pushDestroy(DtorKind, DeclPtr, Ty);
        CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
            EHStack.stable_begin();
      }
    }
  } else {
    // Check if the parameter address is controlled by OpenMP runtime.
    Address OpenMPLocalAddr =
        getLangOpts().OpenMP
            ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
            : Address::invalid();
    if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
      DeclPtr = OpenMPLocalAddr;
      AllocaPtr = DeclPtr;
    } else {
      // Otherwise, create a temporary to hold the value.
      DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
                              D.getName() + ".addr", &AllocaPtr);
    }
    DoStore = true;
  }

  llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);

  LValue lv = MakeAddrLValue(DeclPtr, Ty);
  if (IsScalar) {
    Qualifiers qs = Ty.getQualifiers();
    if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
      // We honor __attribute__((ns_consumed)) for types with lifetime.
      // For __strong, it's handled by just skipping the initial retain;
      // otherwise we have to balance out the initial +1 with an extra
      // cleanup to do the release at the end of the function.
      bool isConsumed = D.hasAttr<NSConsumedAttr>();

      // If a parameter is pseudo-strong then we can omit the implicit retain.
      if (D.isARCPseudoStrong()) {
        assert(lt == Qualifiers::OCL_Strong &&
               "pseudo-strong variable isn't strong?");
        assert(qs.hasConst() && "pseudo-strong variable should be const!");
        lt = Qualifiers::OCL_ExplicitNone;
      }

      // Load objects passed indirectly.
      if (Arg.isIndirect() && !ArgVal)
        ArgVal = Builder.CreateLoad(DeclPtr);

      if (lt == Qualifiers::OCL_Strong) {
        if (!isConsumed) {
          if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
            // Use objc_storeStrong(&dest, value) for retaining the
            // object. But first, store a null into 'dest' because
            // objc_storeStrong attempts to release its old value.
            llvm::Value *Null = CGM.EmitNullConstant(D.getType());
            EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
            EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
            DoStore = false;
          } else
            // Don't use objc_retainBlock for block pointers, because we
            // don't want to Block_copy something just because we got it
            // as a parameter.
            ArgVal = EmitARCRetainNonBlock(ArgVal);
        }
      } else {
        // Push the cleanup for a consumed parameter.
        if (isConsumed) {
          ARCPreciseLifetime_t precise =
              (D.hasAttr<ObjCPreciseLifetimeAttr>() ? ARCPreciseLifetime
                                                    : ARCImpreciseLifetime);
          EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
                                                   precise);
        }

        if (lt == Qualifiers::OCL_Weak) {
          EmitARCInitWeak(DeclPtr, ArgVal);
          DoStore = false; // The weak init is a store, no need to do two.
        }
      }

      // Enter the cleanup scope.
      EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
    }
  }

  // Store the initial value into the alloca.
  if (DoStore)
    EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);

  setAddrOfLocalVar(&D, DeclPtr);

  // Emit debug info for param declarations in non-thunk functions.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk &&
        !NoDebugInfo) {
      llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
          &D, AllocaPtr.getPointer(), ArgNo, Builder, UseIndirectDebugAddress);
      if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
        DI->getParamDbgMappings().insert({Var, DILocalVar});
    }
  }

  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, DeclPtr.emitRawPointer(*this));

  // We can only check return value nullability if all arguments to the
  // function satisfy their nullability preconditions. This makes it necessary
  // to emit null checks for args in the function body itself.
  if (requiresReturnValueNullabilityCheck()) {
    auto Nullability = Ty->getNullability();
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      SanitizerScope SanScope(this);
      RetValNullabilityPrecondition =
          Builder.CreateAnd(RetValNullabilityPrecondition,
                            Builder.CreateIsNotNull(Arg.getAnyValue()));
    }
  }
}
void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
                                            CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
}

void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
                                         CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
      (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
}

void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
  getOpenMPRuntime().processRequiresDirective(D);
}
*D
) {
2793 for (const Expr
*E
: D
->varlists()) {
2794 const auto *DE
= cast
<DeclRefExpr
>(E
);
2795 const auto *VD
= cast
<VarDecl
>(DE
->getDecl());
2797 // Skip all but globals.
2798 if (!VD
->hasGlobalStorage())
2801 // Check if the global has been materialized yet or not. If not, we are done
2802 // as any later generation will utilize the OMPAllocateDeclAttr. However, if
2803 // we already emitted the global we might have done so before the
2804 // OMPAllocateDeclAttr was attached, leading to the wrong address space
2805 // (potentially). While not pretty, common practise is to remove the old IR
2806 // global and generate a new one, so we do that here too. Uses are replaced
2808 StringRef MangledName
= getMangledName(VD
);
2809 llvm::GlobalValue
*Entry
= GetGlobalValue(MangledName
);
2813 // We can also keep the existing global if the address space is what we
2814 // expect it to be, if not, it is replaced.
2815 QualType ASTTy
= VD
->getType();
2816 clang::LangAS GVAS
= GetGlobalVarAddressSpace(VD
);
2817 auto TargetAS
= getContext().getTargetAddressSpace(GVAS
);
2818 if (Entry
->getType()->getAddressSpace() == TargetAS
)
2821 // Make a new global with the correct type / address space.
2822 llvm::Type
*Ty
= getTypes().ConvertTypeForMem(ASTTy
);
2823 llvm::PointerType
*PTy
= llvm::PointerType::get(Ty
, TargetAS
);
2825 // Replace all uses of the old global with a cast. Since we mutate the type
2826 // in place we neeed an intermediate that takes the spot of the old entry
2827 // until we can create the cast.
2828 llvm::GlobalVariable
*DummyGV
= new llvm::GlobalVariable(
2829 getModule(), Entry
->getValueType(), false,
2830 llvm::GlobalValue::CommonLinkage
, nullptr, "dummy", nullptr,
2831 llvm::GlobalVariable::NotThreadLocal
, Entry
->getAddressSpace());
2832 Entry
->replaceAllUsesWith(DummyGV
);
2834 Entry
->mutateType(PTy
);
2835 llvm::Constant
*NewPtrForOldDecl
=
2836 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2837 Entry
, DummyGV
->getType());
2839 // Now we have a casted version of the changed global, the dummy can be
2840 // replaced and deleted.
2841 DummyGV
->replaceAllUsesWith(NewPtrForOldDecl
);
2842 DummyGV
->eraseFromParent();
std::optional<CharUnits>
CodeGenModule::getOMPAllocateAlignment(const VarDecl *VD) {
  if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) {
    if (Expr *Alignment = AA->getAlignment()) {
      unsigned UserAlign =
          Alignment->EvaluateKnownConstInt(getContext()).getExtValue();
      CharUnits NaturalAlign =
          getNaturalTypeAlignment(VD->getType().getNonReferenceType());

      // OpenMP 5.1, pg. 185, lines 7-10:
      //   Each item in the align modifier list must be aligned to the maximum
      //   of the specified alignment and the type's natural alignment.
      return CharUnits::fromQuantity(
          std::max<unsigned>(UserAlign, NaturalAlign.getQuantity()));
    }
  }
  return std::nullopt;
}
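// For illustration (not part of this file): given the hypothetical
//
//   double x;
//   #pragma omp allocate(x) align(16)
//
// getOMPAllocateAlignment returns max(16, alignof(double)) == 16 bytes, per
// the OpenMP 5.1 rule cited above.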