1 //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This is the internal per-function state used for llvm translation.
11 //===----------------------------------------------------------------------===//
13 #ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
14 #define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
16 #include "CGBuilder.h"
17 #include "CGDebugInfo.h"
18 #include "CGLoopInfo.h"
20 #include "CodeGenModule.h"
21 #include "CodeGenPGO.h"
22 #include "EHScopeStack.h"
23 #include "VarBypassDetector.h"
24 #include "clang/AST/CharUnits.h"
25 #include "clang/AST/CurrentSourceLocExprScope.h"
26 #include "clang/AST/ExprCXX.h"
27 #include "clang/AST/ExprObjC.h"
28 #include "clang/AST/ExprOpenMP.h"
29 #include "clang/AST/StmtOpenMP.h"
30 #include "clang/AST/Type.h"
31 #include "clang/Basic/ABI.h"
32 #include "clang/Basic/CapturedStmt.h"
33 #include "clang/Basic/CodeGenOptions.h"
34 #include "clang/Basic/OpenMPKinds.h"
35 #include "clang/Basic/TargetInfo.h"
36 #include "llvm/ADT/ArrayRef.h"
37 #include "llvm/ADT/DenseMap.h"
38 #include "llvm/ADT/MapVector.h"
39 #include "llvm/ADT/SmallVector.h"
40 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
41 #include "llvm/IR/ValueHandle.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Transforms/Utils/SanitizerStats.h"
53 class CanonicalLoopInfo
;
58 class CXXDestructorDecl
;
59 class CXXForRangeStmt
;
64 class FunctionProtoType
;
66 class ObjCContainerDecl
;
67 class ObjCInterfaceDecl
;
70 class ObjCImplementationDecl
;
71 class ObjCPropertyImplDecl
;
74 class ObjCForCollectionStmt
;
76 class ObjCAtThrowStmt
;
77 class ObjCAtSynchronizedStmt
;
78 class ObjCAutoreleasePoolStmt
;
79 class OMPUseDevicePtrClause
;
80 class OMPUseDeviceAddrClause
;
82 class OMPExecutableDirective
;
84 namespace analyze_os_log
{
85 class OSLogBufferLayout
;
94 class BlockByrefHelpers
;
96 class BlockFieldFlags
;
97 class RegionCodeGenTy
;
98 class TargetCodeGenInfo
;
102 /// The kind of evaluation to perform on values of a particular
103 /// type. Basically, is the code in CGExprScalar, CGExprComplex, or
106 /// TODO: should vectors maybe be split out into their own thing?
107 enum TypeEvaluationKind
{
// X-macro table of sanitizer runtime checks.  Each row is
// SANITIZER_CHECK(Enum, Name, Version): Enum becomes an enumerator of the
// SanitizerHandler enum generated immediately below this table; Name and
// Version presumably identify the corresponding runtime handler callback and
// its ABI version (only nullability_return, nonnull_return and type_mismatch
// carry Version 1) -- TODO confirm against the expansion sites in CodeGen.
// Consumers #define SANITIZER_CHECK(Enum, Name, Version), expand
// LIST_SANITIZER_CHECKS, then #undef SANITIZER_CHECK.
113 #define LIST_SANITIZER_CHECKS \
114 SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
115 SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
116 SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
117 SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
118 SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
119 SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
120 SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
121 SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
122 SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
123 SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0) \
124 SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
125 SANITIZER_CHECK(MissingReturn, missing_return, 0) \
126 SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
127 SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
128 SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
129 SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
130 SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
131 SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
132 SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
133 SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
134 SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
135 SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
136 SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
137 SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
138 SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0)
140 enum SanitizerHandler
{
141 #define SANITIZER_CHECK(Enum, Name, Version) Enum,
142 LIST_SANITIZER_CHECKS
143 #undef SANITIZER_CHECK
146 /// Helper class with most of the code for saving a value for a
147 /// conditional expression cleanup.
148 struct DominatingLLVMValue
{
149 typedef llvm::PointerIntPair
<llvm::Value
*, 1, bool> saved_type
;
151 /// Answer whether the given value needs extra work to be saved.
152 static bool needsSaving(llvm::Value
*value
) {
153 // If it's not an instruction, we don't need to save.
154 if (!isa
<llvm::Instruction
>(value
)) return false;
156 // If it's an instruction in the entry block, we don't need to save.
157 llvm::BasicBlock
*block
= cast
<llvm::Instruction
>(value
)->getParent();
158 return (block
!= &block
->getParent()->getEntryBlock());
161 static saved_type
save(CodeGenFunction
&CGF
, llvm::Value
*value
);
162 static llvm::Value
*restore(CodeGenFunction
&CGF
, saved_type value
);
165 /// A partial specialization of DominatingValue for llvm::Values that
166 /// might be llvm::Instructions.
167 template <class T
> struct DominatingPointer
<T
,true> : DominatingLLVMValue
{
169 static type
restore(CodeGenFunction
&CGF
, saved_type value
) {
170 return static_cast<T
*>(DominatingLLVMValue::restore(CGF
, value
));
174 /// A specialization of DominatingValue for Address.
175 template <> struct DominatingValue
<Address
> {
176 typedef Address type
;
179 DominatingLLVMValue::saved_type SavedValue
;
180 llvm::Type
*ElementType
;
184 static bool needsSaving(type value
) {
185 return DominatingLLVMValue::needsSaving(value
.getPointer());
187 static saved_type
save(CodeGenFunction
&CGF
, type value
) {
188 return { DominatingLLVMValue::save(CGF
, value
.getPointer()),
189 value
.getElementType(), value
.getAlignment() };
191 static type
restore(CodeGenFunction
&CGF
, saved_type value
) {
192 return Address(DominatingLLVMValue::restore(CGF
, value
.SavedValue
),
193 value
.ElementType
, value
.Alignment
);
197 /// A specialization of DominatingValue for RValue.
198 template <> struct DominatingValue
<RValue
> {
201 enum Kind
{ ScalarLiteral
, ScalarAddress
, AggregateLiteral
,
202 AggregateAddress
, ComplexAddress
};
205 llvm::Type
*ElementType
;
208 saved_type(llvm::Value
*v
, llvm::Type
*e
, Kind k
, unsigned a
= 0)
209 : Value(v
), ElementType(e
), K(k
), Align(a
) {}
212 static bool needsSaving(RValue value
);
213 static saved_type
save(CodeGenFunction
&CGF
, RValue value
);
214 RValue
restore(CodeGenFunction
&CGF
);
216 // implementations in CGCleanup.cpp
219 static bool needsSaving(type value
) {
220 return saved_type::needsSaving(value
);
222 static saved_type
save(CodeGenFunction
&CGF
, type value
) {
223 return saved_type::save(CGF
, value
);
225 static type
restore(CodeGenFunction
&CGF
, saved_type value
) {
226 return value
.restore(CGF
);
230 /// CodeGenFunction - This class organizes the per-function state that is used
231 /// while generating LLVM code.
232 class CodeGenFunction
: public CodeGenTypeCache
{
233 CodeGenFunction(const CodeGenFunction
&) = delete;
234 void operator=(const CodeGenFunction
&) = delete;
236 friend class CGCXXABI
;
238 /// A jump destination is an abstract label, branching to which may
239 /// require a jump out through normal cleanups.
241 JumpDest() : Block(nullptr), Index(0) {}
242 JumpDest(llvm::BasicBlock
*Block
, EHScopeStack::stable_iterator Depth
,
244 : Block(Block
), ScopeDepth(Depth
), Index(Index
) {}
246 bool isValid() const { return Block
!= nullptr; }
247 llvm::BasicBlock
*getBlock() const { return Block
; }
248 EHScopeStack::stable_iterator
getScopeDepth() const { return ScopeDepth
; }
249 unsigned getDestIndex() const { return Index
; }
251 // This should be used cautiously.
252 void setScopeDepth(EHScopeStack::stable_iterator depth
) {
257 llvm::BasicBlock
*Block
;
258 EHScopeStack::stable_iterator ScopeDepth
;
262 CodeGenModule
&CGM
; // Per-module state.
263 const TargetInfo
&Target
;
265 // For EH/SEH outlined funclets, this field points to parent's CGF
266 CodeGenFunction
*ParentCGF
= nullptr;
268 typedef std::pair
<llvm::Value
*, llvm::Value
*> ComplexPairTy
;
269 LoopInfoStack LoopStack
;
272 // Stores variables for which we can't generate correct lifetime markers
274 VarBypassDetector Bypasses
;
276 /// List of recently emitted OMPCanonicalLoops.
278 /// Since OMPCanonicalLoops are nested inside other statements (in particular
279 /// CapturedStmt generated by OMPExecutableDirective and non-perfectly nested
280 /// loops), we cannot directly call OMPEmitOMPCanonicalLoop and receive its
281 /// llvm::CanonicalLoopInfo. Instead, we call EmitStmt and any
282 /// OMPEmitOMPCanonicalLoop called by it will add its CanonicalLoopInfo to
283 /// this stack when done. Entering a new loop requires clearing this list; it
284 /// either means we start parsing a new loop nest (in which case the previous
285 /// loop nest goes out of scope) or a second loop in the same level in which
286 /// case it would be ambiguous into which of the two (or more) loops the loop
287 /// nest would extend.
288 SmallVector
<llvm::CanonicalLoopInfo
*, 4> OMPLoopNestStack
;
290 /// Number of nested loop to be consumed by the last surrounding
291 /// loop-associated directive.
292 int ExpectedOMPLoopDepth
= 0;
294 // CodeGen lambda for loops and support for ordered clause
295 typedef llvm::function_ref
<void(CodeGenFunction
&, const OMPLoopDirective
&,
298 typedef llvm::function_ref
<void(CodeGenFunction
&, SourceLocation
,
299 const unsigned, const bool)>
302 // Codegen lambda for loop bounds in worksharing loop constructs
303 typedef llvm::function_ref
<std::pair
<LValue
, LValue
>(
304 CodeGenFunction
&, const OMPExecutableDirective
&S
)>
307 // Codegen lambda for loop bounds in dispatch-based loop implementation
308 typedef llvm::function_ref
<std::pair
<llvm::Value
*, llvm::Value
*>(
309 CodeGenFunction
&, const OMPExecutableDirective
&S
, Address LB
,
311 CodeGenDispatchBoundsTy
;
313 /// CGBuilder insert helper. This function is called after an
314 /// instruction is created using Builder.
315 void InsertHelper(llvm::Instruction
*I
, const llvm::Twine
&Name
,
316 llvm::BasicBlock
*BB
,
317 llvm::BasicBlock::iterator InsertPt
) const;
319 /// CurFuncDecl - Holds the Decl for the current outermost
320 /// non-closure context.
321 const Decl
*CurFuncDecl
;
322 /// CurCodeDecl - This is the inner-most code context, which includes blocks.
323 const Decl
*CurCodeDecl
;
324 const CGFunctionInfo
*CurFnInfo
;
326 llvm::Function
*CurFn
= nullptr;
328 /// Save Parameter Decl for coroutine.
329 llvm::SmallVector
<const ParmVarDecl
*, 4> FnArgs
;
331 // Holds coroutine data if the current function is a coroutine. We use a
332 // wrapper to manage its lifetime, so that we don't have to define CGCoroData
335 std::unique_ptr
<CGCoroData
> Data
;
336 bool InSuspendBlock
= false;
342 bool isCoroutine() const {
343 return CurCoro
.Data
!= nullptr;
346 bool inSuspendBlock() const {
347 return isCoroutine() && CurCoro
.InSuspendBlock
;
350 /// CurGD - The GlobalDecl for the current function being compiled.
353 /// PrologueCleanupDepth - The cleanup depth enclosing all the
354 /// cleanups associated with the parameters.
355 EHScopeStack::stable_iterator PrologueCleanupDepth
;
357 /// ReturnBlock - Unified return block.
358 JumpDest ReturnBlock
;
360 /// ReturnValue - The temporary alloca to hold the return
361 /// value. This is invalid iff the function has no return value.
362 Address ReturnValue
= Address::invalid();
364 /// ReturnValuePointer - The temporary alloca to hold a pointer to sret.
365 /// This is invalid if sret is not in use.
366 Address ReturnValuePointer
= Address::invalid();
368 /// If a return statement is being visited, this holds the return statement's
369 /// result expression.
370 const Expr
*RetExpr
= nullptr;
372 /// Return true if a label was seen in the current scope.
373 bool hasLabelBeenSeenInCurrentScope() const {
375 return CurLexicalScope
->hasLabels();
376 return !LabelMap
.empty();
379 /// AllocaInsertPoint - This is an instruction in the entry block before which
380 /// we prefer to insert allocas.
381 llvm::AssertingVH
<llvm::Instruction
> AllocaInsertPt
;
384 /// PostAllocaInsertPt - This is a place in the prologue where code can be
385 /// inserted that will be dominated by all the static allocas. This helps
386 /// achieve two things:
387 /// 1. Contiguity of all static allocas (within the prologue) is maintained.
388 /// 2. All other prologue code (which are dominated by static allocas) do
389 /// appear in the source order immediately after all static allocas.
391 /// PostAllocaInsertPt will be lazily created when it is *really* required.
392 llvm::AssertingVH
<llvm::Instruction
> PostAllocaInsertPt
= nullptr;
395 /// Return PostAllocaInsertPt. If it is not yet created, then insert it
396 /// immediately after AllocaInsertPt.
397 llvm::Instruction
*getPostAllocaInsertPoint() {
398 if (!PostAllocaInsertPt
) {
399 assert(AllocaInsertPt
&&
400 "Expected static alloca insertion point at function prologue");
401 assert(AllocaInsertPt
->getParent()->isEntryBlock() &&
402 "EBB should be entry block of the current code gen function");
403 PostAllocaInsertPt
= AllocaInsertPt
->clone();
404 PostAllocaInsertPt
->setName("postallocapt");
405 PostAllocaInsertPt
->insertAfter(AllocaInsertPt
);
408 return PostAllocaInsertPt
;
411 /// API for captured statement code generation.
412 class CGCapturedStmtInfo
{
414 explicit CGCapturedStmtInfo(CapturedRegionKind K
= CR_Default
)
415 : Kind(K
), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
416 explicit CGCapturedStmtInfo(const CapturedStmt
&S
,
417 CapturedRegionKind K
= CR_Default
)
418 : Kind(K
), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
420 RecordDecl::field_iterator Field
=
421 S
.getCapturedRecordDecl()->field_begin();
422 for (CapturedStmt::const_capture_iterator I
= S
.capture_begin(),
424 I
!= E
; ++I
, ++Field
) {
425 if (I
->capturesThis())
426 CXXThisFieldDecl
= *Field
;
427 else if (I
->capturesVariable())
428 CaptureFields
[I
->getCapturedVar()->getCanonicalDecl()] = *Field
;
429 else if (I
->capturesVariableByCopy())
430 CaptureFields
[I
->getCapturedVar()->getCanonicalDecl()] = *Field
;
434 virtual ~CGCapturedStmtInfo();
436 CapturedRegionKind
getKind() const { return Kind
; }
438 virtual void setContextValue(llvm::Value
*V
) { ThisValue
= V
; }
439 // Retrieve the value of the context parameter.
440 virtual llvm::Value
*getContextValue() const { return ThisValue
; }
442 /// Lookup the captured field decl for a variable.
443 virtual const FieldDecl
*lookup(const VarDecl
*VD
) const {
444 return CaptureFields
.lookup(VD
->getCanonicalDecl());
447 bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
448 virtual FieldDecl
*getThisFieldDecl() const { return CXXThisFieldDecl
; }
450 static bool classof(const CGCapturedStmtInfo
*) {
454 /// Emit the captured statement body.
455 virtual void EmitBody(CodeGenFunction
&CGF
, const Stmt
*S
) {
456 CGF
.incrementProfileCounter(S
);
460 /// Get the name of the capture helper.
461 virtual StringRef
getHelperName() const { return "__captured_stmt"; }
463 /// Get the CaptureFields
464 llvm::SmallDenseMap
<const VarDecl
*, FieldDecl
*> getCaptureFields() {
465 return CaptureFields
;
469 /// The kind of captured statement being generated.
470 CapturedRegionKind Kind
;
472 /// Keep the map between VarDecl and FieldDecl.
473 llvm::SmallDenseMap
<const VarDecl
*, FieldDecl
*> CaptureFields
;
475 /// The base address of the captured record, passed in as the first
476 /// argument of the parallel region function.
477 llvm::Value
*ThisValue
;
479 /// Captured 'this' type.
480 FieldDecl
*CXXThisFieldDecl
;
482 CGCapturedStmtInfo
*CapturedStmtInfo
= nullptr;
484 /// RAII for correct setting/restoring of CapturedStmtInfo.
485 class CGCapturedStmtRAII
{
487 CodeGenFunction
&CGF
;
488 CGCapturedStmtInfo
*PrevCapturedStmtInfo
;
490 CGCapturedStmtRAII(CodeGenFunction
&CGF
,
491 CGCapturedStmtInfo
*NewCapturedStmtInfo
)
492 : CGF(CGF
), PrevCapturedStmtInfo(CGF
.CapturedStmtInfo
) {
493 CGF
.CapturedStmtInfo
= NewCapturedStmtInfo
;
495 ~CGCapturedStmtRAII() { CGF
.CapturedStmtInfo
= PrevCapturedStmtInfo
; }
498 /// An abstract representation of regular/ObjC call/message targets.
499 class AbstractCallee
{
500 /// The function declaration of the callee.
501 const Decl
*CalleeDecl
;
504 AbstractCallee() : CalleeDecl(nullptr) {}
505 AbstractCallee(const FunctionDecl
*FD
) : CalleeDecl(FD
) {}
506 AbstractCallee(const ObjCMethodDecl
*OMD
) : CalleeDecl(OMD
) {}
507 bool hasFunctionDecl() const {
508 return isa_and_nonnull
<FunctionDecl
>(CalleeDecl
);
510 const Decl
*getDecl() const { return CalleeDecl
; }
511 unsigned getNumParams() const {
512 if (const auto *FD
= dyn_cast
<FunctionDecl
>(CalleeDecl
))
513 return FD
->getNumParams();
514 return cast
<ObjCMethodDecl
>(CalleeDecl
)->param_size();
516 const ParmVarDecl
*getParamDecl(unsigned I
) const {
517 if (const auto *FD
= dyn_cast
<FunctionDecl
>(CalleeDecl
))
518 return FD
->getParamDecl(I
);
519 return *(cast
<ObjCMethodDecl
>(CalleeDecl
)->param_begin() + I
);
523 /// Sanitizers enabled for this function.
524 SanitizerSet SanOpts
;
526 /// True if CodeGen currently emits code implementing sanitizer checks.
527 bool IsSanitizerScope
= false;
529 /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
530 class SanitizerScope
{
531 CodeGenFunction
*CGF
;
533 SanitizerScope(CodeGenFunction
*CGF
);
537 /// In C++, whether we are code generating a thunk. This controls whether we
538 /// should emit cleanups.
539 bool CurFuncIsThunk
= false;
541 /// In ARC, whether we should autorelease the return value.
542 bool AutoreleaseResult
= false;
544 /// Whether we processed a Microsoft-style asm block during CodeGen. These can
545 /// potentially set the return value.
546 bool SawAsmBlock
= false;
548 GlobalDecl CurSEHParent
;
550 /// True if the current function is an outlined SEH helper. This can be a
551 /// finally block or filter expression.
552 bool IsOutlinedSEHHelper
= false;
554 /// True if CodeGen currently emits code inside preserved access index
556 bool IsInPreservedAIRegion
= false;
558 /// True if the current statement has nomerge attribute.
559 bool InNoMergeAttributedStmt
= false;
561 /// True if the current statement has noinline attribute.
562 bool InNoInlineAttributedStmt
= false;
564 /// True if the current statement has always_inline attribute.
565 bool InAlwaysInlineAttributedStmt
= false;
567 // The CallExpr within the current statement that the musttail attribute
568 // applies to. nullptr if there is no 'musttail' on the current statement.
569 const CallExpr
*MustTailCall
= nullptr;
571 /// Returns true if a function must make progress, which means the
572 /// mustprogress attribute can be added.
573 bool checkIfFunctionMustProgress() {
574 if (CGM
.getCodeGenOpts().getFiniteLoops() ==
575 CodeGenOptions::FiniteLoopsKind::Never
)
578 // C++11 and later guarantees that a thread eventually will do one of the
579 // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
581 // - make a call to a library I/O function,
582 // - perform an access through a volatile glvalue, or
583 // - perform a synchronization operation or an atomic operation.
585 // Hence each function is 'mustprogress' in C++11 or later.
586 return getLangOpts().CPlusPlus11
;
589 /// Returns true if a loop must make progress, which means the mustprogress
590 /// attribute can be added. \p HasConstantCond indicates whether the branch
591 /// condition is a known constant.
592 bool checkIfLoopMustProgress(bool HasConstantCond
) {
593 if (CGM
.getCodeGenOpts().getFiniteLoops() ==
594 CodeGenOptions::FiniteLoopsKind::Always
)
596 if (CGM
.getCodeGenOpts().getFiniteLoops() ==
597 CodeGenOptions::FiniteLoopsKind::Never
)
600 // If the containing function must make progress, loops also must make
601 // progress (as in C++11 and later).
602 if (checkIfFunctionMustProgress())
605 // Now apply rules for plain C (see 6.8.5.6 in C11).
606 // Loops with constant conditions do not have to make progress in any C
611 // Loops with non-constant conditions must make progress in C11 and later.
612 return getLangOpts().C11
;
615 const CodeGen::CGBlockInfo
*BlockInfo
= nullptr;
616 llvm::Value
*BlockPointer
= nullptr;
618 llvm::DenseMap
<const ValueDecl
*, FieldDecl
*> LambdaCaptureFields
;
619 FieldDecl
*LambdaThisCaptureField
= nullptr;
621 /// A mapping from NRVO variables to the flags used to indicate
622 /// when the NRVO has been applied to this variable.
623 llvm::DenseMap
<const VarDecl
*, llvm::Value
*> NRVOFlags
;
625 EHScopeStack EHStack
;
626 llvm::SmallVector
<char, 256> LifetimeExtendedCleanupStack
;
627 llvm::SmallVector
<const JumpDest
*, 2> SEHTryEpilogueStack
;
629 llvm::Instruction
*CurrentFuncletPad
= nullptr;
631 class CallLifetimeEnd final
: public EHScopeStack::Cleanup
{
632 bool isRedundantBeforeReturn() override
{ return true; }
638 CallLifetimeEnd(Address addr
, llvm::Value
*size
)
639 : Addr(addr
.getPointer()), Size(size
) {}
641 void Emit(CodeGenFunction
&CGF
, Flags flags
) override
{
642 CGF
.EmitLifetimeEnd(Size
, Addr
);
646 /// Header for data within LifetimeExtendedCleanupStack.
647 struct LifetimeExtendedCleanupHeader
{
648 /// The size of the following cleanup object.
650 /// The kind of cleanup to push: a value from the CleanupKind enumeration.
652 /// Whether this is a conditional cleanup.
653 unsigned IsConditional
: 1;
655 size_t getSize() const { return Size
; }
656 CleanupKind
getKind() const { return (CleanupKind
)Kind
; }
657 bool isConditional() const { return IsConditional
; }
660 /// i32s containing the indexes of the cleanup destinations.
661 Address NormalCleanupDest
= Address::invalid();
663 unsigned NextCleanupDestIndex
= 1;
665 /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
666 llvm::BasicBlock
*EHResumeBlock
= nullptr;
668 /// The exception slot. All landing pads write the current exception pointer
669 /// into this alloca.
670 llvm::Value
*ExceptionSlot
= nullptr;
672 /// The selector slot. Under the MandatoryCleanup model, all landing pads
673 /// write the current selector value into this alloca.
674 llvm::AllocaInst
*EHSelectorSlot
= nullptr;
676 /// A stack of exception code slots. Entering an __except block pushes a slot
677 /// on the stack and leaving pops one. The __exception_code() intrinsic loads
678 /// a value from the top of the stack.
679 SmallVector
<Address
, 1> SEHCodeSlotStack
;
681 /// Value returned by __exception_info intrinsic.
682 llvm::Value
*SEHInfo
= nullptr;
684 /// Emits a landing pad for the current EH stack.
685 llvm::BasicBlock
*EmitLandingPad();
687 llvm::BasicBlock
*getInvokeDestImpl();
689 /// Parent loop-based directive for scan directive.
690 const OMPExecutableDirective
*OMPParentLoopDirectiveForScan
= nullptr;
691 llvm::BasicBlock
*OMPBeforeScanBlock
= nullptr;
692 llvm::BasicBlock
*OMPAfterScanBlock
= nullptr;
693 llvm::BasicBlock
*OMPScanExitBlock
= nullptr;
694 llvm::BasicBlock
*OMPScanDispatch
= nullptr;
695 bool OMPFirstScanLoop
= false;
697 /// Manages parent directive for scan directives.
698 class ParentLoopDirectiveForScanRegion
{
699 CodeGenFunction
&CGF
;
700 const OMPExecutableDirective
*ParentLoopDirectiveForScan
;
703 ParentLoopDirectiveForScanRegion(
704 CodeGenFunction
&CGF
,
705 const OMPExecutableDirective
&ParentLoopDirectiveForScan
)
707 ParentLoopDirectiveForScan(CGF
.OMPParentLoopDirectiveForScan
) {
708 CGF
.OMPParentLoopDirectiveForScan
= &ParentLoopDirectiveForScan
;
710 ~ParentLoopDirectiveForScanRegion() {
711 CGF
.OMPParentLoopDirectiveForScan
= ParentLoopDirectiveForScan
;
716 typename DominatingValue
<T
>::saved_type
saveValueInCond(T value
) {
717 return DominatingValue
<T
>::save(*this, value
);
720 class CGFPOptionsRAII
{
722 CGFPOptionsRAII(CodeGenFunction
&CGF
, FPOptions FPFeatures
);
723 CGFPOptionsRAII(CodeGenFunction
&CGF
, const Expr
*E
);
727 void ConstructorHelper(FPOptions FPFeatures
);
728 CodeGenFunction
&CGF
;
729 FPOptions OldFPFeatures
;
730 llvm::fp::ExceptionBehavior OldExcept
;
731 llvm::RoundingMode OldRounding
;
732 std::optional
<CGBuilderTy::FastMathFlagGuard
> FMFGuard
;
734 FPOptions CurFPFeatures
;
737 /// ObjCEHValueStack - Stack of Objective-C exception values, used for
739 SmallVector
<llvm::Value
*, 8> ObjCEHValueStack
;
741 /// A class controlling the emission of a finally block.
743 /// Where the catchall's edge through the cleanup should go.
744 JumpDest RethrowDest
;
746 /// A function to call to enter the catch.
747 llvm::FunctionCallee BeginCatchFn
;
749 /// An i1 variable indicating whether or not the @finally is
750 /// running for an exception.
751 llvm::AllocaInst
*ForEHVar
;
753 /// An i8* variable into which the exception pointer to rethrow
755 llvm::AllocaInst
*SavedExnVar
;
758 void enter(CodeGenFunction
&CGF
, const Stmt
*Finally
,
759 llvm::FunctionCallee beginCatchFn
,
760 llvm::FunctionCallee endCatchFn
, llvm::FunctionCallee rethrowFn
);
761 void exit(CodeGenFunction
&CGF
);
764 /// Returns true inside SEH __try blocks.
765 bool isSEHTryScope() const { return !SEHTryEpilogueStack
.empty(); }
767 /// Returns true while emitting a cleanuppad.
768 bool isCleanupPadScope() const {
769 return CurrentFuncletPad
&& isa
<llvm::CleanupPadInst
>(CurrentFuncletPad
);
772 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
773 /// current full-expression. Safe against the possibility that
774 /// we're currently inside a conditionally-evaluated expression.
775 template <class T
, class... As
>
776 void pushFullExprCleanup(CleanupKind kind
, As
... A
) {
777 // If we're not in a conditional branch, or if none of the
778 // arguments requires saving, then use the unconditional cleanup.
779 if (!isInConditionalBranch())
780 return EHStack
.pushCleanup
<T
>(kind
, A
...);
782 // Stash values in a tuple so we can guarantee the order of saves.
783 typedef std::tuple
<typename DominatingValue
<As
>::saved_type
...> SavedTuple
;
784 SavedTuple Saved
{saveValueInCond(A
)...};
786 typedef EHScopeStack::ConditionalCleanup
<T
, As
...> CleanupType
;
787 EHStack
.pushCleanupTuple
<CleanupType
>(kind
, Saved
);
788 initFullExprCleanup();
791 /// Queue a cleanup to be pushed after finishing the current full-expression,
792 /// potentially with an active flag.
793 template <class T
, class... As
>
794 void pushCleanupAfterFullExpr(CleanupKind Kind
, As
... A
) {
795 if (!isInConditionalBranch())
796 return pushCleanupAfterFullExprWithActiveFlag
<T
>(Kind
, Address::invalid(),
799 Address ActiveFlag
= createCleanupActiveFlag();
800 assert(!DominatingValue
<Address
>::needsSaving(ActiveFlag
) &&
801 "cleanup active flag should never need saving");
803 typedef std::tuple
<typename DominatingValue
<As
>::saved_type
...> SavedTuple
;
804 SavedTuple Saved
{saveValueInCond(A
)...};
806 typedef EHScopeStack::ConditionalCleanup
<T
, As
...> CleanupType
;
807 pushCleanupAfterFullExprWithActiveFlag
<CleanupType
>(Kind
, ActiveFlag
, Saved
);
810 template <class T
, class... As
>
811 void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind
,
812 Address ActiveFlag
, As
... A
) {
813 LifetimeExtendedCleanupHeader Header
= {sizeof(T
), Kind
,
814 ActiveFlag
.isValid()};
816 size_t OldSize
= LifetimeExtendedCleanupStack
.size();
817 LifetimeExtendedCleanupStack
.resize(
818 LifetimeExtendedCleanupStack
.size() + sizeof(Header
) + Header
.Size
+
819 (Header
.IsConditional
? sizeof(ActiveFlag
) : 0));
821 static_assert(sizeof(Header
) % alignof(T
) == 0,
822 "Cleanup will be allocated on misaligned address");
823 char *Buffer
= &LifetimeExtendedCleanupStack
[OldSize
];
824 new (Buffer
) LifetimeExtendedCleanupHeader(Header
);
825 new (Buffer
+ sizeof(Header
)) T(A
...);
826 if (Header
.IsConditional
)
827 new (Buffer
+ sizeof(Header
) + sizeof(T
)) Address(ActiveFlag
);
830 /// Set up the last cleanup that was pushed as a conditional
831 /// full-expression cleanup.
832 void initFullExprCleanup() {
833 initFullExprCleanupWithFlag(createCleanupActiveFlag());
836 void initFullExprCleanupWithFlag(Address ActiveFlag
);
837 Address
createCleanupActiveFlag();
839 /// PushDestructorCleanup - Push a cleanup to call the
840 /// complete-object destructor of an object of the given type at the
841 /// given address. Does nothing if T is not a C++ class type with a
842 /// non-trivial destructor.
843 void PushDestructorCleanup(QualType T
, Address Addr
);
845 /// PushDestructorCleanup - Push a cleanup to call the
846 /// complete-object variant of the given destructor on the object at
847 /// the given address.
848 void PushDestructorCleanup(const CXXDestructorDecl
*Dtor
, QualType T
,
851 /// PopCleanupBlock - Will pop the cleanup entry on the stack and
852 /// process all branch fixups.
853 void PopCleanupBlock(bool FallThroughIsBranchThrough
= false);
855 /// DeactivateCleanupBlock - Deactivates the given cleanup block.
856 /// The block cannot be reactivated. Pops it if it's the top of the
859 /// \param DominatingIP - An instruction which is known to
860 /// dominate the current IP (if set) and which lies along
861 /// all paths of execution between the current IP and the
862 /// the point at which the cleanup comes into scope.
863 void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup
,
864 llvm::Instruction
*DominatingIP
);
866 /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
867 /// Cannot be used to resurrect a deactivated cleanup.
869 /// \param DominatingIP - An instruction which is known to
870 /// dominate the current IP (if set) and which lies along
871 /// all paths of execution between the current IP and the
872 /// the point at which the cleanup comes into scope.
873 void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup
,
874 llvm::Instruction
*DominatingIP
);
876 /// Enters a new scope for capturing cleanups, all of which
877 /// will be executed once the scope is exited.
878 class RunCleanupsScope
{
879 EHScopeStack::stable_iterator CleanupStackDepth
, OldCleanupScopeDepth
;
880 size_t LifetimeExtendedCleanupStackSize
;
881 bool OldDidCallStackSave
;
886 RunCleanupsScope(const RunCleanupsScope
&) = delete;
887 void operator=(const RunCleanupsScope
&) = delete;
890 CodeGenFunction
& CGF
;
893 /// Enter a new cleanup scope.
894 explicit RunCleanupsScope(CodeGenFunction
&CGF
)
895 : PerformCleanup(true), CGF(CGF
)
897 CleanupStackDepth
= CGF
.EHStack
.stable_begin();
898 LifetimeExtendedCleanupStackSize
=
899 CGF
.LifetimeExtendedCleanupStack
.size();
900 OldDidCallStackSave
= CGF
.DidCallStackSave
;
901 CGF
.DidCallStackSave
= false;
902 OldCleanupScopeDepth
= CGF
.CurrentCleanupScopeDepth
;
903 CGF
.CurrentCleanupScopeDepth
= CleanupStackDepth
;
906 /// Exit this cleanup scope, emitting any accumulated cleanups.
907 ~RunCleanupsScope() {
912 /// Determine whether this scope requires any cleanups.
913 bool requiresCleanups() const {
914 return CGF
.EHStack
.stable_begin() != CleanupStackDepth
;
917 /// Force the emission of cleanups now, instead of waiting
918 /// until this object is destroyed.
919 /// \param ValuesToReload - A list of values that need to be available at
920 /// the insertion point after cleanup emission. If cleanup emission created
921 /// a shared cleanup block, these value pointers will be rewritten.
922 /// Otherwise, they will not be modified.
923 void ForceCleanup(std::initializer_list
<llvm::Value
**> ValuesToReload
= {}) {
924 assert(PerformCleanup
&& "Already forced cleanup");
925 CGF
.DidCallStackSave
= OldDidCallStackSave
;
926 CGF
.PopCleanupBlocks(CleanupStackDepth
, LifetimeExtendedCleanupStackSize
,
928 PerformCleanup
= false;
929 CGF
.CurrentCleanupScopeDepth
= OldCleanupScopeDepth
;
933 // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
934 EHScopeStack::stable_iterator CurrentCleanupScopeDepth
=
935 EHScopeStack::stable_end();
937 class LexicalScope
: public RunCleanupsScope
{
939 SmallVector
<const LabelDecl
*, 4> Labels
;
940 LexicalScope
*ParentScope
;
942 LexicalScope(const LexicalScope
&) = delete;
943 void operator=(const LexicalScope
&) = delete;
946 /// Enter a new cleanup scope.
947 explicit LexicalScope(CodeGenFunction
&CGF
, SourceRange Range
)
948 : RunCleanupsScope(CGF
), Range(Range
), ParentScope(CGF
.CurLexicalScope
) {
949 CGF
.CurLexicalScope
= this;
950 if (CGDebugInfo
*DI
= CGF
.getDebugInfo())
951 DI
->EmitLexicalBlockStart(CGF
.Builder
, Range
.getBegin());
954 void addLabel(const LabelDecl
*label
) {
955 assert(PerformCleanup
&& "adding label to dead scope?");
956 Labels
.push_back(label
);
959 /// Exit this cleanup scope, emitting any accumulated
962 if (CGDebugInfo
*DI
= CGF
.getDebugInfo())
963 DI
->EmitLexicalBlockEnd(CGF
.Builder
, Range
.getEnd());
965 // If we should perform a cleanup, force them now. Note that
966 // this ends the cleanup scope before rescoping any labels.
967 if (PerformCleanup
) {
968 ApplyDebugLocation
DL(CGF
, Range
.getEnd());
973 /// Force the emission of cleanups now, instead of waiting
974 /// until this object is destroyed.
975 void ForceCleanup() {
976 CGF
.CurLexicalScope
= ParentScope
;
977 RunCleanupsScope::ForceCleanup();
983 bool hasLabels() const {
984 return !Labels
.empty();
987 void rescopeLabels();
990 typedef llvm::DenseMap
<const Decl
*, Address
> DeclMapTy
;
992 /// The class used to assign some variables some temporarily addresses.
994 DeclMapTy SavedLocals
;
995 DeclMapTy SavedTempAddresses
;
996 OMPMapVars(const OMPMapVars
&) = delete;
997 void operator=(const OMPMapVars
&) = delete;
1000 explicit OMPMapVars() = default;
1002 assert(SavedLocals
.empty() && "Did not restored original addresses.");
1005 /// Sets the address of the variable \p LocalVD to be \p TempAddr in
1006 /// function \p CGF.
1007 /// \return true if at least one variable was set already, false otherwise.
1008 bool setVarAddr(CodeGenFunction
&CGF
, const VarDecl
*LocalVD
,
1010 LocalVD
= LocalVD
->getCanonicalDecl();
1011 // Only save it once.
1012 if (SavedLocals
.count(LocalVD
)) return false;
1014 // Copy the existing local entry to SavedLocals.
1015 auto it
= CGF
.LocalDeclMap
.find(LocalVD
);
1016 if (it
!= CGF
.LocalDeclMap
.end())
1017 SavedLocals
.try_emplace(LocalVD
, it
->second
);
1019 SavedLocals
.try_emplace(LocalVD
, Address::invalid());
1021 // Generate the private entry.
1022 QualType VarTy
= LocalVD
->getType();
1023 if (VarTy
->isReferenceType()) {
1024 Address Temp
= CGF
.CreateMemTemp(VarTy
);
1025 CGF
.Builder
.CreateStore(TempAddr
.getPointer(), Temp
);
1028 SavedTempAddresses
.try_emplace(LocalVD
, TempAddr
);
1033 /// Applies new addresses to the list of the variables.
1034 /// \return true if at least one variable is using new address, false
1036 bool apply(CodeGenFunction
&CGF
) {
1037 copyInto(SavedTempAddresses
, CGF
.LocalDeclMap
);
1038 SavedTempAddresses
.clear();
1039 return !SavedLocals
.empty();
1042 /// Restores original addresses of the variables.
1043 void restore(CodeGenFunction
&CGF
) {
1044 if (!SavedLocals
.empty()) {
1045 copyInto(SavedLocals
, CGF
.LocalDeclMap
);
1046 SavedLocals
.clear();
1051 /// Copy all the entries in the source map over the corresponding
1052 /// entries in the destination, which must exist.
1053 static void copyInto(const DeclMapTy
&Src
, DeclMapTy
&Dest
) {
1054 for (auto &Pair
: Src
) {
1055 if (!Pair
.second
.isValid()) {
1056 Dest
.erase(Pair
.first
);
1060 auto I
= Dest
.find(Pair
.first
);
1061 if (I
!= Dest
.end())
1062 I
->second
= Pair
.second
;
1069 /// The scope used to remap some variables as private in the OpenMP loop body
1070 /// (or other captured region emitted without outlining), and to restore old
1071 /// vars back on exit.
1072 class OMPPrivateScope
: public RunCleanupsScope
{
1073 OMPMapVars MappedVars
;
1074 OMPPrivateScope(const OMPPrivateScope
&) = delete;
1075 void operator=(const OMPPrivateScope
&) = delete;
1078 /// Enter a new OpenMP private scope.
1079 explicit OMPPrivateScope(CodeGenFunction
&CGF
) : RunCleanupsScope(CGF
) {}
1081 /// Registers \p LocalVD variable as a private with \p Addr as the address
1082 /// of the corresponding private variable. \p
1083 /// PrivateGen is the address of the generated private variable.
1084 /// \return true if the variable is registered as private, false if it has
1085 /// been privatized already.
1086 bool addPrivate(const VarDecl
*LocalVD
, Address Addr
) {
1087 assert(PerformCleanup
&& "adding private to dead scope");
1088 return MappedVars
.setVarAddr(CGF
, LocalVD
, Addr
);
1091 /// Privatizes local variables previously registered as private.
1092 /// Registration is separate from the actual privatization to allow
1093 /// initializers use values of the original variables, not the private one.
1094 /// This is important, for example, if the private variable is a class
1095 /// variable initialized by a constructor that references other private
1096 /// variables. But at initialization original variables must be used, not
1098 /// \return true if at least one variable was privatized, false otherwise.
1099 bool Privatize() { return MappedVars
.apply(CGF
); }
1101 void ForceCleanup() {
1102 RunCleanupsScope::ForceCleanup();
1106 /// Exit scope - all the mapped variables are restored.
1107 ~OMPPrivateScope() {
1112 /// Checks if the global variable is captured in current function.
1113 bool isGlobalVarCaptured(const VarDecl
*VD
) const {
1114 VD
= VD
->getCanonicalDecl();
1115 return !VD
->isLocalVarDeclOrParm() && CGF
.LocalDeclMap
.count(VD
) > 0;
1118 /// Restore all mapped variables w/o clean up. This is usefully when we want
1119 /// to reference the original variables but don't want the clean up because
1120 /// that could emit lifetime end too early, causing backend issue #56913.
1121 void restoreMap() { MappedVars
.restore(CGF
); }
1124 /// Save/restore original map of previously emitted local vars in case when we
1125 /// need to duplicate emission of the same code several times in the same
1126 /// function for OpenMP code.
1127 class OMPLocalDeclMapRAII
{
1128 CodeGenFunction
&CGF
;
1132 OMPLocalDeclMapRAII(CodeGenFunction
&CGF
)
1133 : CGF(CGF
), SavedMap(CGF
.LocalDeclMap
) {}
1134 ~OMPLocalDeclMapRAII() { SavedMap
.swap(CGF
.LocalDeclMap
); }
1137 /// Takes the old cleanup stack size and emits the cleanup blocks
1138 /// that have been added.
1140 PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize
,
1141 std::initializer_list
<llvm::Value
**> ValuesToReload
= {});
1143 /// Takes the old cleanup stack size and emits the cleanup blocks
1144 /// that have been added, then adds all lifetime-extended cleanups from
1145 /// the given position to the stack.
1147 PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize
,
1148 size_t OldLifetimeExtendedStackSize
,
1149 std::initializer_list
<llvm::Value
**> ValuesToReload
= {});
1151 void ResolveBranchFixups(llvm::BasicBlock
*Target
);
1153 /// The given basic block lies in the current EH scope, but may be a
1154 /// target of a potentially scope-crossing jump; get a stable handle
1155 /// to which we can perform this jump later.
1156 JumpDest
getJumpDestInCurrentScope(llvm::BasicBlock
*Target
) {
1157 return JumpDest(Target
,
1158 EHStack
.getInnermostNormalCleanup(),
1159 NextCleanupDestIndex
++);
1162 /// The given basic block lies in the current EH scope, but may be a
1163 /// target of a potentially scope-crossing jump; get a stable handle
1164 /// to which we can perform this jump later.
1165 JumpDest
getJumpDestInCurrentScope(StringRef Name
= StringRef()) {
1166 return getJumpDestInCurrentScope(createBasicBlock(Name
));
1169 /// EmitBranchThroughCleanup - Emit a branch from the current insert
1170 /// block through the normal cleanup handling code (if any) and then
1171 /// on to \arg Dest.
1172 void EmitBranchThroughCleanup(JumpDest Dest
);
1174 /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
1175 /// specified destination obviously has no cleanups to run. 'false' is always
1176 /// a conservatively correct answer for this method.
1177 bool isObviouslyBranchWithoutCleanups(JumpDest Dest
) const;
1179 /// popCatchScope - Pops the catch scope at the top of the EHScope
1180 /// stack, emitting any required code (other than the catch handlers
1182 void popCatchScope();
1184 llvm::BasicBlock
*getEHResumeBlock(bool isCleanup
);
1185 llvm::BasicBlock
*getEHDispatchBlock(EHScopeStack::stable_iterator scope
);
1187 getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope
);
1189 /// An object to manage conditionally-evaluated expressions.
1190 class ConditionalEvaluation
{
1191 llvm::BasicBlock
*StartBB
;
1194 ConditionalEvaluation(CodeGenFunction
&CGF
)
1195 : StartBB(CGF
.Builder
.GetInsertBlock()) {}
1197 void begin(CodeGenFunction
&CGF
) {
1198 assert(CGF
.OutermostConditional
!= this);
1199 if (!CGF
.OutermostConditional
)
1200 CGF
.OutermostConditional
= this;
1203 void end(CodeGenFunction
&CGF
) {
1204 assert(CGF
.OutermostConditional
!= nullptr);
1205 if (CGF
.OutermostConditional
== this)
1206 CGF
.OutermostConditional
= nullptr;
1209 /// Returns a block which will be executed prior to each
1210 /// evaluation of the conditional code.
1211 llvm::BasicBlock
*getStartingBlock() const {
1216 /// isInConditionalBranch - Return true if we're currently emitting
1217 /// one branch or the other of a conditional expression.
1218 bool isInConditionalBranch() const { return OutermostConditional
!= nullptr; }
1220 void setBeforeOutermostConditional(llvm::Value
*value
, Address addr
) {
1221 assert(isInConditionalBranch());
1222 llvm::BasicBlock
*block
= OutermostConditional
->getStartingBlock();
1223 auto store
= new llvm::StoreInst(value
, addr
.getPointer(), &block
->back());
1224 store
->setAlignment(addr
.getAlignment().getAsAlign());
1227 /// An RAII object to record that we're evaluating a statement
1229 class StmtExprEvaluation
{
1230 CodeGenFunction
&CGF
;
1232 /// We have to save the outermost conditional: cleanups in a
1233 /// statement expression aren't conditional just because the
1235 ConditionalEvaluation
*SavedOutermostConditional
;
1238 StmtExprEvaluation(CodeGenFunction
&CGF
)
1239 : CGF(CGF
), SavedOutermostConditional(CGF
.OutermostConditional
) {
1240 CGF
.OutermostConditional
= nullptr;
1243 ~StmtExprEvaluation() {
1244 CGF
.OutermostConditional
= SavedOutermostConditional
;
1245 CGF
.EnsureInsertPoint();
1249 /// An object which temporarily prevents a value from being
1250 /// destroyed by aggressive peephole optimizations that assume that
1251 /// all uses of a value have been realized in the IR.
1252 class PeepholeProtection
{
1253 llvm::Instruction
*Inst
;
1254 friend class CodeGenFunction
;
1257 PeepholeProtection() : Inst(nullptr) {}
1260 /// A non-RAII class containing all the information about a bound
1261 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
1262 /// this which makes individual mappings very simple; using this
1263 /// class directly is useful when you have a variable number of
1264 /// opaque values or don't want the RAII functionality for some
1266 class OpaqueValueMappingData
{
1267 const OpaqueValueExpr
*OpaqueValue
;
1269 CodeGenFunction::PeepholeProtection Protection
;
1271 OpaqueValueMappingData(const OpaqueValueExpr
*ov
,
1273 : OpaqueValue(ov
), BoundLValue(boundLValue
) {}
1275 OpaqueValueMappingData() : OpaqueValue(nullptr) {}
1277 static bool shouldBindAsLValue(const Expr
*expr
) {
1278 // gl-values should be bound as l-values for obvious reasons.
1279 // Records should be bound as l-values because IR generation
1280 // always keeps them in memory. Expressions of function type
1281 // act exactly like l-values but are formally required to be
1283 return expr
->isGLValue() ||
1284 expr
->getType()->isFunctionType() ||
1285 hasAggregateEvaluationKind(expr
->getType());
1288 static OpaqueValueMappingData
bind(CodeGenFunction
&CGF
,
1289 const OpaqueValueExpr
*ov
,
1291 if (shouldBindAsLValue(ov
))
1292 return bind(CGF
, ov
, CGF
.EmitLValue(e
));
1293 return bind(CGF
, ov
, CGF
.EmitAnyExpr(e
));
1296 static OpaqueValueMappingData
bind(CodeGenFunction
&CGF
,
1297 const OpaqueValueExpr
*ov
,
1299 assert(shouldBindAsLValue(ov
));
1300 CGF
.OpaqueLValues
.insert(std::make_pair(ov
, lv
));
1301 return OpaqueValueMappingData(ov
, true);
1304 static OpaqueValueMappingData
bind(CodeGenFunction
&CGF
,
1305 const OpaqueValueExpr
*ov
,
1307 assert(!shouldBindAsLValue(ov
));
1308 CGF
.OpaqueRValues
.insert(std::make_pair(ov
, rv
));
1310 OpaqueValueMappingData
data(ov
, false);
1312 // Work around an extremely aggressive peephole optimization in
1313 // EmitScalarConversion which assumes that all other uses of a
1314 // value are extant.
1315 data
.Protection
= CGF
.protectFromPeepholes(rv
);
1320 bool isValid() const { return OpaqueValue
!= nullptr; }
1321 void clear() { OpaqueValue
= nullptr; }
1323 void unbind(CodeGenFunction
&CGF
) {
1324 assert(OpaqueValue
&& "no data to unbind!");
1327 CGF
.OpaqueLValues
.erase(OpaqueValue
);
1329 CGF
.OpaqueRValues
.erase(OpaqueValue
);
1330 CGF
.unprotectFromPeepholes(Protection
);
1335 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1336 class OpaqueValueMapping
{
1337 CodeGenFunction
&CGF
;
1338 OpaqueValueMappingData Data
;
1341 static bool shouldBindAsLValue(const Expr
*expr
) {
1342 return OpaqueValueMappingData::shouldBindAsLValue(expr
);
1345 /// Build the opaque value mapping for the given conditional
1346 /// operator if it's the GNU ?: extension. This is a common
1347 /// enough pattern that the convenience operator is really
1350 OpaqueValueMapping(CodeGenFunction
&CGF
,
1351 const AbstractConditionalOperator
*op
) : CGF(CGF
) {
1352 if (isa
<ConditionalOperator
>(op
))
1353 // Leave Data empty.
1356 const BinaryConditionalOperator
*e
= cast
<BinaryConditionalOperator
>(op
);
1357 Data
= OpaqueValueMappingData::bind(CGF
, e
->getOpaqueValue(),
1361 /// Build the opaque value mapping for an OpaqueValueExpr whose source
1362 /// expression is set to the expression the OVE represents.
1363 OpaqueValueMapping(CodeGenFunction
&CGF
, const OpaqueValueExpr
*OV
)
1366 assert(OV
->getSourceExpr() && "wrong form of OpaqueValueMapping used "
1367 "for OVE with no source expression");
1368 Data
= OpaqueValueMappingData::bind(CGF
, OV
, OV
->getSourceExpr());
1372 OpaqueValueMapping(CodeGenFunction
&CGF
,
1373 const OpaqueValueExpr
*opaqueValue
,
1375 : CGF(CGF
), Data(OpaqueValueMappingData::bind(CGF
, opaqueValue
, lvalue
)) {
1378 OpaqueValueMapping(CodeGenFunction
&CGF
,
1379 const OpaqueValueExpr
*opaqueValue
,
1381 : CGF(CGF
), Data(OpaqueValueMappingData::bind(CGF
, opaqueValue
, rvalue
)) {
1389 ~OpaqueValueMapping() {
1390 if (Data
.isValid()) Data
.unbind(CGF
);
1395 CGDebugInfo
*DebugInfo
;
1396 /// Used to create unique names for artificial VLA size debug info variables.
1397 unsigned VLAExprCounter
= 0;
1398 bool DisableDebugInfo
= false;
1400 /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1401 /// calling llvm.stacksave for multiple VLAs in the same scope.
1402 bool DidCallStackSave
= false;
1404 /// IndirectBranch - The first time an indirect goto is seen we create a block
1405 /// with an indirect branch. Every time we see the address of a label taken,
1406 /// we add the label to the indirect goto. Every subsequent indirect goto is
1407 /// codegen'd as a jump to the IndirectBranch's basic block.
1408 llvm::IndirectBrInst
*IndirectBranch
= nullptr;
1410 /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1412 DeclMapTy LocalDeclMap
;
1414 // Keep track of the cleanups for callee-destructed parameters pushed to the
1415 // cleanup stack so that they can be deactivated later.
1416 llvm::DenseMap
<const ParmVarDecl
*, EHScopeStack::stable_iterator
>
1417 CalleeDestructedParamCleanups
;
1419 /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
1420 /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
1422 llvm::SmallDenseMap
<const ParmVarDecl
*, const ImplicitParamDecl
*, 2>
1425 /// Track escaped local variables with auto storage. Used during SEH
1426 /// outlining to produce a call to llvm.localescape.
1427 llvm::DenseMap
<llvm::AllocaInst
*, int> EscapedLocals
;
1429 /// LabelMap - This keeps track of the LLVM basic block for each C label.
1430 llvm::DenseMap
<const LabelDecl
*, JumpDest
> LabelMap
;
1432 // BreakContinueStack - This keeps track of where break and continue
1433 // statements should jump to.
1434 struct BreakContinue
{
1435 BreakContinue(JumpDest Break
, JumpDest Continue
)
1436 : BreakBlock(Break
), ContinueBlock(Continue
) {}
1438 JumpDest BreakBlock
;
1439 JumpDest ContinueBlock
;
1441 SmallVector
<BreakContinue
, 8> BreakContinueStack
;
1443 /// Handles cancellation exit points in OpenMP-related constructs.
1444 class OpenMPCancelExitStack
{
1445 /// Tracks cancellation exit point and join point for cancel-related exit
1446 /// and normal exit.
1448 CancelExit() = default;
1449 CancelExit(OpenMPDirectiveKind Kind
, JumpDest ExitBlock
,
1451 : Kind(Kind
), ExitBlock(ExitBlock
), ContBlock(ContBlock
) {}
1452 OpenMPDirectiveKind Kind
= llvm::omp::OMPD_unknown
;
1453 /// true if the exit block has been emitted already by the special
1454 /// emitExit() call, false if the default codegen is used.
1455 bool HasBeenEmitted
= false;
1460 SmallVector
<CancelExit
, 8> Stack
;
1463 OpenMPCancelExitStack() : Stack(1) {}
1464 ~OpenMPCancelExitStack() = default;
1465 /// Fetches the exit block for the current OpenMP construct.
1466 JumpDest
getExitBlock() const { return Stack
.back().ExitBlock
; }
1467 /// Emits exit block with special codegen procedure specific for the related
1468 /// OpenMP construct + emits code for normal construct cleanup.
1469 void emitExit(CodeGenFunction
&CGF
, OpenMPDirectiveKind Kind
,
1470 const llvm::function_ref
<void(CodeGenFunction
&)> CodeGen
) {
1471 if (Stack
.back().Kind
== Kind
&& getExitBlock().isValid()) {
1472 assert(CGF
.getOMPCancelDestination(Kind
).isValid());
1473 assert(CGF
.HaveInsertPoint());
1474 assert(!Stack
.back().HasBeenEmitted
);
1475 auto IP
= CGF
.Builder
.saveAndClearIP();
1476 CGF
.EmitBlock(Stack
.back().ExitBlock
.getBlock());
1478 CGF
.EmitBranch(Stack
.back().ContBlock
.getBlock());
1479 CGF
.Builder
.restoreIP(IP
);
1480 Stack
.back().HasBeenEmitted
= true;
1484 /// Enter the cancel supporting \a Kind construct.
1485 /// \param Kind OpenMP directive that supports cancel constructs.
1486 /// \param HasCancel true, if the construct has inner cancel directive,
1487 /// false otherwise.
1488 void enter(CodeGenFunction
&CGF
, OpenMPDirectiveKind Kind
, bool HasCancel
) {
1489 Stack
.push_back({Kind
,
1490 HasCancel
? CGF
.getJumpDestInCurrentScope("cancel.exit")
1492 HasCancel
? CGF
.getJumpDestInCurrentScope("cancel.cont")
1495 /// Emits default exit point for the cancel construct (if the special one
1496 /// has not be used) + join point for cancel/normal exits.
1497 void exit(CodeGenFunction
&CGF
) {
1498 if (getExitBlock().isValid()) {
1499 assert(CGF
.getOMPCancelDestination(Stack
.back().Kind
).isValid());
1500 bool HaveIP
= CGF
.HaveInsertPoint();
1501 if (!Stack
.back().HasBeenEmitted
) {
1503 CGF
.EmitBranchThroughCleanup(Stack
.back().ContBlock
);
1504 CGF
.EmitBlock(Stack
.back().ExitBlock
.getBlock());
1505 CGF
.EmitBranchThroughCleanup(Stack
.back().ContBlock
);
1507 CGF
.EmitBlock(Stack
.back().ContBlock
.getBlock());
1509 CGF
.Builder
.CreateUnreachable();
1510 CGF
.Builder
.ClearInsertionPoint();
1516 OpenMPCancelExitStack OMPCancelStack
;
1518 /// Lower the Likelihood knowledge about the \p Cond via llvm.expect intrin.
1519 llvm::Value
*emitCondLikelihoodViaExpectIntrinsic(llvm::Value
*Cond
,
1520 Stmt::Likelihood LH
);
1524 /// Calculate branch weights appropriate for PGO data
1525 llvm::MDNode
*createProfileWeights(uint64_t TrueCount
,
1526 uint64_t FalseCount
) const;
1527 llvm::MDNode
*createProfileWeights(ArrayRef
<uint64_t> Weights
) const;
1528 llvm::MDNode
*createProfileWeightsForLoop(const Stmt
*Cond
,
1529 uint64_t LoopCount
) const;
1532 /// Increment the profiler's counter for the given statement by \p StepV.
1533 /// If \p StepV is null, the default increment is 1.
1534 void incrementProfileCounter(const Stmt
*S
, llvm::Value
*StepV
= nullptr) {
1535 if (CGM
.getCodeGenOpts().hasProfileClangInstr() &&
1536 !CurFn
->hasFnAttribute(llvm::Attribute::NoProfile
) &&
1537 !CurFn
->hasFnAttribute(llvm::Attribute::SkipProfile
))
1538 PGO
.emitCounterIncrement(Builder
, S
, StepV
);
1539 PGO
.setCurrentStmt(S
);
1542 /// Get the profiler's count for the given statement.
1543 uint64_t getProfileCount(const Stmt
*S
) {
1544 return PGO
.getStmtCount(S
).value_or(0);
1547 /// Set the profiler's current count.
1548 void setCurrentProfileCount(uint64_t Count
) {
1549 PGO
.setCurrentRegionCount(Count
);
1552 /// Get the profiler's current count. This is generally the count for the most
1553 /// recently incremented counter.
1554 uint64_t getCurrentProfileCount() {
1555 return PGO
.getCurrentRegionCount();
1560 /// SwitchInsn - This is nearest current switch instruction. It is null if
1561 /// current context is not in a switch.
1562 llvm::SwitchInst
*SwitchInsn
= nullptr;
1563 /// The branch weights of SwitchInsn when doing instrumentation based PGO.
1564 SmallVector
<uint64_t, 16> *SwitchWeights
= nullptr;
1566 /// The likelihood attributes of the SwitchCase.
1567 SmallVector
<Stmt::Likelihood
, 16> *SwitchLikelihood
= nullptr;
1569 /// CaseRangeBlock - This block holds if condition check for last case
1570 /// statement range in current switch instruction.
1571 llvm::BasicBlock
*CaseRangeBlock
= nullptr;
1573 /// OpaqueLValues - Keeps track of the current set of opaque value
1575 llvm::DenseMap
<const OpaqueValueExpr
*, LValue
> OpaqueLValues
;
1576 llvm::DenseMap
<const OpaqueValueExpr
*, RValue
> OpaqueRValues
;
1578 // VLASizeMap - This keeps track of the associated size for each VLA type.
1579 // We track this by the size expression rather than the type itself because
1580 // in certain situations, like a const qualifier applied to an VLA typedef,
1581 // multiple VLA types can share the same size expression.
1582 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1583 // enter/leave scopes.
1584 llvm::DenseMap
<const Expr
*, llvm::Value
*> VLASizeMap
;
1586 /// A block containing a single 'unreachable' instruction. Created
1587 /// lazily by getUnreachableBlock().
1588 llvm::BasicBlock
*UnreachableBlock
= nullptr;
1590 /// Counts of the number return expressions in the function.
1591 unsigned NumReturnExprs
= 0;
1593 /// Count the number of simple (constant) return expressions in the function.
1594 unsigned NumSimpleReturnExprs
= 0;
1596 /// The last regular (non-return) debug location (breakpoint) in the function.
1597 SourceLocation LastStopPoint
;
1600 /// Source location information about the default argument or member
1601 /// initializer expression we're evaluating, if any.
1602 CurrentSourceLocExprScope CurSourceLocExprScope
;
1603 using SourceLocExprScopeGuard
=
1604 CurrentSourceLocExprScope::SourceLocExprScopeGuard
;
1606 /// A scope within which we are constructing the fields of an object which
1607 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
1608 /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
1609 class FieldConstructionScope
{
1611 FieldConstructionScope(CodeGenFunction
&CGF
, Address This
)
1612 : CGF(CGF
), OldCXXDefaultInitExprThis(CGF
.CXXDefaultInitExprThis
) {
1613 CGF
.CXXDefaultInitExprThis
= This
;
1615 ~FieldConstructionScope() {
1616 CGF
.CXXDefaultInitExprThis
= OldCXXDefaultInitExprThis
;
1620 CodeGenFunction
&CGF
;
1621 Address OldCXXDefaultInitExprThis
;
1624 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
1625 /// is overridden to be the object under construction.
1626 class CXXDefaultInitExprScope
{
1628 CXXDefaultInitExprScope(CodeGenFunction
&CGF
, const CXXDefaultInitExpr
*E
)
1629 : CGF(CGF
), OldCXXThisValue(CGF
.CXXThisValue
),
1630 OldCXXThisAlignment(CGF
.CXXThisAlignment
),
1631 SourceLocScope(E
, CGF
.CurSourceLocExprScope
) {
1632 CGF
.CXXThisValue
= CGF
.CXXDefaultInitExprThis
.getPointer();
1633 CGF
.CXXThisAlignment
= CGF
.CXXDefaultInitExprThis
.getAlignment();
1635 ~CXXDefaultInitExprScope() {
1636 CGF
.CXXThisValue
= OldCXXThisValue
;
1637 CGF
.CXXThisAlignment
= OldCXXThisAlignment
;
1641 CodeGenFunction
&CGF
;
1642 llvm::Value
*OldCXXThisValue
;
1643 CharUnits OldCXXThisAlignment
;
1644 SourceLocExprScopeGuard SourceLocScope
;
1647 struct CXXDefaultArgExprScope
: SourceLocExprScopeGuard
{
1648 CXXDefaultArgExprScope(CodeGenFunction
&CGF
, const CXXDefaultArgExpr
*E
)
1649 : SourceLocExprScopeGuard(E
, CGF
.CurSourceLocExprScope
) {}
1652 /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
1653 /// current loop index is overridden.
1654 class ArrayInitLoopExprScope
{
1656 ArrayInitLoopExprScope(CodeGenFunction
&CGF
, llvm::Value
*Index
)
1657 : CGF(CGF
), OldArrayInitIndex(CGF
.ArrayInitIndex
) {
1658 CGF
.ArrayInitIndex
= Index
;
1660 ~ArrayInitLoopExprScope() {
1661 CGF
.ArrayInitIndex
= OldArrayInitIndex
;
1665 CodeGenFunction
&CGF
;
1666 llvm::Value
*OldArrayInitIndex
;
1669 class InlinedInheritingConstructorScope
{
1671 InlinedInheritingConstructorScope(CodeGenFunction
&CGF
, GlobalDecl GD
)
1672 : CGF(CGF
), OldCurGD(CGF
.CurGD
), OldCurFuncDecl(CGF
.CurFuncDecl
),
1673 OldCurCodeDecl(CGF
.CurCodeDecl
),
1674 OldCXXABIThisDecl(CGF
.CXXABIThisDecl
),
1675 OldCXXABIThisValue(CGF
.CXXABIThisValue
),
1676 OldCXXThisValue(CGF
.CXXThisValue
),
1677 OldCXXABIThisAlignment(CGF
.CXXABIThisAlignment
),
1678 OldCXXThisAlignment(CGF
.CXXThisAlignment
),
1679 OldReturnValue(CGF
.ReturnValue
), OldFnRetTy(CGF
.FnRetTy
),
1680 OldCXXInheritedCtorInitExprArgs(
1681 std::move(CGF
.CXXInheritedCtorInitExprArgs
)) {
1683 CGF
.CurFuncDecl
= CGF
.CurCodeDecl
=
1684 cast
<CXXConstructorDecl
>(GD
.getDecl());
1685 CGF
.CXXABIThisDecl
= nullptr;
1686 CGF
.CXXABIThisValue
= nullptr;
1687 CGF
.CXXThisValue
= nullptr;
1688 CGF
.CXXABIThisAlignment
= CharUnits();
1689 CGF
.CXXThisAlignment
= CharUnits();
1690 CGF
.ReturnValue
= Address::invalid();
1691 CGF
.FnRetTy
= QualType();
1692 CGF
.CXXInheritedCtorInitExprArgs
.clear();
1694 ~InlinedInheritingConstructorScope() {
1695 CGF
.CurGD
= OldCurGD
;
1696 CGF
.CurFuncDecl
= OldCurFuncDecl
;
1697 CGF
.CurCodeDecl
= OldCurCodeDecl
;
1698 CGF
.CXXABIThisDecl
= OldCXXABIThisDecl
;
1699 CGF
.CXXABIThisValue
= OldCXXABIThisValue
;
1700 CGF
.CXXThisValue
= OldCXXThisValue
;
1701 CGF
.CXXABIThisAlignment
= OldCXXABIThisAlignment
;
1702 CGF
.CXXThisAlignment
= OldCXXThisAlignment
;
1703 CGF
.ReturnValue
= OldReturnValue
;
1704 CGF
.FnRetTy
= OldFnRetTy
;
1705 CGF
.CXXInheritedCtorInitExprArgs
=
1706 std::move(OldCXXInheritedCtorInitExprArgs
);
1710 CodeGenFunction
&CGF
;
1711 GlobalDecl OldCurGD
;
1712 const Decl
*OldCurFuncDecl
;
1713 const Decl
*OldCurCodeDecl
;
1714 ImplicitParamDecl
*OldCXXABIThisDecl
;
1715 llvm::Value
*OldCXXABIThisValue
;
1716 llvm::Value
*OldCXXThisValue
;
1717 CharUnits OldCXXABIThisAlignment
;
1718 CharUnits OldCXXThisAlignment
;
1719 Address OldReturnValue
;
1720 QualType OldFnRetTy
;
1721 CallArgList OldCXXInheritedCtorInitExprArgs
;
1724 // Helper class for the OpenMP IR Builder. Allows reusability of code used for
1725 // region body, and finalization codegen callbacks. This will class will also
1726 // contain privatization functions used by the privatization call backs
1728 // TODO: this is temporary class for things that are being moved out of
1729 // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
1730 // utility function for use with the OMPBuilder. Once that move to use the
1731 // OMPBuilder is done, everything here will either become part of CodeGenFunc.
1732 // directly, or a new helper class that will contain functions used by both
1733 // this and the OMPBuilder
1735 struct OMPBuilderCBHelpers
{
1737 OMPBuilderCBHelpers() = delete;
1738 OMPBuilderCBHelpers(const OMPBuilderCBHelpers
&) = delete;
1739 OMPBuilderCBHelpers
&operator=(const OMPBuilderCBHelpers
&) = delete;
1741 using InsertPointTy
= llvm::OpenMPIRBuilder::InsertPointTy
;
1743 /// Cleanup action for allocate support.
1744 class OMPAllocateCleanupTy final
: public EHScopeStack::Cleanup
{
1747 llvm::CallInst
*RTLFnCI
;
1750 OMPAllocateCleanupTy(llvm::CallInst
*RLFnCI
) : RTLFnCI(RLFnCI
) {
1751 RLFnCI
->removeFromParent();
1754 void Emit(CodeGenFunction
&CGF
, Flags
/*flags*/) override
{
1755 if (!CGF
.HaveInsertPoint())
1757 CGF
.Builder
.Insert(RTLFnCI
);
1761 /// Returns address of the threadprivate variable for the current
1762 /// thread. This Also create any necessary OMP runtime calls.
1764 /// \param VD VarDecl for Threadprivate variable.
1765 /// \param VDAddr Address of the Vardecl
1766 /// \param Loc The location where the barrier directive was encountered
1767 static Address
getAddrOfThreadPrivate(CodeGenFunction
&CGF
,
1768 const VarDecl
*VD
, Address VDAddr
,
1769 SourceLocation Loc
);
1771 /// Gets the OpenMP-specific address of the local variable /p VD.
1772 static Address
getAddressOfLocalVariable(CodeGenFunction
&CGF
,
1774 /// Get the platform-specific name separator.
1775 /// \param Parts different parts of the final name that needs separation
1776 /// \param FirstSeparator First separator used between the initial two
1777 /// parts of the name.
1778 /// \param Separator separator used between all of the rest consecutinve
1779 /// parts of the name
1780 static std::string
getNameWithSeparators(ArrayRef
<StringRef
> Parts
,
1781 StringRef FirstSeparator
= ".",
1782 StringRef Separator
= ".");
1783 /// Emit the Finalization for an OMP region
1784 /// \param CGF The Codegen function this belongs to
1785 /// \param IP Insertion point for generating the finalization code.
1786 static void FinalizeOMPRegion(CodeGenFunction
&CGF
, InsertPointTy IP
) {
1787 CGBuilderTy::InsertPointGuard
IPG(CGF
.Builder
);
1788 assert(IP
.getBlock()->end() != IP
.getPoint() &&
1789 "OpenMP IR Builder should cause terminated block!");
1791 llvm::BasicBlock
*IPBB
= IP
.getBlock();
1792 llvm::BasicBlock
*DestBB
= IPBB
->getUniqueSuccessor();
1793 assert(DestBB
&& "Finalization block should have one successor!");
1795 // erase and replace with cleanup branch.
1796 IPBB
->getTerminator()->eraseFromParent();
1797 CGF
.Builder
.SetInsertPoint(IPBB
);
1798 CodeGenFunction::JumpDest Dest
= CGF
.getJumpDestInCurrentScope(DestBB
);
1799 CGF
.EmitBranchThroughCleanup(Dest
);
1802 /// Emit the body of an OMP region
1803 /// \param CGF The Codegen function this belongs to
1804 /// \param RegionBodyStmt The body statement for the OpenMP region being
1806 /// \param AllocaIP Where to insert alloca instructions
1807 /// \param CodeGenIP Where to insert the region code
1808 /// \param RegionName Name to be used for new blocks
1809 static void EmitOMPInlinedRegionBody(CodeGenFunction
&CGF
,
1810 const Stmt
*RegionBodyStmt
,
1811 InsertPointTy AllocaIP
,
1812 InsertPointTy CodeGenIP
,
1815 static void EmitCaptureStmt(CodeGenFunction
&CGF
, InsertPointTy CodeGenIP
,
1816 llvm::BasicBlock
&FiniBB
, llvm::Function
*Fn
,
1817 ArrayRef
<llvm::Value
*> Args
) {
1818 llvm::BasicBlock
*CodeGenIPBB
= CodeGenIP
.getBlock();
1819 if (llvm::Instruction
*CodeGenIPBBTI
= CodeGenIPBB
->getTerminator())
1820 CodeGenIPBBTI
->eraseFromParent();
1822 CGF
.Builder
.SetInsertPoint(CodeGenIPBB
);
1824 if (Fn
->doesNotThrow())
1825 CGF
.EmitNounwindRuntimeCall(Fn
, Args
);
1827 CGF
.EmitRuntimeCall(Fn
, Args
);
1829 if (CGF
.Builder
.saveIP().isSet())
1830 CGF
.Builder
.CreateBr(&FiniBB
);
1833 /// Emit the body of an OMP region that will be outlined in
1834 /// OpenMPIRBuilder::finalize().
1835 /// \param CGF The Codegen function this belongs to
1836 /// \param RegionBodyStmt The body statement for the OpenMP region being
1838 /// \param AllocaIP Where to insert alloca instructions
1839 /// \param CodeGenIP Where to insert the region code
1840 /// \param RegionName Name to be used for new blocks
1841 static void EmitOMPOutlinedRegionBody(CodeGenFunction
&CGF
,
1842 const Stmt
*RegionBodyStmt
,
1843 InsertPointTy AllocaIP
,
1844 InsertPointTy CodeGenIP
,
1847 /// RAII for preserving necessary info during Outlined region body codegen.
1848 class OutlinedRegionBodyRAII
{
1850 llvm::AssertingVH
<llvm::Instruction
> OldAllocaIP
;
1851 CodeGenFunction::JumpDest OldReturnBlock
;
1852 CodeGenFunction
&CGF
;
1855 OutlinedRegionBodyRAII(CodeGenFunction
&cgf
, InsertPointTy
&AllocaIP
,
1856 llvm::BasicBlock
&RetBB
)
1858 assert(AllocaIP
.isSet() &&
1859 "Must specify Insertion point for allocas of outlined function");
1860 OldAllocaIP
= CGF
.AllocaInsertPt
;
1861 CGF
.AllocaInsertPt
= &*AllocaIP
.getPoint();
1863 OldReturnBlock
= CGF
.ReturnBlock
;
1864 CGF
.ReturnBlock
= CGF
.getJumpDestInCurrentScope(&RetBB
);
1867 ~OutlinedRegionBodyRAII() {
1868 CGF
.AllocaInsertPt
= OldAllocaIP
;
1869 CGF
.ReturnBlock
= OldReturnBlock
;
1873 /// RAII for preserving necessary info during inlined region body codegen.
1874 class InlinedRegionBodyRAII
{
1876 llvm::AssertingVH
<llvm::Instruction
> OldAllocaIP
;
1877 CodeGenFunction
&CGF
;
1880 InlinedRegionBodyRAII(CodeGenFunction
&cgf
, InsertPointTy
&AllocaIP
,
1881 llvm::BasicBlock
&FiniBB
)
1883 // Alloca insertion block should be in the entry block of the containing
1884 // function so it expects an empty AllocaIP in which case will reuse the
1885 // old alloca insertion point, or a new AllocaIP in the same block as
1887 assert((!AllocaIP
.isSet() ||
1888 CGF
.AllocaInsertPt
->getParent() == AllocaIP
.getBlock()) &&
1889 "Insertion point should be in the entry block of containing "
1891 OldAllocaIP
= CGF
.AllocaInsertPt
;
1892 if (AllocaIP
.isSet())
1893 CGF
.AllocaInsertPt
= &*AllocaIP
.getPoint();
1895 // TODO: Remove the call, after making sure the counter is not used by
1897 // Since this is an inlined region, it should not modify the
1898 // ReturnBlock, and should reuse the one for the enclosing outlined
1899 // region. So, the JumpDest being return by the function is discarded
1900 (void)CGF
.getJumpDestInCurrentScope(&FiniBB
);
1903 ~InlinedRegionBodyRAII() { CGF
.AllocaInsertPt
= OldAllocaIP
; }
1908 /// CXXThisDecl - When generating code for a C++ member function,
1909 /// this will hold the implicit 'this' declaration.
1910 ImplicitParamDecl
*CXXABIThisDecl
= nullptr;
1911 llvm::Value
*CXXABIThisValue
= nullptr;
1912 llvm::Value
*CXXThisValue
= nullptr;
1913 CharUnits CXXABIThisAlignment
;
1914 CharUnits CXXThisAlignment
;
1916 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
1917 /// this expression.
1918 Address CXXDefaultInitExprThis
= Address::invalid();
1920 /// The current array initialization index when evaluating an
1921 /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
1922 llvm::Value
*ArrayInitIndex
= nullptr;
1924 /// The values of function arguments to use when evaluating
1925 /// CXXInheritedCtorInitExprs within this context.
1926 CallArgList CXXInheritedCtorInitExprArgs
;
1928 /// CXXStructorImplicitParamDecl - When generating code for a constructor or
1929 /// destructor, this will hold the implicit argument (e.g. VTT).
1930 ImplicitParamDecl
*CXXStructorImplicitParamDecl
= nullptr;
1931 llvm::Value
*CXXStructorImplicitParamValue
= nullptr;
1933 /// OutermostConditional - Points to the outermost active
1934 /// conditional control. This is used so that we know if a
1935 /// temporary should be destroyed conditionally.
1936 ConditionalEvaluation
*OutermostConditional
= nullptr;
1938 /// The current lexical scope.
1939 LexicalScope
*CurLexicalScope
= nullptr;
1941 /// The current source location that should be used for exception
1943 SourceLocation CurEHLocation
;
1945 /// BlockByrefInfos - For each __block variable, contains
1946 /// information about the layout of the variable.
1947 llvm::DenseMap
<const ValueDecl
*, BlockByrefInfo
> BlockByrefInfos
;
1949 /// Used by -fsanitize=nullability-return to determine whether the return
1950 /// value can be checked.
1951 llvm::Value
*RetValNullabilityPrecondition
= nullptr;
1953 /// Check if -fsanitize=nullability-return instrumentation is required for
1955 bool requiresReturnValueNullabilityCheck() const {
1956 return RetValNullabilityPrecondition
;
1959 /// Used to store precise source locations for return statements by the
1960 /// runtime return value checks.
1961 Address ReturnLocation
= Address::invalid();
1963 /// Check if the return value of this function requires sanitization.
1964 bool requiresReturnValueCheck() const;
1966 llvm::BasicBlock
*TerminateLandingPad
= nullptr;
1967 llvm::BasicBlock
*TerminateHandler
= nullptr;
1968 llvm::SmallVector
<llvm::BasicBlock
*, 2> TrapBBs
;
1970 /// Terminate funclets keyed by parent funclet pad.
1971 llvm::MapVector
<llvm::Value
*, llvm::BasicBlock
*> TerminateFunclets
;
1973 /// Largest vector width used in ths function. Will be used to create a
1974 /// function attribute.
1975 unsigned LargestVectorWidth
= 0;
1977 /// True if we need emit the life-time markers. This is initially set in
1978 /// the constructor, but could be overwritten to true if this is a coroutine.
1979 bool ShouldEmitLifetimeMarkers
;
1981 /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
1982 /// the function metadata.
1983 void EmitKernelMetadata(const FunctionDecl
*FD
, llvm::Function
*Fn
);
1986 CodeGenFunction(CodeGenModule
&cgm
, bool suppressNewContext
=false);
1989 CodeGenTypes
&getTypes() const { return CGM
.getTypes(); }
1990 ASTContext
&getContext() const { return CGM
.getContext(); }
1991 CGDebugInfo
*getDebugInfo() {
1992 if (DisableDebugInfo
)
1996 void disableDebugInfo() { DisableDebugInfo
= true; }
1997 void enableDebugInfo() { DisableDebugInfo
= false; }
1999 bool shouldUseFusedARCCalls() {
2000 return CGM
.getCodeGenOpts().OptimizationLevel
== 0;
2003 const LangOptions
&getLangOpts() const { return CGM
.getLangOpts(); }
2005 /// Returns a pointer to the function's exception object and selector slot,
2006 /// which is assigned in every landing pad.
2007 Address
getExceptionSlot();
2008 Address
getEHSelectorSlot();
2010 /// Returns the contents of the function's exception object and selector
2012 llvm::Value
*getExceptionFromSlot();
2013 llvm::Value
*getSelectorFromSlot();
2015 Address
getNormalCleanupDestSlot();
2017 llvm::BasicBlock
*getUnreachableBlock() {
2018 if (!UnreachableBlock
) {
2019 UnreachableBlock
= createBasicBlock("unreachable");
2020 new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock
);
2022 return UnreachableBlock
;
2025 llvm::BasicBlock
*getInvokeDest() {
2026 if (!EHStack
.requiresLandingPad()) return nullptr;
2027 return getInvokeDestImpl();
2030 bool currentFunctionUsesSEHTry() const { return !!CurSEHParent
; }
2032 const TargetInfo
&getTarget() const { return Target
; }
2033 llvm::LLVMContext
&getLLVMContext() { return CGM
.getLLVMContext(); }
2034 const TargetCodeGenInfo
&getTargetHooks() const {
2035 return CGM
.getTargetCodeGenInfo();
2038 //===--------------------------------------------------------------------===//
2040 //===--------------------------------------------------------------------===//
2042 typedef void Destroyer(CodeGenFunction
&CGF
, Address addr
, QualType ty
);
2044 void pushIrregularPartialArrayCleanup(llvm::Value
*arrayBegin
,
2045 Address arrayEndPointer
,
2046 QualType elementType
,
2047 CharUnits elementAlignment
,
2048 Destroyer
*destroyer
);
2049 void pushRegularPartialArrayCleanup(llvm::Value
*arrayBegin
,
2050 llvm::Value
*arrayEnd
,
2051 QualType elementType
,
2052 CharUnits elementAlignment
,
2053 Destroyer
*destroyer
);
2055 void pushDestroy(QualType::DestructionKind dtorKind
,
2056 Address addr
, QualType type
);
2057 void pushEHDestroy(QualType::DestructionKind dtorKind
,
2058 Address addr
, QualType type
);
2059 void pushDestroy(CleanupKind kind
, Address addr
, QualType type
,
2060 Destroyer
*destroyer
, bool useEHCleanupForArray
);
2061 void pushLifetimeExtendedDestroy(CleanupKind kind
, Address addr
,
2062 QualType type
, Destroyer
*destroyer
,
2063 bool useEHCleanupForArray
);
2064 void pushCallObjectDeleteCleanup(const FunctionDecl
*OperatorDelete
,
2065 llvm::Value
*CompletePtr
,
2066 QualType ElementType
);
2067 void pushStackRestore(CleanupKind kind
, Address SPMem
);
2068 void emitDestroy(Address addr
, QualType type
, Destroyer
*destroyer
,
2069 bool useEHCleanupForArray
);
2070 llvm::Function
*generateDestroyHelper(Address addr
, QualType type
,
2071 Destroyer
*destroyer
,
2072 bool useEHCleanupForArray
,
2074 void emitArrayDestroy(llvm::Value
*begin
, llvm::Value
*end
,
2075 QualType elementType
, CharUnits elementAlign
,
2076 Destroyer
*destroyer
,
2077 bool checkZeroLength
, bool useEHCleanup
);
2079 Destroyer
*getDestroyer(QualType::DestructionKind destructionKind
);
2081 /// Determines whether an EH cleanup is required to destroy a type
2082 /// with the given destruction kind.
2083 bool needsEHCleanup(QualType::DestructionKind kind
) {
2085 case QualType::DK_none
:
2087 case QualType::DK_cxx_destructor
:
2088 case QualType::DK_objc_weak_lifetime
:
2089 case QualType::DK_nontrivial_c_struct
:
2090 return getLangOpts().Exceptions
;
2091 case QualType::DK_objc_strong_lifetime
:
2092 return getLangOpts().Exceptions
&&
2093 CGM
.getCodeGenOpts().ObjCAutoRefCountExceptions
;
2095 llvm_unreachable("bad destruction kind");
2098 CleanupKind
getCleanupKind(QualType::DestructionKind kind
) {
2099 return (needsEHCleanup(kind
) ? NormalAndEHCleanup
: NormalCleanup
);
2102 //===--------------------------------------------------------------------===//
2104 //===--------------------------------------------------------------------===//
2106 void GenerateObjCMethod(const ObjCMethodDecl
*OMD
);
2108 void StartObjCMethod(const ObjCMethodDecl
*MD
, const ObjCContainerDecl
*CD
);
2110 /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
2111 void GenerateObjCGetter(ObjCImplementationDecl
*IMP
,
2112 const ObjCPropertyImplDecl
*PID
);
2113 void generateObjCGetterBody(const ObjCImplementationDecl
*classImpl
,
2114 const ObjCPropertyImplDecl
*propImpl
,
2115 const ObjCMethodDecl
*GetterMothodDecl
,
2116 llvm::Constant
*AtomicHelperFn
);
2118 void GenerateObjCCtorDtorMethod(ObjCImplementationDecl
*IMP
,
2119 ObjCMethodDecl
*MD
, bool ctor
);
2121 /// GenerateObjCSetter - Synthesize an Objective-C property setter function
2122 /// for the given property.
2123 void GenerateObjCSetter(ObjCImplementationDecl
*IMP
,
2124 const ObjCPropertyImplDecl
*PID
);
2125 void generateObjCSetterBody(const ObjCImplementationDecl
*classImpl
,
2126 const ObjCPropertyImplDecl
*propImpl
,
2127 llvm::Constant
*AtomicHelperFn
);
2129 //===--------------------------------------------------------------------===//
2131 //===--------------------------------------------------------------------===//
2133 /// Emit block literal.
2134 /// \return an LLVM value which is a pointer to a struct which contains
2135 /// information about the block, including the block invoke function, the
2136 /// captured variables, etc.
2137 llvm::Value
*EmitBlockLiteral(const BlockExpr
*);
2139 llvm::Function
*GenerateBlockFunction(GlobalDecl GD
,
2140 const CGBlockInfo
&Info
,
2141 const DeclMapTy
&ldm
,
2142 bool IsLambdaConversionToBlock
,
2143 bool BuildGlobalBlock
);
2145 /// Check if \p T is a C++ class that has a destructor that can throw.
2146 static bool cxxDestructorCanThrow(QualType T
);
2148 llvm::Constant
*GenerateCopyHelperFunction(const CGBlockInfo
&blockInfo
);
2149 llvm::Constant
*GenerateDestroyHelperFunction(const CGBlockInfo
&blockInfo
);
2150 llvm::Constant
*GenerateObjCAtomicSetterCopyHelperFunction(
2151 const ObjCPropertyImplDecl
*PID
);
2152 llvm::Constant
*GenerateObjCAtomicGetterCopyHelperFunction(
2153 const ObjCPropertyImplDecl
*PID
);
2154 llvm::Value
*EmitBlockCopyAndAutorelease(llvm::Value
*Block
, QualType Ty
);
2156 void BuildBlockRelease(llvm::Value
*DeclPtr
, BlockFieldFlags flags
,
2159 class AutoVarEmission
;
2161 void emitByrefStructureInit(const AutoVarEmission
&emission
);
2163 /// Enter a cleanup to destroy a __block variable. Note that this
2164 /// cleanup should be a no-op if the variable hasn't left the stack
2165 /// yet; if a cleanup is required for the variable itself, that needs
2166 /// to be done externally.
2168 /// \param Kind Cleanup kind.
2170 /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
2171 /// structure that will be passed to _Block_object_dispose. When
2172 /// \p LoadBlockVarAddr is true, the address of the field of the block
2173 /// structure that holds the address of the __block structure.
2175 /// \param Flags The flag that will be passed to _Block_object_dispose.
2177 /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
2178 /// \p Addr to get the address of the __block structure.
2179 void enterByrefCleanup(CleanupKind Kind
, Address Addr
, BlockFieldFlags Flags
,
2180 bool LoadBlockVarAddr
, bool CanThrow
);
2182 void setBlockContextParameter(const ImplicitParamDecl
*D
, unsigned argNum
,
2185 Address
LoadBlockStruct();
2186 Address
GetAddrOfBlockDecl(const VarDecl
*var
);
2188 /// BuildBlockByrefAddress - Computes the location of the
2189 /// data in a variable which is declared as __block.
2190 Address
emitBlockByrefAddress(Address baseAddr
, const VarDecl
*V
,
2191 bool followForward
= true);
2192 Address
emitBlockByrefAddress(Address baseAddr
,
2193 const BlockByrefInfo
&info
,
2195 const llvm::Twine
&name
);
2197 const BlockByrefInfo
&getBlockByrefInfo(const VarDecl
*var
);
2199 QualType
BuildFunctionArgList(GlobalDecl GD
, FunctionArgList
&Args
);
2201 void GenerateCode(GlobalDecl GD
, llvm::Function
*Fn
,
2202 const CGFunctionInfo
&FnInfo
);
2204 /// Annotate the function with an attribute that disables TSan checking at
2206 void markAsIgnoreThreadCheckingAtRuntime(llvm::Function
*Fn
);
2208 /// Emit code for the start of a function.
2209 /// \param Loc The location to be associated with the function.
2210 /// \param StartLoc The location of the function body.
2211 void StartFunction(GlobalDecl GD
,
2214 const CGFunctionInfo
&FnInfo
,
2215 const FunctionArgList
&Args
,
2216 SourceLocation Loc
= SourceLocation(),
2217 SourceLocation StartLoc
= SourceLocation());
2219 static bool IsConstructorDelegationValid(const CXXConstructorDecl
*Ctor
);
2221 void EmitConstructorBody(FunctionArgList
&Args
);
2222 void EmitDestructorBody(FunctionArgList
&Args
);
2223 void emitImplicitAssignmentOperatorBody(FunctionArgList
&Args
);
2224 void EmitFunctionBody(const Stmt
*Body
);
2225 void EmitBlockWithFallThrough(llvm::BasicBlock
*BB
, const Stmt
*S
);
2227 void EmitForwardingCallToLambda(const CXXMethodDecl
*LambdaCallOperator
,
2228 CallArgList
&CallArgs
);
2229 void EmitLambdaBlockInvokeBody();
2230 void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl
*MD
);
2231 void EmitLambdaStaticInvokeBody(const CXXMethodDecl
*MD
);
2232 void EmitLambdaVLACapture(const VariableArrayType
*VAT
, LValue LV
) {
2233 EmitStoreThroughLValue(RValue::get(VLASizeMap
[VAT
->getSizeExpr()]), LV
);
2235 void EmitAsanPrologueOrEpilogue(bool Prologue
);
2237 /// Emit the unified return block, trying to avoid its emission when
2239 /// \return The debug location of the user written return statement if the
2240 /// return block is avoided.
2241 llvm::DebugLoc
EmitReturnBlock();
2243 /// FinishFunction - Complete IR generation of the current function. It is
2244 /// legal to call this function even if there is no current insertion point.
2245 void FinishFunction(SourceLocation EndLoc
=SourceLocation());
2247 void StartThunk(llvm::Function
*Fn
, GlobalDecl GD
,
2248 const CGFunctionInfo
&FnInfo
, bool IsUnprototyped
);
2250 void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee
,
2251 const ThunkInfo
*Thunk
, bool IsUnprototyped
);
2255 /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
2256 void EmitMustTailThunk(GlobalDecl GD
, llvm::Value
*AdjustedThisPtr
,
2257 llvm::FunctionCallee Callee
);
2259 /// Generate a thunk for the given method.
2260 void generateThunk(llvm::Function
*Fn
, const CGFunctionInfo
&FnInfo
,
2261 GlobalDecl GD
, const ThunkInfo
&Thunk
,
2262 bool IsUnprototyped
);
2264 llvm::Function
*GenerateVarArgsThunk(llvm::Function
*Fn
,
2265 const CGFunctionInfo
&FnInfo
,
2266 GlobalDecl GD
, const ThunkInfo
&Thunk
);
2268 void EmitCtorPrologue(const CXXConstructorDecl
*CD
, CXXCtorType Type
,
2269 FunctionArgList
&Args
);
2271 void EmitInitializerForField(FieldDecl
*Field
, LValue LHS
, Expr
*Init
);
2273 /// Struct with all information about dynamic [sub]class needed to set vptr.
2276 const CXXRecordDecl
*NearestVBase
;
2277 CharUnits OffsetFromNearestVBase
;
2278 const CXXRecordDecl
*VTableClass
;
2281 /// Initialize the vtable pointer of the given subobject.
2282 void InitializeVTablePointer(const VPtr
&vptr
);
2284 typedef llvm::SmallVector
<VPtr
, 4> VPtrsVector
;
2286 typedef llvm::SmallPtrSet
<const CXXRecordDecl
*, 4> VisitedVirtualBasesSetTy
;
2287 VPtrsVector
getVTablePointers(const CXXRecordDecl
*VTableClass
);
2289 void getVTablePointers(BaseSubobject Base
, const CXXRecordDecl
*NearestVBase
,
2290 CharUnits OffsetFromNearestVBase
,
2291 bool BaseIsNonVirtualPrimaryBase
,
2292 const CXXRecordDecl
*VTableClass
,
2293 VisitedVirtualBasesSetTy
&VBases
, VPtrsVector
&vptrs
);
2295 void InitializeVTablePointers(const CXXRecordDecl
*ClassDecl
);
2297 /// GetVTablePtr - Return the Value of the vtable pointer member pointed
2299 llvm::Value
*GetVTablePtr(Address This
, llvm::Type
*VTableTy
,
2300 const CXXRecordDecl
*VTableClass
);
2302 enum CFITypeCheckKind
{
2306 CFITCK_UnrelatedCast
,
2312 /// Derived is the presumed address of an object of type T after a
2313 /// cast. If T is a polymorphic class type, emit a check that the virtual
2314 /// table for Derived belongs to a class derived from T.
2315 void EmitVTablePtrCheckForCast(QualType T
, Address Derived
, bool MayBeNull
,
2316 CFITypeCheckKind TCK
, SourceLocation Loc
);
2318 /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
2319 /// If vptr CFI is enabled, emit a check that VTable is valid.
2320 void EmitVTablePtrCheckForCall(const CXXRecordDecl
*RD
, llvm::Value
*VTable
,
2321 CFITypeCheckKind TCK
, SourceLocation Loc
);
2323 /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
2324 /// RD using llvm.type.test.
2325 void EmitVTablePtrCheck(const CXXRecordDecl
*RD
, llvm::Value
*VTable
,
2326 CFITypeCheckKind TCK
, SourceLocation Loc
);
2328 /// If whole-program virtual table optimization is enabled, emit an assumption
2329 /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
2330 /// enabled, emit a check that VTable is a member of RD's type identifier.
2331 void EmitTypeMetadataCodeForVCall(const CXXRecordDecl
*RD
,
2332 llvm::Value
*VTable
, SourceLocation Loc
);
2334 /// Returns whether we should perform a type checked load when loading a
2335 /// virtual function for virtual calls to members of RD. This is generally
2336 /// true when both vcall CFI and whole-program-vtables are enabled.
2337 bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl
*RD
);
2339 /// Emit a type checked load from the given vtable.
2340 llvm::Value
*EmitVTableTypeCheckedLoad(const CXXRecordDecl
*RD
,
2341 llvm::Value
*VTable
,
2342 llvm::Type
*VTableTy
,
2343 uint64_t VTableByteOffset
);
2345 /// EnterDtorCleanups - Enter the cleanups necessary to complete the
2346 /// given phase of destruction for a destructor. The end result
2347 /// should call destructors on members and base classes in reverse
2348 /// order of their construction.
2349 void EnterDtorCleanups(const CXXDestructorDecl
*Dtor
, CXXDtorType Type
);
2351 /// ShouldInstrumentFunction - Return true if the current function should be
2352 /// instrumented with __cyg_profile_func_* calls
2353 bool ShouldInstrumentFunction();
2355 /// ShouldSkipSanitizerInstrumentation - Return true if the current function
2356 /// should not be instrumented with sanitizers.
2357 bool ShouldSkipSanitizerInstrumentation();
2359 /// ShouldXRayInstrument - Return true if the current function should be
2360 /// instrumented with XRay nop sleds.
2361 bool ShouldXRayInstrumentFunction() const;
2363 /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
2364 /// XRay custom event handling calls.
2365 bool AlwaysEmitXRayCustomEvents() const;
2367 /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit
2368 /// XRay typed event handling calls.
2369 bool AlwaysEmitXRayTypedEvents() const;
2371 /// Return a type hash constant for a function instrumented by
2372 /// -fsanitize=function.
2373 llvm::ConstantInt
*getUBSanFunctionTypeHash(QualType T
) const;
2375 /// EmitFunctionProlog - Emit the target specific LLVM code to load the
2376 /// arguments for the given function. This is also responsible for naming the
2377 /// LLVM function arguments.
2378 void EmitFunctionProlog(const CGFunctionInfo
&FI
,
2380 const FunctionArgList
&Args
);
2382 /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
2383 /// given temporary.
2384 void EmitFunctionEpilog(const CGFunctionInfo
&FI
, bool EmitRetDbgLoc
,
2385 SourceLocation EndLoc
);
2387 /// Emit a test that checks if the return value \p RV is nonnull.
2388 void EmitReturnValueCheck(llvm::Value
*RV
);
2390 /// EmitStartEHSpec - Emit the start of the exception spec.
2391 void EmitStartEHSpec(const Decl
*D
);
2393 /// EmitEndEHSpec - Emit the end of the exception spec.
2394 void EmitEndEHSpec(const Decl
*D
);
2396 /// getTerminateLandingPad - Return a landing pad that just calls terminate.
2397 llvm::BasicBlock
*getTerminateLandingPad();
2399 /// getTerminateLandingPad - Return a cleanup funclet that just calls
2401 llvm::BasicBlock
*getTerminateFunclet();
2403 /// getTerminateHandler - Return a handler (not a landing pad, just
2404 /// a catch handler) that just calls terminate. This is used when
2405 /// a terminate scope encloses a try.
2406 llvm::BasicBlock
*getTerminateHandler();
2408 llvm::Type
*ConvertTypeForMem(QualType T
);
2409 llvm::Type
*ConvertType(QualType T
);
2410 llvm::Type
*ConvertType(const TypeDecl
*T
) {
2411 return ConvertType(getContext().getTypeDeclType(T
));
2414 /// LoadObjCSelf - Load the value of self. This function is only valid while
2415 /// generating code for an Objective-C method.
2416 llvm::Value
*LoadObjCSelf();
2418 /// TypeOfSelfObject - Return type of object that this self represents.
2419 QualType
TypeOfSelfObject();
2421 /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
2422 static TypeEvaluationKind
getEvaluationKind(QualType T
);
2424 static bool hasScalarEvaluationKind(QualType T
) {
2425 return getEvaluationKind(T
) == TEK_Scalar
;
2428 static bool hasAggregateEvaluationKind(QualType T
) {
2429 return getEvaluationKind(T
) == TEK_Aggregate
;
2432 /// createBasicBlock - Create an LLVM basic block.
2433 llvm::BasicBlock
*createBasicBlock(const Twine
&name
= "",
2434 llvm::Function
*parent
= nullptr,
2435 llvm::BasicBlock
*before
= nullptr) {
2436 return llvm::BasicBlock::Create(getLLVMContext(), name
, parent
, before
);
2439 /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
2441 JumpDest
getJumpDestForLabel(const LabelDecl
*S
);
2443 /// SimplifyForwardingBlocks - If the given basic block is only a branch to
2444 /// another basic block, simplify it. This assumes that no other code could
2445 /// potentially reference the basic block.
2446 void SimplifyForwardingBlocks(llvm::BasicBlock
*BB
);
2448 /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
2449 /// adding a fall-through branch from the current insert block if
2450 /// necessary. It is legal to call this function even if there is no current
2451 /// insertion point.
2453 /// IsFinished - If true, indicates that the caller has finished emitting
2454 /// branches to the given block and does not expect to emit code into it. This
2455 /// means the block can be ignored if it is unreachable.
2456 void EmitBlock(llvm::BasicBlock
*BB
, bool IsFinished
=false);
2458 /// EmitBlockAfterUses - Emit the given block somewhere hopefully
2459 /// near its uses, and leave the insertion point in it.
2460 void EmitBlockAfterUses(llvm::BasicBlock
*BB
);
2462 /// EmitBranch - Emit a branch to the specified basic block from the current
2463 /// insert block, taking care to avoid creation of branches from dummy
2464 /// blocks. It is legal to call this function even if there is no current
2465 /// insertion point.
2467 /// This function clears the current insertion point. The caller should follow
2468 /// calls to this function with calls to Emit*Block prior to generation new
2470 void EmitBranch(llvm::BasicBlock
*Block
);
2472 /// HaveInsertPoint - True if an insertion point is defined. If not, this
2473 /// indicates that the current code being emitted is unreachable.
2474 bool HaveInsertPoint() const {
2475 return Builder
.GetInsertBlock() != nullptr;
2478 /// EnsureInsertPoint - Ensure that an insertion point is defined so that
2479 /// emitted IR has a place to go. Note that by definition, if this function
2480 /// creates a block then that block is unreachable; callers may do better to
2481 /// detect when no insertion point is defined and simply skip IR generation.
2482 void EnsureInsertPoint() {
2483 if (!HaveInsertPoint())
2484 EmitBlock(createBasicBlock());
2487 /// ErrorUnsupported - Print out an error that codegen doesn't support the
2488 /// specified stmt yet.
2489 void ErrorUnsupported(const Stmt
*S
, const char *Type
);
2491 //===--------------------------------------------------------------------===//
2493 //===--------------------------------------------------------------------===//
2495 LValue
MakeAddrLValue(Address Addr
, QualType T
,
2496 AlignmentSource Source
= AlignmentSource::Type
) {
2497 return LValue::MakeAddr(Addr
, T
, getContext(), LValueBaseInfo(Source
),
2498 CGM
.getTBAAAccessInfo(T
));
2501 LValue
MakeAddrLValue(Address Addr
, QualType T
, LValueBaseInfo BaseInfo
,
2502 TBAAAccessInfo TBAAInfo
) {
2503 return LValue::MakeAddr(Addr
, T
, getContext(), BaseInfo
, TBAAInfo
);
2506 LValue
MakeAddrLValue(llvm::Value
*V
, QualType T
, CharUnits Alignment
,
2507 AlignmentSource Source
= AlignmentSource::Type
) {
2508 Address
Addr(V
, ConvertTypeForMem(T
), Alignment
);
2509 return LValue::MakeAddr(Addr
, T
, getContext(), LValueBaseInfo(Source
),
2510 CGM
.getTBAAAccessInfo(T
));
2514 MakeAddrLValueWithoutTBAA(Address Addr
, QualType T
,
2515 AlignmentSource Source
= AlignmentSource::Type
) {
2516 return LValue::MakeAddr(Addr
, T
, getContext(), LValueBaseInfo(Source
),
2520 LValue
MakeNaturalAlignPointeeAddrLValue(llvm::Value
*V
, QualType T
);
2521 LValue
MakeNaturalAlignAddrLValue(llvm::Value
*V
, QualType T
);
2523 Address
EmitLoadOfReference(LValue RefLVal
,
2524 LValueBaseInfo
*PointeeBaseInfo
= nullptr,
2525 TBAAAccessInfo
*PointeeTBAAInfo
= nullptr);
2526 LValue
EmitLoadOfReferenceLValue(LValue RefLVal
);
2527 LValue
EmitLoadOfReferenceLValue(Address RefAddr
, QualType RefTy
,
2528 AlignmentSource Source
=
2529 AlignmentSource::Type
) {
2530 LValue RefLVal
= MakeAddrLValue(RefAddr
, RefTy
, LValueBaseInfo(Source
),
2531 CGM
.getTBAAAccessInfo(RefTy
));
2532 return EmitLoadOfReferenceLValue(RefLVal
);
2535 /// Load a pointer with type \p PtrTy stored at address \p Ptr.
2536 /// Note that \p PtrTy is the type of the loaded pointer, not the addresses
2537 /// it is loaded from.
2538 Address
EmitLoadOfPointer(Address Ptr
, const PointerType
*PtrTy
,
2539 LValueBaseInfo
*BaseInfo
= nullptr,
2540 TBAAAccessInfo
*TBAAInfo
= nullptr);
2541 LValue
EmitLoadOfPointerLValue(Address Ptr
, const PointerType
*PtrTy
);
2543 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
2544 /// block if \p ArraySize is nullptr, otherwise inserts it at the current
2545 /// insertion point of the builder. The caller is responsible for setting an
2546 /// appropriate alignment on
2549 /// \p ArraySize is the number of array elements to be allocated if it
2552 /// LangAS::Default is the address space of pointers to local variables and
2553 /// temporaries, as exposed in the source language. In certain
2554 /// configurations, this is not the same as the alloca address space, and a
2555 /// cast is needed to lift the pointer from the alloca AS into
2556 /// LangAS::Default. This can happen when the target uses a restricted
2557 /// address space for the stack but the source language requires
2558 /// LangAS::Default to be a generic address space. The latter condition is
2559 /// common for most programming languages; OpenCL is an exception in that
2560 /// LangAS::Default is the private address space, which naturally maps
2563 /// Because the address of a temporary is often exposed to the program in
2564 /// various ways, this function will perform the cast. The original alloca
2565 /// instruction is returned through \p Alloca if it is not nullptr.
2567 /// The cast is not performaed in CreateTempAllocaWithoutCast. This is
2568 /// more efficient if the caller knows that the address will not be exposed.
2569 llvm::AllocaInst
*CreateTempAlloca(llvm::Type
*Ty
, const Twine
&Name
= "tmp",
2570 llvm::Value
*ArraySize
= nullptr);
2571 Address
CreateTempAlloca(llvm::Type
*Ty
, CharUnits align
,
2572 const Twine
&Name
= "tmp",
2573 llvm::Value
*ArraySize
= nullptr,
2574 Address
*Alloca
= nullptr);
2575 Address
CreateTempAllocaWithoutCast(llvm::Type
*Ty
, CharUnits align
,
2576 const Twine
&Name
= "tmp",
2577 llvm::Value
*ArraySize
= nullptr);
2579 /// CreateDefaultAlignedTempAlloca - This creates an alloca with the
2580 /// default ABI alignment of the given LLVM type.
2582 /// IMPORTANT NOTE: This is *not* generally the right alignment for
2583 /// any given AST type that happens to have been lowered to the
2584 /// given IR type. This should only ever be used for function-local,
2585 /// IR-driven manipulations like saving and restoring a value. Do
2586 /// not hand this address off to arbitrary IRGen routines, and especially
2587 /// do not pass it as an argument to a function that might expect a
2588 /// properly ABI-aligned value.
2589 Address
CreateDefaultAlignTempAlloca(llvm::Type
*Ty
,
2590 const Twine
&Name
= "tmp");
2592 /// CreateIRTemp - Create a temporary IR object of the given type, with
2593 /// appropriate alignment. This routine should only be used when an temporary
2594 /// value needs to be stored into an alloca (for example, to avoid explicit
2595 /// PHI construction), but the type is the IR type, not the type appropriate
2596 /// for storing in memory.
2598 /// That is, this is exactly equivalent to CreateMemTemp, but calling
2599 /// ConvertType instead of ConvertTypeForMem.
2600 Address
CreateIRTemp(QualType T
, const Twine
&Name
= "tmp");
2602 /// CreateMemTemp - Create a temporary memory object of the given type, with
2603 /// appropriate alignmen and cast it to the default address space. Returns
2604 /// the original alloca instruction by \p Alloca if it is not nullptr.
2605 Address
CreateMemTemp(QualType T
, const Twine
&Name
= "tmp",
2606 Address
*Alloca
= nullptr);
2607 Address
CreateMemTemp(QualType T
, CharUnits Align
, const Twine
&Name
= "tmp",
2608 Address
*Alloca
= nullptr);
2610 /// CreateMemTemp - Create a temporary memory object of the given type, with
2611 /// appropriate alignmen without casting it to the default address space.
2612 Address
CreateMemTempWithoutCast(QualType T
, const Twine
&Name
= "tmp");
2613 Address
CreateMemTempWithoutCast(QualType T
, CharUnits Align
,
2614 const Twine
&Name
= "tmp");
2616 /// CreateAggTemp - Create a temporary memory object for the given
2618 AggValueSlot
CreateAggTemp(QualType T
, const Twine
&Name
= "tmp",
2619 Address
*Alloca
= nullptr) {
2620 return AggValueSlot::forAddr(CreateMemTemp(T
, Name
, Alloca
),
2622 AggValueSlot::IsNotDestructed
,
2623 AggValueSlot::DoesNotNeedGCBarriers
,
2624 AggValueSlot::IsNotAliased
,
2625 AggValueSlot::DoesNotOverlap
);
2628 /// Emit a cast to void* in the appropriate address space.
2629 llvm::Value
*EmitCastToVoidPtr(llvm::Value
*value
);
2631 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2632 /// expression and compare the result against zero, returning an Int1Ty value.
2633 llvm::Value
*EvaluateExprAsBool(const Expr
*E
);
2635 /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
2636 void EmitIgnoredExpr(const Expr
*E
);
2638 /// EmitAnyExpr - Emit code to compute the specified expression which can have
2639 /// any type. The result is returned as an RValue struct. If this is an
2640 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2641 /// the result should be returned.
2643 /// \param ignoreResult True if the resulting value isn't used.
2644 RValue
EmitAnyExpr(const Expr
*E
,
2645 AggValueSlot aggSlot
= AggValueSlot::ignored(),
2646 bool ignoreResult
= false);
2648 // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2649 // or the value of the expression, depending on how va_list is defined.
2650 Address
EmitVAListRef(const Expr
*E
);
2652 /// Emit a "reference" to a __builtin_ms_va_list; this is
2653 /// always the value of the expression, because a __builtin_ms_va_list is a
2654 /// pointer to a char.
2655 Address
EmitMSVAListRef(const Expr
*E
);
2657 /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
2658 /// always be accessible even if no aggregate location is provided.
2659 RValue
EmitAnyExprToTemp(const Expr
*E
);
2661 /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2662 /// arbitrary expression into the given memory location.
2663 void EmitAnyExprToMem(const Expr
*E
, Address Location
,
2664 Qualifiers Quals
, bool IsInitializer
);
2666 void EmitAnyExprToExn(const Expr
*E
, Address Addr
);
2668 /// EmitExprAsInit - Emits the code necessary to initialize a
2669 /// location in memory with the given initializer.
2670 void EmitExprAsInit(const Expr
*init
, const ValueDecl
*D
, LValue lvalue
,
2671 bool capturedByInit
);
2673 /// hasVolatileMember - returns true if aggregate type has a volatile
2675 bool hasVolatileMember(QualType T
) {
2676 if (const RecordType
*RT
= T
->getAs
<RecordType
>()) {
2677 const RecordDecl
*RD
= cast
<RecordDecl
>(RT
->getDecl());
2678 return RD
->hasVolatileMember();
2683 /// Determine whether a return value slot may overlap some other object.
2684 AggValueSlot::Overlap_t
getOverlapForReturnValue() {
2685 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2686 // class subobjects. These cases may need to be revisited depending on the
2687 // resolution of the relevant core issue.
2688 return AggValueSlot::DoesNotOverlap
;
2691 /// Determine whether a field initialization may overlap some other object.
2692 AggValueSlot::Overlap_t
getOverlapForFieldInit(const FieldDecl
*FD
);
2694 /// Determine whether a base class initialization may overlap some other
2696 AggValueSlot::Overlap_t
getOverlapForBaseInit(const CXXRecordDecl
*RD
,
2697 const CXXRecordDecl
*BaseRD
,
2700 /// Emit an aggregate assignment.
2701 void EmitAggregateAssign(LValue Dest
, LValue Src
, QualType EltTy
) {
2702 bool IsVolatile
= hasVolatileMember(EltTy
);
2703 EmitAggregateCopy(Dest
, Src
, EltTy
, AggValueSlot::MayOverlap
, IsVolatile
);
2706 void EmitAggregateCopyCtor(LValue Dest
, LValue Src
,
2707 AggValueSlot::Overlap_t MayOverlap
) {
2708 EmitAggregateCopy(Dest
, Src
, Src
.getType(), MayOverlap
);
2711 /// EmitAggregateCopy - Emit an aggregate copy.
2713 /// \param isVolatile \c true iff either the source or the destination is
2715 /// \param MayOverlap Whether the tail padding of the destination might be
2716 /// occupied by some other object. More efficient code can often be
2717 /// generated if not.
2718 void EmitAggregateCopy(LValue Dest
, LValue Src
, QualType EltTy
,
2719 AggValueSlot::Overlap_t MayOverlap
,
2720 bool isVolatile
= false);
2722 /// GetAddrOfLocalVar - Return the address of a local variable.
2723 Address
GetAddrOfLocalVar(const VarDecl
*VD
) {
2724 auto it
= LocalDeclMap
.find(VD
);
2725 assert(it
!= LocalDeclMap
.end() &&
2726 "Invalid argument to GetAddrOfLocalVar(), no decl!");
2730 /// Given an opaque value expression, return its LValue mapping if it exists,
2731 /// otherwise create one.
2732 LValue
getOrCreateOpaqueLValueMapping(const OpaqueValueExpr
*e
);
2734 /// Given an opaque value expression, return its RValue mapping if it exists,
2735 /// otherwise create one.
2736 RValue
getOrCreateOpaqueRValueMapping(const OpaqueValueExpr
*e
);
2738 /// Get the index of the current ArrayInitLoopExpr, if any.
2739 llvm::Value
*getArrayInitIndex() { return ArrayInitIndex
; }
2741 /// getAccessedFieldNo - Given an encoded value and a result number, return
2742 /// the input field number being accessed.
2743 static unsigned getAccessedFieldNo(unsigned Idx
, const llvm::Constant
*Elts
);
2745 llvm::BlockAddress
*GetAddrOfLabel(const LabelDecl
*L
);
2746 llvm::BasicBlock
*GetIndirectGotoBlock();
2748 /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
2749 static bool IsWrappedCXXThis(const Expr
*E
);
2751 /// EmitNullInitialization - Generate code to set a value of the given type to
2752 /// null. If the type contains data member pointers, they will be initialized
2753 /// to -1 in accordance with the Itanium C++ ABI.
2754 void EmitNullInitialization(Address DestPtr
, QualType Ty
);
2756 /// Emits a call to an LLVM variable-argument intrinsic, either
2757 /// \c llvm.va_start or \c llvm.va_end.
2758 /// \param ArgValue A reference to the \c va_list as emitted by either
2759 /// \c EmitVAListRef or \c EmitMSVAListRef.
2760 /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
2761 /// calls \c llvm.va_end.
2762 llvm::Value
*EmitVAStartEnd(llvm::Value
*ArgValue
, bool IsStart
);
2764 /// Generate code to get an argument from the passed in pointer
2765 /// and update it accordingly.
2766 /// \param VE The \c VAArgExpr for which to generate code.
2767 /// \param VAListAddr Receives a reference to the \c va_list as emitted by
2768 /// either \c EmitVAListRef or \c EmitMSVAListRef.
2769 /// \returns A pointer to the argument.
2770 // FIXME: We should be able to get rid of this method and use the va_arg
2771 // instruction in LLVM instead once it works well enough.
2772 Address
EmitVAArg(VAArgExpr
*VE
, Address
&VAListAddr
);
2774 /// emitArrayLength - Compute the length of an array, even if it's a
2775 /// VLA, and drill down to the base element type.
2776 llvm::Value
*emitArrayLength(const ArrayType
*arrayType
,
2780 /// EmitVLASize - Capture all the sizes for the VLA expressions in
2781 /// the given variably-modified type and store them in the VLASizeMap.
2783 /// This function can be called with a null (unreachable) insert point.
2784 void EmitVariablyModifiedType(QualType Ty
);
2786 struct VlaSizePair
{
2787 llvm::Value
*NumElts
;
2790 VlaSizePair(llvm::Value
*NE
, QualType T
) : NumElts(NE
), Type(T
) {}
2793 /// Return the number of elements for a single dimension
2794 /// for the given array type.
2795 VlaSizePair
getVLAElements1D(const VariableArrayType
*vla
);
2796 VlaSizePair
getVLAElements1D(QualType vla
);
2798 /// Returns an LLVM value that corresponds to the size,
2799 /// in non-variably-sized elements, of a variable length array type,
2800 /// plus that largest non-variably-sized element type. Assumes that
2801 /// the type has already been emitted with EmitVariablyModifiedType.
2802 VlaSizePair
getVLASize(const VariableArrayType
*vla
);
2803 VlaSizePair
getVLASize(QualType vla
);
2805 /// LoadCXXThis - Load the value of 'this'. This function is only valid while
2806 /// generating code for a C++ member function.
2807 llvm::Value
*LoadCXXThis() {
2808 assert(CXXThisValue
&& "no 'this' value for this function");
2809 return CXXThisValue
;
2811 Address
LoadCXXThisAddress();
2813 /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors have
2815 // FIXME: Every place that calls LoadCXXVTT is something
2816 // that needs to be abstracted properly.
2817 llvm::Value
*LoadCXXVTT() {
2818 assert(CXXStructorImplicitParamValue
&& "no VTT value for this function");
2819 return CXXStructorImplicitParamValue
;
2822 /// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
2823 /// complete class to the given direct base.
2825 GetAddressOfDirectBaseInCompleteClass(Address Value
,
2826 const CXXRecordDecl
*Derived
,
2827 const CXXRecordDecl
*Base
,
2828 bool BaseIsVirtual
);
2830 static bool ShouldNullCheckClassCastValue(const CastExpr
*Cast
);
2832 /// GetAddressOfBaseClass - This function will add the necessary delta to the
2833 /// load of 'this' and returns address of the base class.
2834 Address
GetAddressOfBaseClass(Address Value
,
2835 const CXXRecordDecl
*Derived
,
2836 CastExpr::path_const_iterator PathBegin
,
2837 CastExpr::path_const_iterator PathEnd
,
2838 bool NullCheckValue
, SourceLocation Loc
);
2840 Address
GetAddressOfDerivedClass(Address Value
,
2841 const CXXRecordDecl
*Derived
,
2842 CastExpr::path_const_iterator PathBegin
,
2843 CastExpr::path_const_iterator PathEnd
,
2844 bool NullCheckValue
);
2846 /// GetVTTParameter - Return the VTT parameter that should be passed to a
2847 /// base constructor/destructor with virtual bases.
2848 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
2849 /// to ItaniumCXXABI.cpp together with all the references to VTT.
2850 llvm::Value
*GetVTTParameter(GlobalDecl GD
, bool ForVirtualBase
,
2853 void EmitDelegateCXXConstructorCall(const CXXConstructorDecl
*Ctor
,
2854 CXXCtorType CtorType
,
2855 const FunctionArgList
&Args
,
2856 SourceLocation Loc
);
2857 // It's important not to confuse this and the previous function. Delegating
2858 // constructors are the C++0x feature. The constructor delegate optimization
2859 // is used to reduce duplication in the base and complete constructors where
2860 // they are substantially the same.
2861 void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl
*Ctor
,
2862 const FunctionArgList
&Args
);
2864 /// Emit a call to an inheriting constructor (that is, one that invokes a
2865 /// constructor inherited from a base class) by inlining its definition. This
2866 /// is necessary if the ABI does not support forwarding the arguments to the
2867 /// base class constructor (because they're variadic or similar).
2868 void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl
*Ctor
,
2869 CXXCtorType CtorType
,
2870 bool ForVirtualBase
,
2874 /// Emit a call to a constructor inherited from a base class, passing the
2875 /// current constructor's arguments along unmodified (without even making
2877 void EmitInheritedCXXConstructorCall(const CXXConstructorDecl
*D
,
2878 bool ForVirtualBase
, Address This
,
2879 bool InheritedFromVBase
,
2880 const CXXInheritedCtorInitExpr
*E
);
2882 void EmitCXXConstructorCall(const CXXConstructorDecl
*D
, CXXCtorType Type
,
2883 bool ForVirtualBase
, bool Delegating
,
2884 AggValueSlot ThisAVS
, const CXXConstructExpr
*E
);
2886 void EmitCXXConstructorCall(const CXXConstructorDecl
*D
, CXXCtorType Type
,
2887 bool ForVirtualBase
, bool Delegating
,
2888 Address This
, CallArgList
&Args
,
2889 AggValueSlot::Overlap_t Overlap
,
2890 SourceLocation Loc
, bool NewPointerIsChecked
);
2892 /// Emit assumption load for all bases. Requires to be called only on
2893 /// most-derived class and not under construction of the object.
2894 void EmitVTableAssumptionLoads(const CXXRecordDecl
*ClassDecl
, Address This
);
2896 /// Emit assumption that vptr load == global vtable.
2897 void EmitVTableAssumptionLoad(const VPtr
&vptr
, Address This
);
2899 void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl
*D
,
2900 Address This
, Address Src
,
2901 const CXXConstructExpr
*E
);
2903 void EmitCXXAggrConstructorCall(const CXXConstructorDecl
*D
,
2904 const ArrayType
*ArrayTy
,
2906 const CXXConstructExpr
*E
,
2907 bool NewPointerIsChecked
,
2908 bool ZeroInitialization
= false);
2910 void EmitCXXAggrConstructorCall(const CXXConstructorDecl
*D
,
2911 llvm::Value
*NumElements
,
2913 const CXXConstructExpr
*E
,
2914 bool NewPointerIsChecked
,
2915 bool ZeroInitialization
= false);
2917 static Destroyer destroyCXXObject
;
2919 void EmitCXXDestructorCall(const CXXDestructorDecl
*D
, CXXDtorType Type
,
2920 bool ForVirtualBase
, bool Delegating
, Address This
,
2923 void EmitNewArrayInitializer(const CXXNewExpr
*E
, QualType elementType
,
2924 llvm::Type
*ElementTy
, Address NewPtr
,
2925 llvm::Value
*NumElements
,
2926 llvm::Value
*AllocSizeWithoutCookie
);
2928 void EmitCXXTemporary(const CXXTemporary
*Temporary
, QualType TempType
,
2931 void EmitSehCppScopeBegin();
2932 void EmitSehCppScopeEnd();
2933 void EmitSehTryScopeBegin();
2934 void EmitSehTryScopeEnd();
2936 llvm::Value
*EmitLifetimeStart(llvm::TypeSize Size
, llvm::Value
*Addr
);
2937 void EmitLifetimeEnd(llvm::Value
*Size
, llvm::Value
*Addr
);
2939 llvm::Value
*EmitCXXNewExpr(const CXXNewExpr
*E
);
2940 void EmitCXXDeleteExpr(const CXXDeleteExpr
*E
);
2942 void EmitDeleteCall(const FunctionDecl
*DeleteFD
, llvm::Value
*Ptr
,
2943 QualType DeleteTy
, llvm::Value
*NumElements
= nullptr,
2944 CharUnits CookieSize
= CharUnits());
2946 RValue
EmitBuiltinNewDeleteCall(const FunctionProtoType
*Type
,
2947 const CallExpr
*TheCallExpr
, bool IsDelete
);
2949 llvm::Value
*EmitCXXTypeidExpr(const CXXTypeidExpr
*E
);
2950 llvm::Value
*EmitDynamicCast(Address V
, const CXXDynamicCastExpr
*DCE
);
2951 Address
EmitCXXUuidofExpr(const CXXUuidofExpr
*E
);
2953 /// Situations in which we might emit a check for the suitability of a
2954 /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
2956 enum TypeCheckKind
{
2957 /// Checking the operand of a load. Must be suitably sized and aligned.
2959 /// Checking the destination of a store. Must be suitably sized and aligned.
2961 /// Checking the bound value in a reference binding. Must be suitably sized
2962 /// and aligned, but is not required to refer to an object (until the
2963 /// reference is used), per core issue 453.
2964 TCK_ReferenceBinding
,
2965 /// Checking the object expression in a non-static data member access. Must
2966 /// be an object within its lifetime.
2968 /// Checking the 'this' pointer for a call to a non-static member function.
2969 /// Must be an object within its lifetime.
2971 /// Checking the 'this' pointer for a constructor call.
2972 TCK_ConstructorCall
,
2973 /// Checking the operand of a static_cast to a derived pointer type. Must be
2974 /// null or an object within its lifetime.
2975 TCK_DowncastPointer
,
2976 /// Checking the operand of a static_cast to a derived reference type. Must
2977 /// be an object within its lifetime.
2978 TCK_DowncastReference
,
2979 /// Checking the operand of a cast to a base object. Must be suitably sized
2982 /// Checking the operand of a cast to a virtual base object. Must be an
2983 /// object within its lifetime.
2984 TCK_UpcastToVirtualBase
,
2985 /// Checking the value assigned to a _Nonnull pointer. Must not be null.
2987 /// Checking the operand of a dynamic_cast or a typeid expression. Must be
2988 /// null or an object within its lifetime.
2989 TCK_DynamicOperation
2992 /// Determine whether the pointer type check \p TCK permits null pointers.
2993 static bool isNullPointerAllowed(TypeCheckKind TCK
);
2995 /// Determine whether the pointer type check \p TCK requires a vptr check.
2996 static bool isVptrCheckRequired(TypeCheckKind TCK
, QualType Ty
);
2998 /// Whether any type-checking sanitizers are enabled. If \c false,
2999 /// calls to EmitTypeCheck can be skipped.
3000 bool sanitizePerformTypeCheck() const;
3002 /// Emit a check that \p V is the address of storage of the
3003 /// appropriate size and alignment for an object of type \p Type
3004 /// (or if ArraySize is provided, for an array of that bound).
3005 void EmitTypeCheck(TypeCheckKind TCK
, SourceLocation Loc
, llvm::Value
*V
,
3006 QualType Type
, CharUnits Alignment
= CharUnits::Zero(),
3007 SanitizerSet SkippedChecks
= SanitizerSet(),
3008 llvm::Value
*ArraySize
= nullptr);
3010 /// Emit a check that \p Base points into an array object, which
3011 /// we can access at index \p Index. \p Accessed should be \c false if
3012 /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
3013 void EmitBoundsCheck(const Expr
*E
, const Expr
*Base
, llvm::Value
*Index
,
3014 QualType IndexType
, bool Accessed
);
3016 llvm::Value
*EmitScalarPrePostIncDec(const UnaryOperator
*E
, LValue LV
,
3017 bool isInc
, bool isPre
);
3018 ComplexPairTy
EmitComplexPrePostIncDec(const UnaryOperator
*E
, LValue LV
,
3019 bool isInc
, bool isPre
);
3021 /// Converts Location to a DebugLoc, if debug information is enabled.
3022 llvm::DebugLoc
SourceLocToDebugLoc(SourceLocation Location
);
3024 /// Get the record field index as represented in debug info.
3025 unsigned getDebugInfoFIndex(const RecordDecl
*Rec
, unsigned FieldIndex
);
3028 //===--------------------------------------------------------------------===//
3029 // Declaration Emission
3030 //===--------------------------------------------------------------------===//
3032 /// EmitDecl - Emit a declaration.
3034 /// This function can be called with a null (unreachable) insert point.
3035 void EmitDecl(const Decl
&D
);
3037 /// EmitVarDecl - Emit a local variable declaration.
3039 /// This function can be called with a null (unreachable) insert point.
3040 void EmitVarDecl(const VarDecl
&D
);
3042 void EmitScalarInit(const Expr
*init
, const ValueDecl
*D
, LValue lvalue
,
3043 bool capturedByInit
);
3045 typedef void SpecialInitFn(CodeGenFunction
&Init
, const VarDecl
&D
,
3046 llvm::Value
*Address
);
3048 /// Determine whether the given initializer is trivial in the sense
3049 /// that it requires no code to be generated.
3050 bool isTrivialInitializer(const Expr
*Init
);
3052 /// EmitAutoVarDecl - Emit an auto variable declaration.
3054 /// This function can be called with a null (unreachable) insert point.
3055 void EmitAutoVarDecl(const VarDecl
&D
);
3057 class AutoVarEmission
{
3058 friend class CodeGenFunction
;
3060 const VarDecl
*Variable
;
3062 /// The address of the alloca for languages with explicit address space
3063 /// (e.g. OpenCL) or alloca casted to generic pointer for address space
3064 /// agnostic languages (e.g. C++). Invalid if the variable was emitted
3065 /// as a global constant.
3068 llvm::Value
*NRVOFlag
;
3070 /// True if the variable is a __block variable that is captured by an
3072 bool IsEscapingByRef
;
3074 /// True if the variable is of aggregate type and has a constant
3076 bool IsConstantAggregate
;
3078 /// Non-null if we should use lifetime annotations.
3079 llvm::Value
*SizeForLifetimeMarkers
;
3081 /// Address with original alloca instruction. Invalid if the variable was
3082 /// emitted as a global constant.
3086 AutoVarEmission(Invalid
)
3087 : Variable(nullptr), Addr(Address::invalid()),
3088 AllocaAddr(Address::invalid()) {}
3090 AutoVarEmission(const VarDecl
&variable
)
3091 : Variable(&variable
), Addr(Address::invalid()), NRVOFlag(nullptr),
3092 IsEscapingByRef(false), IsConstantAggregate(false),
3093 SizeForLifetimeMarkers(nullptr), AllocaAddr(Address::invalid()) {}
3095 bool wasEmittedAsGlobal() const { return !Addr
.isValid(); }
3098 static AutoVarEmission
invalid() { return AutoVarEmission(Invalid()); }
3100 bool useLifetimeMarkers() const {
3101 return SizeForLifetimeMarkers
!= nullptr;
3103 llvm::Value
*getSizeForLifetimeMarkers() const {
3104 assert(useLifetimeMarkers());
3105 return SizeForLifetimeMarkers
;
3108 /// Returns the raw, allocated address, which is not necessarily
3109 /// the address of the object itself. It is casted to default
3110 /// address space for address space agnostic languages.
3111 Address
getAllocatedAddress() const {
3115 /// Returns the address for the original alloca instruction.
3116 Address
getOriginalAllocatedAddress() const { return AllocaAddr
; }
3118 /// Returns the address of the object within this declaration.
3119 /// Note that this does not chase the forwarding pointer for
3121 Address
getObjectAddress(CodeGenFunction
&CGF
) const {
3122 if (!IsEscapingByRef
) return Addr
;
3124 return CGF
.emitBlockByrefAddress(Addr
, Variable
, /*forward*/ false);
3127 AutoVarEmission
EmitAutoVarAlloca(const VarDecl
&var
);
3128 void EmitAutoVarInit(const AutoVarEmission
&emission
);
3129 void EmitAutoVarCleanups(const AutoVarEmission
&emission
);
3130 void emitAutoVarTypeCleanup(const AutoVarEmission
&emission
,
3131 QualType::DestructionKind dtorKind
);
3133 /// Emits the alloca and debug information for the size expressions for each
3134 /// dimension of an array. It registers the association of its (1-dimensional)
3135 /// QualTypes and size expression's debug node, so that CGDebugInfo can
3136 /// reference this node when creating the DISubrange object to describe the
3138 void EmitAndRegisterVariableArrayDimensions(CGDebugInfo
*DI
,
3140 bool EmitDebugInfo
);
3142 void EmitStaticVarDecl(const VarDecl
&D
,
3143 llvm::GlobalValue::LinkageTypes Linkage
);
3147 llvm::Type
*ElementType
;
3149 ParamValue(llvm::Value
*V
, llvm::Type
*T
, unsigned A
)
3150 : Value(V
), ElementType(T
), Alignment(A
) {}
3152 static ParamValue
forDirect(llvm::Value
*value
) {
3153 return ParamValue(value
, nullptr, 0);
3155 static ParamValue
forIndirect(Address addr
) {
3156 assert(!addr
.getAlignment().isZero());
3157 return ParamValue(addr
.getPointer(), addr
.getElementType(),
3158 addr
.getAlignment().getQuantity());
3161 bool isIndirect() const { return Alignment
!= 0; }
3162 llvm::Value
*getAnyValue() const { return Value
; }
3164 llvm::Value
*getDirectValue() const {
3165 assert(!isIndirect());
3169 Address
getIndirectAddress() const {
3170 assert(isIndirect());
3171 return Address(Value
, ElementType
, CharUnits::fromQuantity(Alignment
),
3176 /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
3177 void EmitParmDecl(const VarDecl
&D
, ParamValue Arg
, unsigned ArgNo
);
3179 /// protectFromPeepholes - Protect a value that we're intending to
3180 /// store to the side, but which will probably be used later, from
3181 /// aggressive peepholing optimizations that might delete it.
3183 /// Pass the result to unprotectFromPeepholes to declare that
3184 /// protection is no longer required.
3186 /// There's no particular reason why this shouldn't apply to
3187 /// l-values, it's just that no existing peepholes work on pointers.
3188 PeepholeProtection
protectFromPeepholes(RValue rvalue
);
3189 void unprotectFromPeepholes(PeepholeProtection protection
);
3191 void emitAlignmentAssumptionCheck(llvm::Value
*Ptr
, QualType Ty
,
3193 SourceLocation AssumptionLoc
,
3194 llvm::Value
*Alignment
,
3195 llvm::Value
*OffsetValue
,
3196 llvm::Value
*TheCheck
,
3197 llvm::Instruction
*Assumption
);
3199 void emitAlignmentAssumption(llvm::Value
*PtrValue
, QualType Ty
,
3200 SourceLocation Loc
, SourceLocation AssumptionLoc
,
3201 llvm::Value
*Alignment
,
3202 llvm::Value
*OffsetValue
= nullptr);
3204 void emitAlignmentAssumption(llvm::Value
*PtrValue
, const Expr
*E
,
3205 SourceLocation AssumptionLoc
,
3206 llvm::Value
*Alignment
,
3207 llvm::Value
*OffsetValue
= nullptr);
3209 //===--------------------------------------------------------------------===//
3210 // Statement Emission
3211 //===--------------------------------------------------------------------===//
3213 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
3214 void EmitStopPoint(const Stmt
*S
);
3216 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
3217 /// this function even if there is no current insertion point.
3219 /// This function may clear the current insertion point; callers should use
3220 /// EnsureInsertPoint if they wish to subsequently generate code without first
3221 /// calling EmitBlock, EmitBranch, or EmitStmt.
3222 void EmitStmt(const Stmt
*S
, ArrayRef
<const Attr
*> Attrs
= std::nullopt
);
3224 /// EmitSimpleStmt - Try to emit a "simple" statement which does not
3225 /// necessarily require an insertion point or debug information; typically
3226 /// because the statement amounts to a jump or a container of other
3229 /// \return True if the statement was handled.
3230 bool EmitSimpleStmt(const Stmt
*S
, ArrayRef
<const Attr
*> Attrs
);
3232 Address
EmitCompoundStmt(const CompoundStmt
&S
, bool GetLast
= false,
3233 AggValueSlot AVS
= AggValueSlot::ignored());
3234 Address
EmitCompoundStmtWithoutScope(const CompoundStmt
&S
,
3235 bool GetLast
= false,
3237 AggValueSlot::ignored());
3239 /// EmitLabel - Emit the block for the given label. It is legal to call this
3240 /// function even if there is no current insertion point.
3241 void EmitLabel(const LabelDecl
*D
); // helper for EmitLabelStmt.
3243 void EmitLabelStmt(const LabelStmt
&S
);
3244 void EmitAttributedStmt(const AttributedStmt
&S
);
3245 void EmitGotoStmt(const GotoStmt
&S
);
3246 void EmitIndirectGotoStmt(const IndirectGotoStmt
&S
);
3247 void EmitIfStmt(const IfStmt
&S
);
3249 void EmitWhileStmt(const WhileStmt
&S
,
3250 ArrayRef
<const Attr
*> Attrs
= std::nullopt
);
3251 void EmitDoStmt(const DoStmt
&S
, ArrayRef
<const Attr
*> Attrs
= std::nullopt
);
3252 void EmitForStmt(const ForStmt
&S
,
3253 ArrayRef
<const Attr
*> Attrs
= std::nullopt
);
3254 void EmitReturnStmt(const ReturnStmt
&S
);
3255 void EmitDeclStmt(const DeclStmt
&S
);
3256 void EmitBreakStmt(const BreakStmt
&S
);
3257 void EmitContinueStmt(const ContinueStmt
&S
);
3258 void EmitSwitchStmt(const SwitchStmt
&S
);
3259 void EmitDefaultStmt(const DefaultStmt
&S
, ArrayRef
<const Attr
*> Attrs
);
3260 void EmitCaseStmt(const CaseStmt
&S
, ArrayRef
<const Attr
*> Attrs
);
3261 void EmitCaseStmtRange(const CaseStmt
&S
, ArrayRef
<const Attr
*> Attrs
);
3262 void EmitAsmStmt(const AsmStmt
&S
);
3264 void EmitObjCForCollectionStmt(const ObjCForCollectionStmt
&S
);
3265 void EmitObjCAtTryStmt(const ObjCAtTryStmt
&S
);
3266 void EmitObjCAtThrowStmt(const ObjCAtThrowStmt
&S
);
3267 void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt
&S
);
3268 void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt
&S
);
3270 void EmitCoroutineBody(const CoroutineBodyStmt
&S
);
3271 void EmitCoreturnStmt(const CoreturnStmt
&S
);
3272 RValue
EmitCoawaitExpr(const CoawaitExpr
&E
,
3273 AggValueSlot aggSlot
= AggValueSlot::ignored(),
3274 bool ignoreResult
= false);
3275 LValue
EmitCoawaitLValue(const CoawaitExpr
*E
);
3276 RValue
EmitCoyieldExpr(const CoyieldExpr
&E
,
3277 AggValueSlot aggSlot
= AggValueSlot::ignored(),
3278 bool ignoreResult
= false);
3279 LValue
EmitCoyieldLValue(const CoyieldExpr
*E
);
3280 RValue
EmitCoroutineIntrinsic(const CallExpr
*E
, unsigned int IID
);
3282 void EnterCXXTryStmt(const CXXTryStmt
&S
, bool IsFnTryBlock
= false);
3283 void ExitCXXTryStmt(const CXXTryStmt
&S
, bool IsFnTryBlock
= false);
3285 void EmitCXXTryStmt(const CXXTryStmt
&S
);
3286 void EmitSEHTryStmt(const SEHTryStmt
&S
);
3287 void EmitSEHLeaveStmt(const SEHLeaveStmt
&S
);
3288 void EnterSEHTryStmt(const SEHTryStmt
&S
);
3289 void ExitSEHTryStmt(const SEHTryStmt
&S
);
3290 void VolatilizeTryBlocks(llvm::BasicBlock
*BB
,
3291 llvm::SmallPtrSet
<llvm::BasicBlock
*, 10> &V
);
3293 void pushSEHCleanup(CleanupKind kind
,
3294 llvm::Function
*FinallyFunc
);
3295 void startOutlinedSEHHelper(CodeGenFunction
&ParentCGF
, bool IsFilter
,
3296 const Stmt
*OutlinedStmt
);
3298 llvm::Function
*GenerateSEHFilterFunction(CodeGenFunction
&ParentCGF
,
3299 const SEHExceptStmt
&Except
);
3301 llvm::Function
*GenerateSEHFinallyFunction(CodeGenFunction
&ParentCGF
,
3302 const SEHFinallyStmt
&Finally
);
3304 void EmitSEHExceptionCodeSave(CodeGenFunction
&ParentCGF
,
3305 llvm::Value
*ParentFP
,
3306 llvm::Value
*EntryEBP
);
3307 llvm::Value
*EmitSEHExceptionCode();
3308 llvm::Value
*EmitSEHExceptionInfo();
3309 llvm::Value
*EmitSEHAbnormalTermination();
3311 /// Emit simple code for OpenMP directives in Simd-only mode.
3312 void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective
&D
);
3314 /// Scan the outlined statement for captures from the parent function. For
3315 /// each capture, mark the capture as escaped and emit a call to
3316 /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
3317 void EmitCapturedLocals(CodeGenFunction
&ParentCGF
, const Stmt
*OutlinedStmt
,
3320 /// Recovers the address of a local in a parent function. ParentVar is the
3321 /// address of the variable used in the immediate parent function. It can
3322 /// either be an alloca or a call to llvm.localrecover if there are nested
3323 /// outlined functions. ParentFP is the frame pointer of the outermost parent
3325 Address
recoverAddrOfEscapedLocal(CodeGenFunction
&ParentCGF
,
3327 llvm::Value
*ParentFP
);
3329 void EmitCXXForRangeStmt(const CXXForRangeStmt
&S
,
3330 ArrayRef
<const Attr
*> Attrs
= std::nullopt
);
3332 /// Controls insertion of cancellation exit blocks in worksharing constructs.
3333 class OMPCancelStackRAII
{
3334 CodeGenFunction
&CGF
;
3337 OMPCancelStackRAII(CodeGenFunction
&CGF
, OpenMPDirectiveKind Kind
,
3340 CGF
.OMPCancelStack
.enter(CGF
, Kind
, HasCancel
);
3342 ~OMPCancelStackRAII() { CGF
.OMPCancelStack
.exit(CGF
); }
3345 /// Returns calculated size of the specified type.
3346 llvm::Value
*getTypeSize(QualType Ty
);
3347 LValue
InitCapturedStruct(const CapturedStmt
&S
);
3348 llvm::Function
*EmitCapturedStmt(const CapturedStmt
&S
, CapturedRegionKind K
);
3349 llvm::Function
*GenerateCapturedStmtFunction(const CapturedStmt
&S
);
3350 Address
GenerateCapturedStmtArgument(const CapturedStmt
&S
);
3351 llvm::Function
*GenerateOpenMPCapturedStmtFunction(const CapturedStmt
&S
,
3352 SourceLocation Loc
);
3353 void GenerateOpenMPCapturedVars(const CapturedStmt
&S
,
3354 SmallVectorImpl
<llvm::Value
*> &CapturedVars
);
3355 void emitOMPSimpleStore(LValue LVal
, RValue RVal
, QualType RValTy
,
3356 SourceLocation Loc
);
3357 /// Perform element by element copying of arrays with type \a
3358 /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
3359 /// generated by \a CopyGen.
3361 /// \param DestAddr Address of the destination array.
3362 /// \param SrcAddr Address of the source array.
3363 /// \param OriginalType Type of destination and source arrays.
3364 /// \param CopyGen Copying procedure that copies value of single array element
3365 /// to another single array element.
3366 void EmitOMPAggregateAssign(
3367 Address DestAddr
, Address SrcAddr
, QualType OriginalType
,
3368 const llvm::function_ref
<void(Address
, Address
)> CopyGen
);
3369 /// Emit proper copying of data from one variable to another.
3371 /// \param OriginalType Original type of the copied variables.
3372 /// \param DestAddr Destination address.
3373 /// \param SrcAddr Source address.
3374 /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
3375 /// type of the base array element).
3376 /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
3377 /// the base array element).
3378 /// \param Copy Actual copying expression for copying data from \a SrcVD to \a
3380 void EmitOMPCopy(QualType OriginalType
,
3381 Address DestAddr
, Address SrcAddr
,
3382 const VarDecl
*DestVD
, const VarDecl
*SrcVD
,
3384 /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
3385 /// \a X = \a E \a BO \a E.
3387 /// \param X Value to be updated.
3388 /// \param E Update value.
3389 /// \param BO Binary operation for update operation.
3390 /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
3391 /// expression, false otherwise.
3392 /// \param AO Atomic ordering of the generated atomic instructions.
3393 /// \param CommonGen Code generator for complex expressions that cannot be
3394 /// expressed through atomicrmw instruction.
3395 /// \returns <true, OldAtomicValue> if simple 'atomicrmw' instruction was
3396 /// generated, <false, RValue::get(nullptr)> otherwise.
3397 std::pair
<bool, RValue
> EmitOMPAtomicSimpleUpdateExpr(
3398 LValue X
, RValue E
, BinaryOperatorKind BO
, bool IsXLHSInRHSPart
,
3399 llvm::AtomicOrdering AO
, SourceLocation Loc
,
3400 const llvm::function_ref
<RValue(RValue
)> CommonGen
);
3401 bool EmitOMPFirstprivateClause(const OMPExecutableDirective
&D
,
3402 OMPPrivateScope
&PrivateScope
);
3403 void EmitOMPPrivateClause(const OMPExecutableDirective
&D
,
3404 OMPPrivateScope
&PrivateScope
);
3405 void EmitOMPUseDevicePtrClause(
3406 const OMPUseDevicePtrClause
&C
, OMPPrivateScope
&PrivateScope
,
3407 const llvm::DenseMap
<const ValueDecl
*, Address
> &CaptureDeviceAddrMap
);
3408 void EmitOMPUseDeviceAddrClause(
3409 const OMPUseDeviceAddrClause
&C
, OMPPrivateScope
&PrivateScope
,
3410 const llvm::DenseMap
<const ValueDecl
*, Address
> &CaptureDeviceAddrMap
);
3411 /// Emit code for copyin clause in \a D directive. The next code is
3412 /// generated at the start of outlined functions for directives:
3414 /// threadprivate_var1 = master_threadprivate_var1;
3415 /// operator=(threadprivate_var2, master_threadprivate_var2);
3417 /// __kmpc_barrier(&loc, global_tid);
3420 /// \param D OpenMP directive possibly with 'copyin' clause(s).
3421 /// \returns true if at least one copyin variable is found, false otherwise.
3422 bool EmitOMPCopyinClause(const OMPExecutableDirective
&D
);
3423 /// Emit initial code for lastprivate variables. If some variable is
3424 /// not also firstprivate, then the default initialization is used. Otherwise
3425 /// initialization of this variable is performed by EmitOMPFirstprivateClause
3428 /// \param D Directive that may have 'lastprivate' directives.
3429 /// \param PrivateScope Private scope for capturing lastprivate variables for
3430 /// proper codegen in internal captured statement.
3432 /// \returns true if there is at least one lastprivate variable, false
3434 bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective
&D
,
3435 OMPPrivateScope
&PrivateScope
);
3436 /// Emit final copying of lastprivate values to original variables at
3437 /// the end of the worksharing or simd directive.
3439 /// \param D Directive that has at least one 'lastprivate' directives.
3440 /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
3441 /// it is the last iteration of the loop code in associated directive, or to
3442 /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
3443 void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective
&D
,
3445 llvm::Value
*IsLastIterCond
= nullptr);
3446 /// Emit initial code for linear clauses.
3447 void EmitOMPLinearClause(const OMPLoopDirective
&D
,
3448 CodeGenFunction::OMPPrivateScope
&PrivateScope
);
3449 /// Emit final code for linear clauses.
3450 /// \param CondGen Optional conditional code for final part of codegen for
3452 void EmitOMPLinearClauseFinal(
3453 const OMPLoopDirective
&D
,
3454 const llvm::function_ref
<llvm::Value
*(CodeGenFunction
&)> CondGen
);
3455 /// Emit initial code for reduction variables. Creates reduction copies
3456 /// and initializes them with the values according to OpenMP standard.
3458 /// \param D Directive (possibly) with the 'reduction' clause.
3459 /// \param PrivateScope Private scope for capturing reduction variables for
3460 /// proper codegen in internal captured statement.
3462 void EmitOMPReductionClauseInit(const OMPExecutableDirective
&D
,
3463 OMPPrivateScope
&PrivateScope
,
3464 bool ForInscan
= false);
3465 /// Emit final update of reduction values to original variables at
3466 /// the end of the directive.
3468 /// \param D Directive that has at least one 'reduction' directives.
3469 /// \param ReductionKind The kind of reduction to perform.
3470 void EmitOMPReductionClauseFinal(const OMPExecutableDirective
&D
,
3471 const OpenMPDirectiveKind ReductionKind
);
3472 /// Emit initial code for linear variables. Creates private copies
3473 /// and initializes them with the values according to OpenMP standard.
3475 /// \param D Directive (possibly) with the 'linear' clause.
3476 /// \return true if at least one linear variable is found that should be
3477 /// initialized with the value of the original variable, false otherwise.
3478 bool EmitOMPLinearClauseInit(const OMPLoopDirective
&D
);
3480 typedef const llvm::function_ref
<void(CodeGenFunction
& /*CGF*/,
3481 llvm::Function
* /*OutlinedFn*/,
3482 const OMPTaskDataTy
& /*Data*/)>
3484 void EmitOMPTaskBasedDirective(const OMPExecutableDirective
&S
,
3485 const OpenMPDirectiveKind CapturedRegion
,
3486 const RegionCodeGenTy
&BodyGen
,
3487 const TaskGenTy
&TaskGen
, OMPTaskDataTy
&Data
);
3488 struct OMPTargetDataInfo
{
3489 Address BasePointersArray
= Address::invalid();
3490 Address PointersArray
= Address::invalid();
3491 Address SizesArray
= Address::invalid();
3492 Address MappersArray
= Address::invalid();
3493 unsigned NumberOfTargetItems
= 0;
3494 explicit OMPTargetDataInfo() = default;
3495 OMPTargetDataInfo(Address BasePointersArray
, Address PointersArray
,
3496 Address SizesArray
, Address MappersArray
,
3497 unsigned NumberOfTargetItems
)
3498 : BasePointersArray(BasePointersArray
), PointersArray(PointersArray
),
3499 SizesArray(SizesArray
), MappersArray(MappersArray
),
3500 NumberOfTargetItems(NumberOfTargetItems
) {}
3502 void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective
&S
,
3503 const RegionCodeGenTy
&BodyGen
,
3504 OMPTargetDataInfo
&InputInfo
);
3505 void processInReduction(const OMPExecutableDirective
&S
,
3506 OMPTaskDataTy
&Data
,
3507 CodeGenFunction
&CGF
,
3508 const CapturedStmt
*CS
,
3509 OMPPrivateScope
&Scope
);
3510 void EmitOMPMetaDirective(const OMPMetaDirective
&S
);
3511 void EmitOMPParallelDirective(const OMPParallelDirective
&S
);
3512 void EmitOMPSimdDirective(const OMPSimdDirective
&S
);
3513 void EmitOMPTileDirective(const OMPTileDirective
&S
);
3514 void EmitOMPUnrollDirective(const OMPUnrollDirective
&S
);
3515 void EmitOMPForDirective(const OMPForDirective
&S
);
3516 void EmitOMPForSimdDirective(const OMPForSimdDirective
&S
);
3517 void EmitOMPSectionsDirective(const OMPSectionsDirective
&S
);
3518 void EmitOMPSectionDirective(const OMPSectionDirective
&S
);
3519 void EmitOMPSingleDirective(const OMPSingleDirective
&S
);
3520 void EmitOMPMasterDirective(const OMPMasterDirective
&S
);
3521 void EmitOMPMaskedDirective(const OMPMaskedDirective
&S
);
3522 void EmitOMPCriticalDirective(const OMPCriticalDirective
&S
);
3523 void EmitOMPParallelForDirective(const OMPParallelForDirective
&S
);
3524 void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective
&S
);
3525 void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective
&S
);
3526 void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective
&S
);
3527 void EmitOMPTaskDirective(const OMPTaskDirective
&S
);
3528 void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective
&S
);
3529 void EmitOMPErrorDirective(const OMPErrorDirective
&S
);
3530 void EmitOMPBarrierDirective(const OMPBarrierDirective
&S
);
3531 void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective
&S
);
3532 void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective
&S
);
3533 void EmitOMPFlushDirective(const OMPFlushDirective
&S
);
3534 void EmitOMPDepobjDirective(const OMPDepobjDirective
&S
);
3535 void EmitOMPScanDirective(const OMPScanDirective
&S
);
3536 void EmitOMPOrderedDirective(const OMPOrderedDirective
&S
);
3537 void EmitOMPAtomicDirective(const OMPAtomicDirective
&S
);
3538 void EmitOMPTargetDirective(const OMPTargetDirective
&S
);
3539 void EmitOMPTargetDataDirective(const OMPTargetDataDirective
&S
);
3540 void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective
&S
);
3541 void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective
&S
);
3542 void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective
&S
);
3543 void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective
&S
);
3545 EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective
&S
);
3546 void EmitOMPTeamsDirective(const OMPTeamsDirective
&S
);
3548 EmitOMPCancellationPointDirective(const OMPCancellationPointDirective
&S
);
3549 void EmitOMPCancelDirective(const OMPCancelDirective
&S
);
3550 void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective
&S
);
3551 void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective
&S
);
3552 void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective
&S
);
3553 void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective
&S
);
3555 EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective
&S
);
3556 void EmitOMPParallelMasterTaskLoopDirective(
3557 const OMPParallelMasterTaskLoopDirective
&S
);
3558 void EmitOMPParallelMasterTaskLoopSimdDirective(
3559 const OMPParallelMasterTaskLoopSimdDirective
&S
);
3560 void EmitOMPDistributeDirective(const OMPDistributeDirective
&S
);
3561 void EmitOMPDistributeParallelForDirective(
3562 const OMPDistributeParallelForDirective
&S
);
3563 void EmitOMPDistributeParallelForSimdDirective(
3564 const OMPDistributeParallelForSimdDirective
&S
);
3565 void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective
&S
);
3566 void EmitOMPTargetParallelForSimdDirective(
3567 const OMPTargetParallelForSimdDirective
&S
);
3568 void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective
&S
);
3569 void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective
&S
);
3571 EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective
&S
);
3572 void EmitOMPTeamsDistributeParallelForSimdDirective(
3573 const OMPTeamsDistributeParallelForSimdDirective
&S
);
3574 void EmitOMPTeamsDistributeParallelForDirective(
3575 const OMPTeamsDistributeParallelForDirective
&S
);
3576 void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective
&S
);
3577 void EmitOMPTargetTeamsDistributeDirective(
3578 const OMPTargetTeamsDistributeDirective
&S
);
3579 void EmitOMPTargetTeamsDistributeParallelForDirective(
3580 const OMPTargetTeamsDistributeParallelForDirective
&S
);
3581 void EmitOMPTargetTeamsDistributeParallelForSimdDirective(
3582 const OMPTargetTeamsDistributeParallelForSimdDirective
&S
);
3583 void EmitOMPTargetTeamsDistributeSimdDirective(
3584 const OMPTargetTeamsDistributeSimdDirective
&S
);
3585 void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective
&S
);
3586 void EmitOMPInteropDirective(const OMPInteropDirective
&S
);
3587 void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective
&S
);
3589 /// Emit device code for the target directive.
3590 static void EmitOMPTargetDeviceFunction(CodeGenModule
&CGM
,
3591 StringRef ParentName
,
3592 const OMPTargetDirective
&S
);
3594 EmitOMPTargetParallelDeviceFunction(CodeGenModule
&CGM
, StringRef ParentName
,
3595 const OMPTargetParallelDirective
&S
);
3596 /// Emit device code for the target parallel for directive.
3597 static void EmitOMPTargetParallelForDeviceFunction(
3598 CodeGenModule
&CGM
, StringRef ParentName
,
3599 const OMPTargetParallelForDirective
&S
);
3600 /// Emit device code for the target parallel for simd directive.
3601 static void EmitOMPTargetParallelForSimdDeviceFunction(
3602 CodeGenModule
&CGM
, StringRef ParentName
,
3603 const OMPTargetParallelForSimdDirective
&S
);
3604 /// Emit device code for the target teams directive.
3606 EmitOMPTargetTeamsDeviceFunction(CodeGenModule
&CGM
, StringRef ParentName
,
3607 const OMPTargetTeamsDirective
&S
);
3608 /// Emit device code for the target teams distribute directive.
3609 static void EmitOMPTargetTeamsDistributeDeviceFunction(
3610 CodeGenModule
&CGM
, StringRef ParentName
,
3611 const OMPTargetTeamsDistributeDirective
&S
);
3612 /// Emit device code for the target teams distribute simd directive.
3613 static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(
3614 CodeGenModule
&CGM
, StringRef ParentName
,
3615 const OMPTargetTeamsDistributeSimdDirective
&S
);
3616 /// Emit device code for the target simd directive.
3617 static void EmitOMPTargetSimdDeviceFunction(CodeGenModule
&CGM
,
3618 StringRef ParentName
,
3619 const OMPTargetSimdDirective
&S
);
3620 /// Emit device code for the target teams distribute parallel for simd
3622 static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
3623 CodeGenModule
&CGM
, StringRef ParentName
,
3624 const OMPTargetTeamsDistributeParallelForSimdDirective
&S
);
3626 static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
3627 CodeGenModule
&CGM
, StringRef ParentName
,
3628 const OMPTargetTeamsDistributeParallelForDirective
&S
);
3630 /// Emit the Stmt \p S and return its topmost canonical loop, if any.
3631 /// TODO: The \p Depth parameter is not yet implemented and must be 1. In the
3632 /// future it is meant to be the number of loops expected in the loop nests
3633 /// (usually specified by the "collapse" clause) that are collapsed to a
3634 /// single loop by this function.
3635 llvm::CanonicalLoopInfo
*EmitOMPCollapsedCanonicalLoopNest(const Stmt
*S
,
3638 /// Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
3639 void EmitOMPCanonicalLoop(const OMPCanonicalLoop
*S
);
3641 /// Emit inner loop of the worksharing/simd construct.
3643 /// \param S Directive, for which the inner loop must be emitted.
3644 /// \param RequiresCleanup true, if directive has some associated private
3646 /// \param LoopCond Boolean condition for loop continuation.
3647 /// \param IncExpr Increment expression for loop control variable.
3648 /// \param BodyGen Generator for the inner body of the inner loop.
3649 /// \param PostIncGen Generator for post-increment code (required for ordered
3650 /// loop directives).
3651 void EmitOMPInnerLoop(
3652 const OMPExecutableDirective
&S
, bool RequiresCleanup
,
3653 const Expr
*LoopCond
, const Expr
*IncExpr
,
3654 const llvm::function_ref
<void(CodeGenFunction
&)> BodyGen
,
3655 const llvm::function_ref
<void(CodeGenFunction
&)> PostIncGen
);
3657 JumpDest
getOMPCancelDestination(OpenMPDirectiveKind Kind
);
3658 /// Emit initial code for loop counters of loop-based directives.
3659 void EmitOMPPrivateLoopCounters(const OMPLoopDirective
&S
,
3660 OMPPrivateScope
&LoopScope
);
3662 /// Helper for the OpenMP loop directives.
3663 void EmitOMPLoopBody(const OMPLoopDirective
&D
, JumpDest LoopExit
);
3665 /// Emit code for the worksharing loop-based directive.
3666 /// \return true, if this construct has any lastprivate clause, false -
3668 bool EmitOMPWorksharingLoop(const OMPLoopDirective
&S
, Expr
*EUB
,
3669 const CodeGenLoopBoundsTy
&CodeGenLoopBounds
,
3670 const CodeGenDispatchBoundsTy
&CGDispatchBounds
);
3672 /// Emit code for the distribute loop-based directive.
3673 void EmitOMPDistributeLoop(const OMPLoopDirective
&S
,
3674 const CodeGenLoopTy
&CodeGenLoop
, Expr
*IncExpr
);
3676 /// Helpers for the OpenMP loop directives.
3677 void EmitOMPSimdInit(const OMPLoopDirective
&D
);
3678 void EmitOMPSimdFinal(
3679 const OMPLoopDirective
&D
,
3680 const llvm::function_ref
<llvm::Value
*(CodeGenFunction
&)> CondGen
);
3682 /// Emits the lvalue for the expression with possibly captured variable.
3683 LValue
EmitOMPSharedLValue(const Expr
*E
);
3686 /// Helpers for blocks.
3687 llvm::Value
*EmitBlockLiteral(const CGBlockInfo
&Info
);
3689 /// struct with the values to be passed to the OpenMP loop-related functions
3690 struct OMPLoopArguments
{
3691 /// loop lower bound
3692 Address LB
= Address::invalid();
3693 /// loop upper bound
3694 Address UB
= Address::invalid();
3696 Address ST
= Address::invalid();
3697 /// isLastIteration argument for runtime functions
3698 Address IL
= Address::invalid();
3699 /// Chunk value generated by sema
3700 llvm::Value
*Chunk
= nullptr;
3701 /// EnsureUpperBound
3702 Expr
*EUB
= nullptr;
3703 /// IncrementExpression
3704 Expr
*IncExpr
= nullptr;
3705 /// Loop initialization
3706 Expr
*Init
= nullptr;
3707 /// Loop exit condition
3708 Expr
*Cond
= nullptr;
3709 /// Update of LB after a whole chunk has been executed
3710 Expr
*NextLB
= nullptr;
3711 /// Update of UB after a whole chunk has been executed
3712 Expr
*NextUB
= nullptr;
3713 OMPLoopArguments() = default;
3714 OMPLoopArguments(Address LB
, Address UB
, Address ST
, Address IL
,
3715 llvm::Value
*Chunk
= nullptr, Expr
*EUB
= nullptr,
3716 Expr
*IncExpr
= nullptr, Expr
*Init
= nullptr,
3717 Expr
*Cond
= nullptr, Expr
*NextLB
= nullptr,
3718 Expr
*NextUB
= nullptr)
3719 : LB(LB
), UB(UB
), ST(ST
), IL(IL
), Chunk(Chunk
), EUB(EUB
),
3720 IncExpr(IncExpr
), Init(Init
), Cond(Cond
), NextLB(NextLB
),
3723 void EmitOMPOuterLoop(bool DynamicOrOrdered
, bool IsMonotonic
,
3724 const OMPLoopDirective
&S
, OMPPrivateScope
&LoopScope
,
3725 const OMPLoopArguments
&LoopArgs
,
3726 const CodeGenLoopTy
&CodeGenLoop
,
3727 const CodeGenOrderedTy
&CodeGenOrdered
);
3728 void EmitOMPForOuterLoop(const OpenMPScheduleTy
&ScheduleKind
,
3729 bool IsMonotonic
, const OMPLoopDirective
&S
,
3730 OMPPrivateScope
&LoopScope
, bool Ordered
,
3731 const OMPLoopArguments
&LoopArgs
,
3732 const CodeGenDispatchBoundsTy
&CGDispatchBounds
);
3733 void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind
,
3734 const OMPLoopDirective
&S
,
3735 OMPPrivateScope
&LoopScope
,
3736 const OMPLoopArguments
&LoopArgs
,
3737 const CodeGenLoopTy
&CodeGenLoopContent
);
3738 /// Emit code for sections directive.
3739 void EmitSections(const OMPExecutableDirective
&S
);
3743 //===--------------------------------------------------------------------===//
3744 // LValue Expression Emission
3745 //===--------------------------------------------------------------------===//
3747 /// Create a check that a scalar RValue is non-null.
3748 llvm::Value
*EmitNonNullRValueCheck(RValue RV
, QualType T
);
3750 /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
3751 RValue
GetUndefRValue(QualType Ty
);
3753 /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
3754 /// and issue an ErrorUnsupported style diagnostic (using the
3756 RValue
EmitUnsupportedRValue(const Expr
*E
,
3759 /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
3760 /// an ErrorUnsupported style diagnostic (using the provided Name).
3761 LValue
EmitUnsupportedLValue(const Expr
*E
,
3764 /// EmitLValue - Emit code to compute a designator that specifies the location
3765 /// of the expression.
3767 /// This can return one of two things: a simple address or a bitfield
3768 /// reference. In either case, the LLVM Value* in the LValue structure is
3769 /// guaranteed to be an LLVM pointer type.
3771 /// If this returns a bitfield reference, nothing about the pointee type of
3772 /// the LLVM value is known: For example, it may not be a pointer to an
3775 /// If this returns a normal address, and if the lvalue's C type is fixed
3776 /// size, this method guarantees that the returned pointer type will point to
3777 /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
3778 /// variable length type, this is not possible.
3780 LValue
EmitLValue(const Expr
*E
,
3781 KnownNonNull_t IsKnownNonNull
= NotKnownNonNull
);
3784 LValue
EmitLValueHelper(const Expr
*E
, KnownNonNull_t IsKnownNonNull
);
3787 /// Same as EmitLValue but additionally we generate checking code to
3788 /// guard against undefined behavior. This is only suitable when we know
3789 /// that the address will be used to access the object.
3790 LValue
EmitCheckedLValue(const Expr
*E
, TypeCheckKind TCK
);
3792 RValue
convertTempToRValue(Address addr
, QualType type
,
3793 SourceLocation Loc
);
3795 void EmitAtomicInit(Expr
*E
, LValue lvalue
);
3797 bool LValueIsSuitableForInlineAtomic(LValue Src
);
3799 RValue
EmitAtomicLoad(LValue LV
, SourceLocation SL
,
3800 AggValueSlot Slot
= AggValueSlot::ignored());
3802 RValue
EmitAtomicLoad(LValue lvalue
, SourceLocation loc
,
3803 llvm::AtomicOrdering AO
, bool IsVolatile
= false,
3804 AggValueSlot slot
= AggValueSlot::ignored());
3806 void EmitAtomicStore(RValue rvalue
, LValue lvalue
, bool isInit
);
3808 void EmitAtomicStore(RValue rvalue
, LValue lvalue
, llvm::AtomicOrdering AO
,
3809 bool IsVolatile
, bool isInit
);
3811 std::pair
<RValue
, llvm::Value
*> EmitAtomicCompareExchange(
3812 LValue Obj
, RValue Expected
, RValue Desired
, SourceLocation Loc
,
3813 llvm::AtomicOrdering Success
=
3814 llvm::AtomicOrdering::SequentiallyConsistent
,
3815 llvm::AtomicOrdering Failure
=
3816 llvm::AtomicOrdering::SequentiallyConsistent
,
3817 bool IsWeak
= false, AggValueSlot Slot
= AggValueSlot::ignored());
3819 void EmitAtomicUpdate(LValue LVal
, llvm::AtomicOrdering AO
,
3820 const llvm::function_ref
<RValue(RValue
)> &UpdateOp
,
3823 /// EmitToMemory - Change a scalar value from its value
3824 /// representation to its in-memory representation.
3825 llvm::Value
*EmitToMemory(llvm::Value
*Value
, QualType Ty
);
3827 /// EmitFromMemory - Change a scalar value from its memory
3828 /// representation to its value representation.
3829 llvm::Value
*EmitFromMemory(llvm::Value
*Value
, QualType Ty
);
3831 /// Check if the scalar \p Value is within the valid range for the given
3834 /// Returns true if a check is needed (even if the range is unknown).
3835 bool EmitScalarRangeCheck(llvm::Value
*Value
, QualType Ty
,
3836 SourceLocation Loc
);
3838 /// EmitLoadOfScalar - Load a scalar value from an address, taking
3839 /// care to appropriately convert from the memory representation to
3840 /// the LLVM value representation.
3841 llvm::Value
*EmitLoadOfScalar(Address Addr
, bool Volatile
, QualType Ty
,
3843 AlignmentSource Source
= AlignmentSource::Type
,
3844 bool isNontemporal
= false) {
3845 return EmitLoadOfScalar(Addr
, Volatile
, Ty
, Loc
, LValueBaseInfo(Source
),
3846 CGM
.getTBAAAccessInfo(Ty
), isNontemporal
);
3849 llvm::Value
*EmitLoadOfScalar(Address Addr
, bool Volatile
, QualType Ty
,
3850 SourceLocation Loc
, LValueBaseInfo BaseInfo
,
3851 TBAAAccessInfo TBAAInfo
,
3852 bool isNontemporal
= false);
3854 /// EmitLoadOfScalar - Load a scalar value from an address, taking
3855 /// care to appropriately convert from the memory representation to
3856 /// the LLVM value representation. The l-value must be a simple
3858 llvm::Value
*EmitLoadOfScalar(LValue lvalue
, SourceLocation Loc
);
3860 /// EmitStoreOfScalar - Store a scalar value to an address, taking
3861 /// care to appropriately convert from the memory representation to
3862 /// the LLVM value representation.
3863 void EmitStoreOfScalar(llvm::Value
*Value
, Address Addr
,
3864 bool Volatile
, QualType Ty
,
3865 AlignmentSource Source
= AlignmentSource::Type
,
3866 bool isInit
= false, bool isNontemporal
= false) {
3867 EmitStoreOfScalar(Value
, Addr
, Volatile
, Ty
, LValueBaseInfo(Source
),
3868 CGM
.getTBAAAccessInfo(Ty
), isInit
, isNontemporal
);
3871 void EmitStoreOfScalar(llvm::Value
*Value
, Address Addr
,
3872 bool Volatile
, QualType Ty
,
3873 LValueBaseInfo BaseInfo
, TBAAAccessInfo TBAAInfo
,
3874 bool isInit
= false, bool isNontemporal
= false);
3876 /// EmitStoreOfScalar - Store a scalar value to an address, taking
3877 /// care to appropriately convert from the memory representation to
3878 /// the LLVM value representation. The l-value must be a simple
3879 /// l-value. The isInit flag indicates whether this is an initialization.
3880 /// If so, atomic qualifiers are ignored and the store is always non-atomic.
3881 void EmitStoreOfScalar(llvm::Value
*value
, LValue lvalue
, bool isInit
=false);
3883 /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
3884 /// this method emits the address of the lvalue, then loads the result as an
3885 /// rvalue, returning the rvalue.
3886 RValue
EmitLoadOfLValue(LValue V
, SourceLocation Loc
);
3887 RValue
EmitLoadOfExtVectorElementLValue(LValue V
);
3888 RValue
EmitLoadOfBitfieldLValue(LValue LV
, SourceLocation Loc
);
3889 RValue
EmitLoadOfGlobalRegLValue(LValue LV
);
3891 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
3892 /// lvalue, where both are guaranteed to have the same type, and that type
3894 void EmitStoreThroughLValue(RValue Src
, LValue Dst
, bool isInit
= false);
3895 void EmitStoreThroughExtVectorComponentLValue(RValue Src
, LValue Dst
);
3896 void EmitStoreThroughGlobalRegLValue(RValue Src
, LValue Dst
);
3898 /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
3899 /// as EmitStoreThroughLValue.
3901 /// \param Result [out] - If non-null, this will be set to a Value* for the
3902 /// bit-field contents after the store, appropriate for use as the result of
3903 /// an assignment to the bit-field.
3904 void EmitStoreThroughBitfieldLValue(RValue Src
, LValue Dst
,
3905 llvm::Value
**Result
=nullptr);
3907 /// Emit an l-value for an assignment (simple or compound) of complex type.
3908 LValue
EmitComplexAssignmentLValue(const BinaryOperator
*E
);
3909 LValue
EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator
*E
);
3910 LValue
EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator
*E
,
3911 llvm::Value
*&Result
);
3913 // Note: only available for agg return types
3914 LValue
EmitBinaryOperatorLValue(const BinaryOperator
*E
);
3915 LValue
EmitCompoundAssignmentLValue(const CompoundAssignOperator
*E
);
3916 // Note: only available for agg return types
3917 LValue
EmitCallExprLValue(const CallExpr
*E
);
3918 // Note: only available for agg return types
3919 LValue
EmitVAArgExprLValue(const VAArgExpr
*E
);
3920 LValue
EmitDeclRefLValue(const DeclRefExpr
*E
);
3921 LValue
EmitStringLiteralLValue(const StringLiteral
*E
);
3922 LValue
EmitObjCEncodeExprLValue(const ObjCEncodeExpr
*E
);
3923 LValue
EmitPredefinedLValue(const PredefinedExpr
*E
);
3924 LValue
EmitUnaryOpLValue(const UnaryOperator
*E
);
3925 LValue
EmitArraySubscriptExpr(const ArraySubscriptExpr
*E
,
3926 bool Accessed
= false);
3927 LValue
EmitMatrixSubscriptExpr(const MatrixSubscriptExpr
*E
);
3928 LValue
EmitOMPArraySectionExpr(const OMPArraySectionExpr
*E
,
3929 bool IsLowerBound
= true);
3930 LValue
EmitExtVectorElementExpr(const ExtVectorElementExpr
*E
);
3931 LValue
EmitMemberExpr(const MemberExpr
*E
);
3932 LValue
EmitObjCIsaExpr(const ObjCIsaExpr
*E
);
3933 LValue
EmitCompoundLiteralLValue(const CompoundLiteralExpr
*E
);
3934 LValue
EmitInitListLValue(const InitListExpr
*E
);
3935 void EmitIgnoredConditionalOperator(const AbstractConditionalOperator
*E
);
3936 LValue
EmitConditionalOperatorLValue(const AbstractConditionalOperator
*E
);
3937 LValue
EmitCastLValue(const CastExpr
*E
);
3938 LValue
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr
*E
);
3939 LValue
EmitOpaqueValueLValue(const OpaqueValueExpr
*e
);
3941 Address
EmitExtVectorElementLValue(LValue V
);
3943 RValue
EmitRValueForField(LValue LV
, const FieldDecl
*FD
, SourceLocation Loc
);
3945 Address
EmitArrayToPointerDecay(const Expr
*Array
,
3946 LValueBaseInfo
*BaseInfo
= nullptr,
3947 TBAAAccessInfo
*TBAAInfo
= nullptr);
3949 class ConstantEmission
{
3950 llvm::PointerIntPair
<llvm::Constant
*, 1, bool> ValueAndIsReference
;
3951 ConstantEmission(llvm::Constant
*C
, bool isReference
)
3952 : ValueAndIsReference(C
, isReference
) {}
3954 ConstantEmission() {}
3955 static ConstantEmission
forReference(llvm::Constant
*C
) {
3956 return ConstantEmission(C
, true);
3958 static ConstantEmission
forValue(llvm::Constant
*C
) {
3959 return ConstantEmission(C
, false);
3962 explicit operator bool() const {
3963 return ValueAndIsReference
.getOpaqueValue() != nullptr;
3966 bool isReference() const { return ValueAndIsReference
.getInt(); }
3967 LValue
getReferenceLValue(CodeGenFunction
&CGF
, Expr
*refExpr
) const {
3968 assert(isReference());
3969 return CGF
.MakeNaturalAlignAddrLValue(ValueAndIsReference
.getPointer(),
3970 refExpr
->getType());
3973 llvm::Constant
*getValue() const {
3974 assert(!isReference());
3975 return ValueAndIsReference
.getPointer();
3979 ConstantEmission
tryEmitAsConstant(DeclRefExpr
*refExpr
);
3980 ConstantEmission
tryEmitAsConstant(const MemberExpr
*ME
);
3981 llvm::Value
*emitScalarConstant(const ConstantEmission
&Constant
, Expr
*E
);
3983 RValue
EmitPseudoObjectRValue(const PseudoObjectExpr
*e
,
3984 AggValueSlot slot
= AggValueSlot::ignored());
3985 LValue
EmitPseudoObjectLValue(const PseudoObjectExpr
*e
);
3987 llvm::Value
*EmitIvarOffset(const ObjCInterfaceDecl
*Interface
,
3988 const ObjCIvarDecl
*Ivar
);
3989 llvm::Value
*EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl
*Interface
,
3990 const ObjCIvarDecl
*Ivar
);
3991 LValue
EmitLValueForField(LValue Base
, const FieldDecl
* Field
);
3992 LValue
EmitLValueForLambdaField(const FieldDecl
*Field
);
3994 /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
3995 /// if the Field is a reference, this will return the address of the reference
3996 /// and not the address of the value stored in the reference.
3997 LValue
EmitLValueForFieldInitialization(LValue Base
,
3998 const FieldDecl
* Field
);
4000 LValue
EmitLValueForIvar(QualType ObjectTy
,
4001 llvm::Value
* Base
, const ObjCIvarDecl
*Ivar
,
4002 unsigned CVRQualifiers
);
4004 LValue
EmitCXXConstructLValue(const CXXConstructExpr
*E
);
4005 LValue
EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr
*E
);
4006 LValue
EmitCXXTypeidLValue(const CXXTypeidExpr
*E
);
4007 LValue
EmitCXXUuidofLValue(const CXXUuidofExpr
*E
);
4009 LValue
EmitObjCMessageExprLValue(const ObjCMessageExpr
*E
);
4010 LValue
EmitObjCIvarRefLValue(const ObjCIvarRefExpr
*E
);
4011 LValue
EmitStmtExprLValue(const StmtExpr
*E
);
4012 LValue
EmitPointerToDataMemberBinaryExpr(const BinaryOperator
*E
);
4013 LValue
EmitObjCSelectorLValue(const ObjCSelectorExpr
*E
);
4014 void EmitDeclRefExprDbgValue(const DeclRefExpr
*E
, const APValue
&Init
);
4016 //===--------------------------------------------------------------------===//
4017 // Scalar Expression Emission
4018 //===--------------------------------------------------------------------===//
4020 /// EmitCall - Generate a call of the given function, expecting the given
4021 /// result type, and using the given argument list which specifies both the
4022 /// LLVM arguments and the types they were derived from.
4023 RValue
EmitCall(const CGFunctionInfo
&CallInfo
, const CGCallee
&Callee
,
4024 ReturnValueSlot ReturnValue
, const CallArgList
&Args
,
4025 llvm::CallBase
**callOrInvoke
, bool IsMustTail
,
4026 SourceLocation Loc
);
4027 RValue
EmitCall(const CGFunctionInfo
&CallInfo
, const CGCallee
&Callee
,
4028 ReturnValueSlot ReturnValue
, const CallArgList
&Args
,
4029 llvm::CallBase
**callOrInvoke
= nullptr,
4030 bool IsMustTail
= false) {
4031 return EmitCall(CallInfo
, Callee
, ReturnValue
, Args
, callOrInvoke
,
4032 IsMustTail
, SourceLocation());
4034 RValue
EmitCall(QualType FnType
, const CGCallee
&Callee
, const CallExpr
*E
,
4035 ReturnValueSlot ReturnValue
, llvm::Value
*Chain
= nullptr);
4036 RValue
EmitCallExpr(const CallExpr
*E
,
4037 ReturnValueSlot ReturnValue
= ReturnValueSlot());
4038 RValue
EmitSimpleCallExpr(const CallExpr
*E
, ReturnValueSlot ReturnValue
);
4039 CGCallee
EmitCallee(const Expr
*E
);
4041 void checkTargetFeatures(const CallExpr
*E
, const FunctionDecl
*TargetDecl
);
4042 void checkTargetFeatures(SourceLocation Loc
, const FunctionDecl
*TargetDecl
);
4044 llvm::CallInst
*EmitRuntimeCall(llvm::FunctionCallee callee
,
4045 const Twine
&name
= "");
4046 llvm::CallInst
*EmitRuntimeCall(llvm::FunctionCallee callee
,
4047 ArrayRef
<llvm::Value
*> args
,
4048 const Twine
&name
= "");
4049 llvm::CallInst
*EmitNounwindRuntimeCall(llvm::FunctionCallee callee
,
4050 const Twine
&name
= "");
4051 llvm::CallInst
*EmitNounwindRuntimeCall(llvm::FunctionCallee callee
,
4052 ArrayRef
<llvm::Value
*> args
,
4053 const Twine
&name
= "");
4055 SmallVector
<llvm::OperandBundleDef
, 1>
4056 getBundlesForFunclet(llvm::Value
*Callee
);
4058 llvm::CallBase
*EmitCallOrInvoke(llvm::FunctionCallee Callee
,
4059 ArrayRef
<llvm::Value
*> Args
,
4060 const Twine
&Name
= "");
4061 llvm::CallBase
*EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee
,
4062 ArrayRef
<llvm::Value
*> args
,
4063 const Twine
&name
= "");
4064 llvm::CallBase
*EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee
,
4065 const Twine
&name
= "");
4066 void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee
,
4067 ArrayRef
<llvm::Value
*> args
);
4069 CGCallee
BuildAppleKextVirtualCall(const CXXMethodDecl
*MD
,
4070 NestedNameSpecifier
*Qual
,
4073 CGCallee
BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl
*DD
,
4075 const CXXRecordDecl
*RD
);
4077 // Return the copy constructor name with the prefix "__copy_constructor_"
4079 static std::string
getNonTrivialCopyConstructorStr(QualType QT
,
4080 CharUnits Alignment
,
4084 // Return the destructor name with the prefix "__destructor_" removed.
4085 static std::string
getNonTrivialDestructorStr(QualType QT
,
4086 CharUnits Alignment
,
4090 // These functions emit calls to the special functions of non-trivial C
4092 void defaultInitNonTrivialCStructVar(LValue Dst
);
4093 void callCStructDefaultConstructor(LValue Dst
);
4094 void callCStructDestructor(LValue Dst
);
4095 void callCStructCopyConstructor(LValue Dst
, LValue Src
);
4096 void callCStructMoveConstructor(LValue Dst
, LValue Src
);
4097 void callCStructCopyAssignmentOperator(LValue Dst
, LValue Src
);
4098 void callCStructMoveAssignmentOperator(LValue Dst
, LValue Src
);
4101 EmitCXXMemberOrOperatorCall(const CXXMethodDecl
*Method
,
4102 const CGCallee
&Callee
,
4103 ReturnValueSlot ReturnValue
, llvm::Value
*This
,
4104 llvm::Value
*ImplicitParam
,
4105 QualType ImplicitParamTy
, const CallExpr
*E
,
4106 CallArgList
*RtlArgs
);
4107 RValue
EmitCXXDestructorCall(GlobalDecl Dtor
, const CGCallee
&Callee
,
4108 llvm::Value
*This
, QualType ThisTy
,
4109 llvm::Value
*ImplicitParam
,
4110 QualType ImplicitParamTy
, const CallExpr
*E
);
4111 RValue
EmitCXXMemberCallExpr(const CXXMemberCallExpr
*E
,
4112 ReturnValueSlot ReturnValue
);
4113 RValue
EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr
*CE
,
4114 const CXXMethodDecl
*MD
,
4115 ReturnValueSlot ReturnValue
,
4117 NestedNameSpecifier
*Qualifier
,
4118 bool IsArrow
, const Expr
*Base
);
4119 // Compute the object pointer.
4120 Address
EmitCXXMemberDataPointerAddress(const Expr
*E
, Address base
,
4121 llvm::Value
*memberPtr
,
4122 const MemberPointerType
*memberPtrType
,
4123 LValueBaseInfo
*BaseInfo
= nullptr,
4124 TBAAAccessInfo
*TBAAInfo
= nullptr);
4125 RValue
EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr
*E
,
4126 ReturnValueSlot ReturnValue
);
4128 RValue
EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr
*E
,
4129 const CXXMethodDecl
*MD
,
4130 ReturnValueSlot ReturnValue
);
4131 RValue
EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr
*E
);
4133 RValue
EmitCUDAKernelCallExpr(const CUDAKernelCallExpr
*E
,
4134 ReturnValueSlot ReturnValue
);
4136 RValue
EmitNVPTXDevicePrintfCallExpr(const CallExpr
*E
);
4137 RValue
EmitAMDGPUDevicePrintfCallExpr(const CallExpr
*E
);
4138 RValue
EmitOpenMPDevicePrintfCallExpr(const CallExpr
*E
);
4140 RValue
EmitBuiltinExpr(const GlobalDecl GD
, unsigned BuiltinID
,
4141 const CallExpr
*E
, ReturnValueSlot ReturnValue
);
4143 RValue
emitRotate(const CallExpr
*E
, bool IsRotateRight
);
4145 /// Emit IR for __builtin_os_log_format.
4146 RValue
emitBuiltinOSLogFormat(const CallExpr
&E
);
4148 /// Emit IR for __builtin_is_aligned.
4149 RValue
EmitBuiltinIsAligned(const CallExpr
*E
);
4150 /// Emit IR for __builtin_align_up/__builtin_align_down.
4151 RValue
EmitBuiltinAlignTo(const CallExpr
*E
, bool AlignUp
);
4153 llvm::Function
*generateBuiltinOSLogHelperFunction(
4154 const analyze_os_log::OSLogBufferLayout
&Layout
,
4155 CharUnits BufferAlignment
);
4157 RValue
EmitBlockCallExpr(const CallExpr
*E
, ReturnValueSlot ReturnValue
);
4159 /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
4160 /// is unhandled by the current target.
4161 llvm::Value
*EmitTargetBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
,
4162 ReturnValueSlot ReturnValue
);
4164 llvm::Value
*EmitAArch64CompareBuiltinExpr(llvm::Value
*Op
, llvm::Type
*Ty
,
4165 const llvm::CmpInst::Predicate Fp
,
4166 const llvm::CmpInst::Predicate Ip
,
4167 const llvm::Twine
&Name
= "");
4168 llvm::Value
*EmitARMBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
,
4169 ReturnValueSlot ReturnValue
,
4170 llvm::Triple::ArchType Arch
);
4171 llvm::Value
*EmitARMMVEBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
,
4172 ReturnValueSlot ReturnValue
,
4173 llvm::Triple::ArchType Arch
);
4174 llvm::Value
*EmitARMCDEBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
,
4175 ReturnValueSlot ReturnValue
,
4176 llvm::Triple::ArchType Arch
);
4177 llvm::Value
*EmitCMSEClearRecord(llvm::Value
*V
, llvm::IntegerType
*ITy
,
4179 llvm::Value
*EmitCMSEClearRecord(llvm::Value
*V
, llvm::ArrayType
*ATy
,
4182 llvm::Value
*EmitCommonNeonBuiltinExpr(unsigned BuiltinID
,
4183 unsigned LLVMIntrinsic
,
4184 unsigned AltLLVMIntrinsic
,
4185 const char *NameHint
,
4188 SmallVectorImpl
<llvm::Value
*> &Ops
,
4189 Address PtrOp0
, Address PtrOp1
,
4190 llvm::Triple::ArchType Arch
);
4192 llvm::Function
*LookupNeonLLVMIntrinsic(unsigned IntrinsicID
,
4193 unsigned Modifier
, llvm::Type
*ArgTy
,
4195 llvm::Value
*EmitNeonCall(llvm::Function
*F
,
4196 SmallVectorImpl
<llvm::Value
*> &O
,
4198 unsigned shift
= 0, bool rightshift
= false);
4199 llvm::Value
*EmitNeonSplat(llvm::Value
*V
, llvm::Constant
*Idx
,
4200 const llvm::ElementCount
&Count
);
4201 llvm::Value
*EmitNeonSplat(llvm::Value
*V
, llvm::Constant
*Idx
);
4202 llvm::Value
*EmitNeonShiftVector(llvm::Value
*V
, llvm::Type
*Ty
,
4203 bool negateForRightShift
);
4204 llvm::Value
*EmitNeonRShiftImm(llvm::Value
*Vec
, llvm::Value
*Amt
,
4205 llvm::Type
*Ty
, bool usgn
, const char *name
);
4206 llvm::Value
*vectorWrapScalar16(llvm::Value
*Op
);
4207 /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
4208 /// access builtin. Only required if it can't be inferred from the base
4209 /// pointer operand.
4210 llvm::Type
*SVEBuiltinMemEltTy(const SVETypeFlags
&TypeFlags
);
4212 SmallVector
<llvm::Type
*, 2>
4213 getSVEOverloadTypes(const SVETypeFlags
&TypeFlags
, llvm::Type
*ReturnType
,
4214 ArrayRef
<llvm::Value
*> Ops
);
4215 llvm::Type
*getEltType(const SVETypeFlags
&TypeFlags
);
4216 llvm::ScalableVectorType
*getSVEType(const SVETypeFlags
&TypeFlags
);
4217 llvm::ScalableVectorType
*getSVEPredType(const SVETypeFlags
&TypeFlags
);
4218 llvm::Value
*EmitSVETupleSetOrGet(const SVETypeFlags
&TypeFlags
,
4219 llvm::Type
*ReturnType
,
4220 ArrayRef
<llvm::Value
*> Ops
);
4221 llvm::Value
*EmitSVETupleCreate(const SVETypeFlags
&TypeFlags
,
4222 llvm::Type
*ReturnType
,
4223 ArrayRef
<llvm::Value
*> Ops
);
4224 llvm::Value
*EmitSVEAllTruePred(const SVETypeFlags
&TypeFlags
);
4225 llvm::Value
*EmitSVEDupX(llvm::Value
*Scalar
);
4226 llvm::Value
*EmitSVEDupX(llvm::Value
*Scalar
, llvm::Type
*Ty
);
4227 llvm::Value
*EmitSVEReinterpret(llvm::Value
*Val
, llvm::Type
*Ty
);
4228 llvm::Value
*EmitSVEPMull(const SVETypeFlags
&TypeFlags
,
4229 llvm::SmallVectorImpl
<llvm::Value
*> &Ops
,
4230 unsigned BuiltinID
);
4231 llvm::Value
*EmitSVEMovl(const SVETypeFlags
&TypeFlags
,
4232 llvm::ArrayRef
<llvm::Value
*> Ops
,
4233 unsigned BuiltinID
);
4234 llvm::Value
*EmitSVEPredicateCast(llvm::Value
*Pred
,
4235 llvm::ScalableVectorType
*VTy
);
4236 llvm::Value
*EmitSVEGatherLoad(const SVETypeFlags
&TypeFlags
,
4237 llvm::SmallVectorImpl
<llvm::Value
*> &Ops
,
4239 llvm::Value
*EmitSVEScatterStore(const SVETypeFlags
&TypeFlags
,
4240 llvm::SmallVectorImpl
<llvm::Value
*> &Ops
,
4242 llvm::Value
*EmitSVEMaskedLoad(const CallExpr
*, llvm::Type
*ReturnTy
,
4243 SmallVectorImpl
<llvm::Value
*> &Ops
,
4244 unsigned BuiltinID
, bool IsZExtReturn
);
4245 llvm::Value
*EmitSVEMaskedStore(const CallExpr
*,
4246 SmallVectorImpl
<llvm::Value
*> &Ops
,
4247 unsigned BuiltinID
);
4248 llvm::Value
*EmitTileslice(llvm::Value
*Offset
, llvm::Value
*Base
);
4249 llvm::Value
*EmitSVEPrefetchLoad(const SVETypeFlags
&TypeFlags
,
4250 SmallVectorImpl
<llvm::Value
*> &Ops
,
4251 unsigned BuiltinID
);
4252 llvm::Value
*EmitSVEGatherPrefetch(const SVETypeFlags
&TypeFlags
,
4253 SmallVectorImpl
<llvm::Value
*> &Ops
,
4255 llvm::Value
*EmitSVEStructLoad(const SVETypeFlags
&TypeFlags
,
4256 SmallVectorImpl
<llvm::Value
*> &Ops
,
4258 llvm::Value
*EmitSVEStructStore(const SVETypeFlags
&TypeFlags
,
4259 SmallVectorImpl
<llvm::Value
*> &Ops
,
4261 llvm::Value
*EmitAArch64SVEBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
);
4263 llvm::Value
*EmitSMELd1St1(SVETypeFlags TypeFlags
,
4264 llvm::SmallVectorImpl
<llvm::Value
*> &Ops
,
4266 llvm::Value
*EmitAArch64SMEBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
);
4268 llvm::Value
*EmitAArch64BuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
,
4269 llvm::Triple::ArchType Arch
);
4270 llvm::Value
*EmitBPFBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
);
4272 llvm::Value
*BuildVector(ArrayRef
<llvm::Value
*> Ops
);
4273 llvm::Value
*EmitX86BuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
);
4274 llvm::Value
*EmitPPCBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
);
4275 llvm::Value
*EmitAMDGPUBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
);
4276 llvm::Value
*EmitSystemZBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
);
4277 llvm::Value
*EmitNVPTXBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
);
4278 llvm::Value
*EmitWebAssemblyBuiltinExpr(unsigned BuiltinID
,
4280 llvm::Value
*EmitHexagonBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
);
4281 llvm::Value
*EmitRISCVBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
,
4282 ReturnValueSlot ReturnValue
);
4283 llvm::Value
*EmitLoongArchBuiltinExpr(unsigned BuiltinID
, const CallExpr
*E
);
4284 void ProcessOrderScopeAMDGCN(llvm::Value
*Order
, llvm::Value
*Scope
,
4285 llvm::AtomicOrdering
&AO
,
4286 llvm::SyncScope::ID
&SSID
);
4288 enum class MSVCIntrin
;
4289 llvm::Value
*EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID
, const CallExpr
*E
);
4291 llvm::Value
*EmitBuiltinAvailable(const VersionTuple
&Version
);
4293 llvm::Value
*EmitObjCProtocolExpr(const ObjCProtocolExpr
*E
);
4294 llvm::Value
*EmitObjCStringLiteral(const ObjCStringLiteral
*E
);
4295 llvm::Value
*EmitObjCBoxedExpr(const ObjCBoxedExpr
*E
);
4296 llvm::Value
*EmitObjCArrayLiteral(const ObjCArrayLiteral
*E
);
4297 llvm::Value
*EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral
*E
);
4298 llvm::Value
*EmitObjCCollectionLiteral(const Expr
*E
,
4299 const ObjCMethodDecl
*MethodWithObjects
);
4300 llvm::Value
*EmitObjCSelectorExpr(const ObjCSelectorExpr
*E
);
4301 RValue
EmitObjCMessageExpr(const ObjCMessageExpr
*E
,
4302 ReturnValueSlot Return
= ReturnValueSlot());
4304 /// Retrieves the default cleanup kind for an ARC cleanup.
4305 /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
4306 CleanupKind
getARCCleanupKind() {
4307 return CGM
.getCodeGenOpts().ObjCAutoRefCountExceptions
4308 ? NormalAndEHCleanup
: NormalCleanup
;
4312 void EmitARCInitWeak(Address addr
, llvm::Value
*value
);
4313 void EmitARCDestroyWeak(Address addr
);
4314 llvm::Value
*EmitARCLoadWeak(Address addr
);
4315 llvm::Value
*EmitARCLoadWeakRetained(Address addr
);
4316 llvm::Value
*EmitARCStoreWeak(Address addr
, llvm::Value
*value
, bool ignored
);
4317 void emitARCCopyAssignWeak(QualType Ty
, Address DstAddr
, Address SrcAddr
);
4318 void emitARCMoveAssignWeak(QualType Ty
, Address DstAddr
, Address SrcAddr
);
4319 void EmitARCCopyWeak(Address dst
, Address src
);
4320 void EmitARCMoveWeak(Address dst
, Address src
);
4321 llvm::Value
*EmitARCRetainAutorelease(QualType type
, llvm::Value
*value
);
4322 llvm::Value
*EmitARCRetainAutoreleaseNonBlock(llvm::Value
*value
);
4323 llvm::Value
*EmitARCStoreStrong(LValue lvalue
, llvm::Value
*value
,
4324 bool resultIgnored
);
4325 llvm::Value
*EmitARCStoreStrongCall(Address addr
, llvm::Value
*value
,
4326 bool resultIgnored
);
4327 llvm::Value
*EmitARCRetain(QualType type
, llvm::Value
*value
);
4328 llvm::Value
*EmitARCRetainNonBlock(llvm::Value
*value
);
4329 llvm::Value
*EmitARCRetainBlock(llvm::Value
*value
, bool mandatory
);
4330 void EmitARCDestroyStrong(Address addr
, ARCPreciseLifetime_t precise
);
4331 void EmitARCRelease(llvm::Value
*value
, ARCPreciseLifetime_t precise
);
4332 llvm::Value
*EmitARCAutorelease(llvm::Value
*value
);
4333 llvm::Value
*EmitARCAutoreleaseReturnValue(llvm::Value
*value
);
4334 llvm::Value
*EmitARCRetainAutoreleaseReturnValue(llvm::Value
*value
);
4335 llvm::Value
*EmitARCRetainAutoreleasedReturnValue(llvm::Value
*value
);
4336 llvm::Value
*EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value
*value
);
4338 llvm::Value
*EmitObjCAutorelease(llvm::Value
*value
, llvm::Type
*returnType
);
4339 llvm::Value
*EmitObjCRetainNonBlock(llvm::Value
*value
,
4340 llvm::Type
*returnType
);
4341 void EmitObjCRelease(llvm::Value
*value
, ARCPreciseLifetime_t precise
);
4343 std::pair
<LValue
,llvm::Value
*>
4344 EmitARCStoreAutoreleasing(const BinaryOperator
*e
);
4345 std::pair
<LValue
,llvm::Value
*>
4346 EmitARCStoreStrong(const BinaryOperator
*e
, bool ignored
);
4347 std::pair
<LValue
,llvm::Value
*>
4348 EmitARCStoreUnsafeUnretained(const BinaryOperator
*e
, bool ignored
);
4350 llvm::Value
*EmitObjCAlloc(llvm::Value
*value
,
4351 llvm::Type
*returnType
);
4352 llvm::Value
*EmitObjCAllocWithZone(llvm::Value
*value
,
4353 llvm::Type
*returnType
);
4354 llvm::Value
*EmitObjCAllocInit(llvm::Value
*value
, llvm::Type
*resultType
);
4356 llvm::Value
*EmitObjCThrowOperand(const Expr
*expr
);
4357 llvm::Value
*EmitObjCConsumeObject(QualType T
, llvm::Value
*Ptr
);
4358 llvm::Value
*EmitObjCExtendObjectLifetime(QualType T
, llvm::Value
*Ptr
);
4360 llvm::Value
*EmitARCExtendBlockObject(const Expr
*expr
);
4361 llvm::Value
*EmitARCReclaimReturnedObject(const Expr
*e
,
4362 bool allowUnsafeClaim
);
4363 llvm::Value
*EmitARCRetainScalarExpr(const Expr
*expr
);
4364 llvm::Value
*EmitARCRetainAutoreleaseScalarExpr(const Expr
*expr
);
4365 llvm::Value
*EmitARCUnsafeUnretainedScalarExpr(const Expr
*expr
);
4367 void EmitARCIntrinsicUse(ArrayRef
<llvm::Value
*> values
);
4369 void EmitARCNoopIntrinsicUse(ArrayRef
<llvm::Value
*> values
);
4371 static Destroyer destroyARCStrongImprecise
;
4372 static Destroyer destroyARCStrongPrecise
;
4373 static Destroyer destroyARCWeak
;
4374 static Destroyer emitARCIntrinsicUse
;
4375 static Destroyer destroyNonTrivialCStruct
;
4377 void EmitObjCAutoreleasePoolPop(llvm::Value
*Ptr
);
4378 llvm::Value
*EmitObjCAutoreleasePoolPush();
4379 llvm::Value
*EmitObjCMRRAutoreleasePoolPush();
4380 void EmitObjCAutoreleasePoolCleanup(llvm::Value
*Ptr
);
4381 void EmitObjCMRRAutoreleasePoolPop(llvm::Value
*Ptr
);
4383 /// Emits a reference binding to the passed in expression.
4384 RValue
EmitReferenceBindingToExpr(const Expr
*E
);
4386 //===--------------------------------------------------------------------===//
4387 // Expression Emission
4388 //===--------------------------------------------------------------------===//
4390 // Expressions are broken into three classes: scalar, complex, aggregate.
4392 /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
4393 /// scalar type, returning the result.
4394 llvm::Value
*EmitScalarExpr(const Expr
*E
, bool IgnoreResultAssign
= false);
4396 /// Emit a conversion from the specified type to the specified destination
4397 /// type, both of which are LLVM scalar types.
4398 llvm::Value
*EmitScalarConversion(llvm::Value
*Src
, QualType SrcTy
,
4399 QualType DstTy
, SourceLocation Loc
);
4401 /// Emit a conversion from the specified complex type to the specified
4402 /// destination type, where the destination type is an LLVM scalar type.
4403 llvm::Value
*EmitComplexToScalarConversion(ComplexPairTy Src
, QualType SrcTy
,
4405 SourceLocation Loc
);
4407 /// EmitAggExpr - Emit the computation of the specified expression
4408 /// of aggregate type. The result is computed into the given slot,
4409 /// which may be null to indicate that the value is not needed.
4410 void EmitAggExpr(const Expr
*E
, AggValueSlot AS
);
4412 /// EmitAggExprToLValue - Emit the computation of the specified expression of
4413 /// aggregate type into a temporary LValue.
4414 LValue
EmitAggExprToLValue(const Expr
*E
);
4416 /// Build all the stores needed to initialize an aggregate at Dest with the
4418 void EmitAggregateStore(llvm::Value
*Val
, Address Dest
, bool DestIsVolatile
);
4420 /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
4421 /// make sure it survives garbage collection until this point.
4422 void EmitExtendGCLifetime(llvm::Value
*object
);
4424 /// EmitComplexExpr - Emit the computation of the specified expression of
4425 /// complex type, returning the result.
4426 ComplexPairTy
EmitComplexExpr(const Expr
*E
,
4427 bool IgnoreReal
= false,
4428 bool IgnoreImag
= false);
4430 /// EmitComplexExprIntoLValue - Emit the given expression of complex
4431 /// type and place its result into the specified l-value.
4432 void EmitComplexExprIntoLValue(const Expr
*E
, LValue dest
, bool isInit
);
4434 /// EmitStoreOfComplex - Store a complex number into the specified l-value.
4435 void EmitStoreOfComplex(ComplexPairTy V
, LValue dest
, bool isInit
);
4437 /// EmitLoadOfComplex - Load a complex number from the specified l-value.
4438 ComplexPairTy
EmitLoadOfComplex(LValue src
, SourceLocation loc
);
4440 ComplexPairTy
EmitPromotedComplexExpr(const Expr
*E
, QualType PromotionType
);
4441 llvm::Value
*EmitPromotedScalarExpr(const Expr
*E
, QualType PromotionType
);
4442 ComplexPairTy
EmitPromotedValue(ComplexPairTy result
, QualType PromotionType
);
4443 ComplexPairTy
EmitUnPromotedValue(ComplexPairTy result
, QualType PromotionType
);
4445 Address
emitAddrOfRealComponent(Address
complex, QualType complexType
);
4446 Address
emitAddrOfImagComponent(Address
complex, QualType complexType
);
4448 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
4449 /// global variable that has already been created for it. If the initializer
4450 /// has a different type than GV does, this may free GV and return a different
4451 /// one. Otherwise it just returns GV.
4452 llvm::GlobalVariable
*
4453 AddInitializerToStaticVarDecl(const VarDecl
&D
,
4454 llvm::GlobalVariable
*GV
);
4456 // Emit an @llvm.invariant.start call for the given memory region.
4457 void EmitInvariantStart(llvm::Constant
*Addr
, CharUnits Size
);
4459 /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
4460 /// variable with global storage.
4461 void EmitCXXGlobalVarDeclInit(const VarDecl
&D
, llvm::GlobalVariable
*GV
,
4464 llvm::Function
*createAtExitStub(const VarDecl
&VD
, llvm::FunctionCallee Dtor
,
4465 llvm::Constant
*Addr
);
4467 llvm::Function
*createTLSAtExitStub(const VarDecl
&VD
,
4468 llvm::FunctionCallee Dtor
,
4469 llvm::Constant
*Addr
,
4470 llvm::FunctionCallee
&AtExit
);
4472 /// Call atexit() with a function that passes the given argument to
4473 /// the given function.
4474 void registerGlobalDtorWithAtExit(const VarDecl
&D
, llvm::FunctionCallee fn
,
4475 llvm::Constant
*addr
);
4477 /// Call atexit() with function dtorStub.
4478 void registerGlobalDtorWithAtExit(llvm::Constant
*dtorStub
);
4480 /// Call unatexit() with function dtorStub.
4481 llvm::Value
*unregisterGlobalDtorWithUnAtExit(llvm::Constant
*dtorStub
);
4483 /// Emit code in this function to perform a guarded variable
4484 /// initialization. Guarded initializations are used when it's not
4485 /// possible to prove that an initialization will be done exactly
4486 /// once, e.g. with a static local variable or a static data member
4487 /// of a class template.
4488 void EmitCXXGuardedInit(const VarDecl
&D
, llvm::GlobalVariable
*DeclPtr
,
4491 enum class GuardKind
{ VariableGuard
, TlsGuard
};
4493 /// Emit a branch to select whether or not to perform guarded initialization.
4494 void EmitCXXGuardedInitBranch(llvm::Value
*NeedsInit
,
4495 llvm::BasicBlock
*InitBlock
,
4496 llvm::BasicBlock
*NoInitBlock
,
4497 GuardKind Kind
, const VarDecl
*D
);
4499 /// GenerateCXXGlobalInitFunc - Generates code for initializing global
4502 GenerateCXXGlobalInitFunc(llvm::Function
*Fn
,
4503 ArrayRef
<llvm::Function
*> CXXThreadLocals
,
4504 ConstantAddress Guard
= ConstantAddress::invalid());
4506 /// GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global
4508 void GenerateCXXGlobalCleanUpFunc(
4510 ArrayRef
<std::tuple
<llvm::FunctionType
*, llvm::WeakTrackingVH
,
4512 DtorsOrStermFinalizers
);
4514 void GenerateCXXGlobalVarDeclInitFunc(llvm::Function
*Fn
,
4516 llvm::GlobalVariable
*Addr
,
4519 void EmitCXXConstructExpr(const CXXConstructExpr
*E
, AggValueSlot Dest
);
4521 void EmitSynthesizedCXXCopyCtor(Address Dest
, Address Src
, const Expr
*Exp
);
4523 void EmitCXXThrowExpr(const CXXThrowExpr
*E
, bool KeepInsertionPoint
= true);
4525 RValue
EmitAtomicExpr(AtomicExpr
*E
);
4527 //===--------------------------------------------------------------------===//
4528 // Annotations Emission
4529 //===--------------------------------------------------------------------===//
4531 /// Emit an annotation call (intrinsic).
4532 llvm::Value
*EmitAnnotationCall(llvm::Function
*AnnotationFn
,
4533 llvm::Value
*AnnotatedVal
,
4534 StringRef AnnotationStr
,
4535 SourceLocation Location
,
4536 const AnnotateAttr
*Attr
);
4538 /// Emit local annotations for the local variable V, declared by D.
4539 void EmitVarAnnotations(const VarDecl
*D
, llvm::Value
*V
);
4541 /// Emit field annotations for the given field & value. Returns the
4542 /// annotation result.
4543 Address
EmitFieldAnnotations(const FieldDecl
*D
, Address V
);
4545 //===--------------------------------------------------------------------===//
4547 //===--------------------------------------------------------------------===//
4549 /// ContainsLabel - Return true if the statement contains a label in it. If
4550 /// this statement is not executed normally, it not containing a label means
4551 /// that we can just remove the code.
4552 static bool ContainsLabel(const Stmt
*S
, bool IgnoreCaseStmts
= false);
4554 /// containsBreak - Return true if the statement contains a break out of it.
4555 /// If the statement (recursively) contains a switch or loop with a break
4556 /// inside of it, this is fine.
4557 static bool containsBreak(const Stmt
*S
);
4559 /// Determine if the given statement might introduce a declaration into the
4560 /// current scope, by being a (possibly-labelled) DeclStmt.
4561 static bool mightAddDeclToScope(const Stmt
*S
);
4563 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
4564 /// to a constant, or if it does but contains a label, return false. If it
4565 /// constant folds return true and set the boolean result in Result.
4566 bool ConstantFoldsToSimpleInteger(const Expr
*Cond
, bool &Result
,
4567 bool AllowLabels
= false);
4569 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
4570 /// to a constant, or if it does but contains a label, return false. If it
4571 /// constant folds return true and set the folded value.
4572 bool ConstantFoldsToSimpleInteger(const Expr
*Cond
, llvm::APSInt
&Result
,
4573 bool AllowLabels
= false);
4575 /// isInstrumentedCondition - Determine whether the given condition is an
4576 /// instrumentable condition (i.e. no "&&" or "||").
4577 static bool isInstrumentedCondition(const Expr
*C
);
4579 /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
4580 /// increments a profile counter based on the semantics of the given logical
4581 /// operator opcode. This is used to instrument branch condition coverage
4582 /// for logical operators.
4583 void EmitBranchToCounterBlock(const Expr
*Cond
, BinaryOperator::Opcode LOp
,
4584 llvm::BasicBlock
*TrueBlock
,
4585 llvm::BasicBlock
*FalseBlock
,
4586 uint64_t TrueCount
= 0,
4587 Stmt::Likelihood LH
= Stmt::LH_None
,
4588 const Expr
*CntrIdx
= nullptr);
4590 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
4591 /// if statement) to the specified blocks. Based on the condition, this might
4592 /// try to simplify the codegen of the conditional based on the branch.
4593 /// TrueCount should be the number of times we expect the condition to
4594 /// evaluate to true based on PGO data.
4595 void EmitBranchOnBoolExpr(const Expr
*Cond
, llvm::BasicBlock
*TrueBlock
,
4596 llvm::BasicBlock
*FalseBlock
, uint64_t TrueCount
,
4597 Stmt::Likelihood LH
= Stmt::LH_None
);
4599 /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
4600 /// nonnull, if \p LHS is marked _Nonnull.
4601 void EmitNullabilityCheck(LValue LHS
, llvm::Value
*RHS
, SourceLocation Loc
);
4603 /// An enumeration which makes it easier to specify whether or not an
4604 /// operation is a subtraction.
4605 enum { NotSubtraction
= false, IsSubtraction
= true };
4607 /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
4608 /// detect undefined behavior when the pointer overflow sanitizer is enabled.
4609 /// \p SignedIndices indicates whether any of the GEP indices are signed.
4610 /// \p IsSubtraction indicates whether the expression used to form the GEP
4611 /// is a subtraction.
4612 llvm::Value
*EmitCheckedInBoundsGEP(llvm::Type
*ElemTy
, llvm::Value
*Ptr
,
4613 ArrayRef
<llvm::Value
*> IdxList
,
4617 const Twine
&Name
= "");
4619 /// Specifies which type of sanitizer check to apply when handling a
4620 /// particular builtin.
4621 enum BuiltinCheckKind
{
4626 /// Emits an argument for a call to a builtin. If the builtin sanitizer is
4627 /// enabled, a runtime check specified by \p Kind is also emitted.
4628 llvm::Value
*EmitCheckedArgForBuiltin(const Expr
*E
, BuiltinCheckKind Kind
);
4630 /// Emit a description of a type in a format suitable for passing to
4631 /// a runtime sanitizer handler.
4632 llvm::Constant
*EmitCheckTypeDescriptor(QualType T
);
4634 /// Convert a value into a format suitable for passing to a runtime
4635 /// sanitizer handler.
4636 llvm::Value
*EmitCheckValue(llvm::Value
*V
);
4638 /// Emit a description of a source location in a format suitable for
4639 /// passing to a runtime sanitizer handler.
4640 llvm::Constant
*EmitCheckSourceLocation(SourceLocation Loc
);
4642 void EmitKCFIOperandBundle(const CGCallee
&Callee
,
4643 SmallVectorImpl
<llvm::OperandBundleDef
> &Bundles
);
4645 /// Create a basic block that will either trap or call a handler function in
4646 /// the UBSan runtime with the provided arguments, and create a conditional
4648 void EmitCheck(ArrayRef
<std::pair
<llvm::Value
*, SanitizerMask
>> Checked
,
4649 SanitizerHandler Check
, ArrayRef
<llvm::Constant
*> StaticArgs
,
4650 ArrayRef
<llvm::Value
*> DynamicArgs
);
4652 /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
4653 /// if Cond if false.
4654 void EmitCfiSlowPathCheck(SanitizerMask Kind
, llvm::Value
*Cond
,
4655 llvm::ConstantInt
*TypeId
, llvm::Value
*Ptr
,
4656 ArrayRef
<llvm::Constant
*> StaticArgs
);
4658 /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
4659 /// checking is enabled. Otherwise, just emit an unreachable instruction.
4660 void EmitUnreachable(SourceLocation Loc
);
4662 /// Create a basic block that will call the trap intrinsic, and emit a
4663 /// conditional branch to it, for the -ftrapv checks.
4664 void EmitTrapCheck(llvm::Value
*Checked
, SanitizerHandler CheckHandlerID
);
4666 /// Emit a call to trap or debugtrap and attach function attribute
4667 /// "trap-func-name" if specified.
4668 llvm::CallInst
*EmitTrapCall(llvm::Intrinsic::ID IntrID
);
4670 /// Emit a stub for the cross-DSO CFI check function.
4671 void EmitCfiCheckStub();
4673 /// Emit a cross-DSO CFI failure handling function.
4674 void EmitCfiCheckFail();
4676 /// Create a check for a function parameter that may potentially be
4677 /// declared as non-null.
4678 void EmitNonNullArgCheck(RValue RV
, QualType ArgType
, SourceLocation ArgLoc
,
4679 AbstractCallee AC
, unsigned ParmNum
);
4681 /// EmitCallArg - Emit a single call argument.
4682 void EmitCallArg(CallArgList
&args
, const Expr
*E
, QualType ArgType
);
4684 /// EmitDelegateCallArg - We are performing a delegate call; that
4685 /// is, the current function is delegating to another one. Produce
4686 /// a r-value suitable for passing the given parameter.
4687 void EmitDelegateCallArg(CallArgList
&args
, const VarDecl
*param
,
4688 SourceLocation loc
);
4690 /// SetFPAccuracy - Set the minimum required accuracy of the given floating
4691 /// point operation, expressed as the maximum relative error in ulp.
4692 void SetFPAccuracy(llvm::Value
*Val
, float Accuracy
);
4694 /// Set the codegen fast-math flags.
4695 void SetFastMathFlags(FPOptions FPFeatures
);
4697 // Truncate or extend a boolean vector to the requested number of elements.
4698 llvm::Value
*emitBoolVecConversion(llvm::Value
*SrcVec
,
4699 unsigned NumElementsDst
,
4700 const llvm::Twine
&Name
= "");
4703 llvm::MDNode
*getRangeForLoadFromType(QualType Ty
);
4704 void EmitReturnOfRValue(RValue RV
, QualType Ty
);
4706 void deferPlaceholderReplacement(llvm::Instruction
*Old
, llvm::Value
*New
);
4708 llvm::SmallVector
<std::pair
<llvm::WeakTrackingVH
, llvm::Value
*>, 4>
4709 DeferredReplacements
;
4711 /// Set the address of a local variable.
4712 void setAddrOfLocalVar(const VarDecl
*VD
, Address Addr
) {
4713 assert(!LocalDeclMap
.count(VD
) && "Decl already exists in LocalDeclMap!");
4714 LocalDeclMap
.insert({VD
, Addr
});
4717 /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
4718 /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
4720 /// \param AI - The first function argument of the expansion.
4721 void ExpandTypeFromArgs(QualType Ty
, LValue Dst
,
4722 llvm::Function::arg_iterator
&AI
);
4724 /// ExpandTypeToArgs - Expand an CallArg \arg Arg, with the LLVM type for \arg
4725 /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
4726 /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
4727 void ExpandTypeToArgs(QualType Ty
, CallArg Arg
, llvm::FunctionType
*IRFuncTy
,
4728 SmallVectorImpl
<llvm::Value
*> &IRCallArgs
,
4729 unsigned &IRCallArgPos
);
4731 std::pair
<llvm::Value
*, llvm::Type
*>
4732 EmitAsmInput(const TargetInfo::ConstraintInfo
&Info
, const Expr
*InputExpr
,
4733 std::string
&ConstraintStr
);
4735 std::pair
<llvm::Value
*, llvm::Type
*>
4736 EmitAsmInputLValue(const TargetInfo::ConstraintInfo
&Info
, LValue InputValue
,
4737 QualType InputType
, std::string
&ConstraintStr
,
4738 SourceLocation Loc
);
4740 /// Attempts to statically evaluate the object size of E. If that
4741 /// fails, emits code to figure the size of E out for us. This is
4742 /// pass_object_size aware.
4744 /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
4745 llvm::Value
*evaluateOrEmitBuiltinObjectSize(const Expr
*E
, unsigned Type
,
4746 llvm::IntegerType
*ResType
,
4747 llvm::Value
*EmittedE
,
4750 /// Emits the size of E, as required by __builtin_object_size. This
4751 /// function is aware of pass_object_size parameters, and will act accordingly
4752 /// if E is a parameter with the pass_object_size attribute.
4753 llvm::Value
*emitBuiltinObjectSize(const Expr
*E
, unsigned Type
,
4754 llvm::IntegerType
*ResType
,
4755 llvm::Value
*EmittedE
,
4758 void emitZeroOrPatternForAutoVarInit(QualType type
, const VarDecl
&D
,
enum class EvaluationOrder {
  ///! No language constraints on evaluation order.
  Default = -1,
  ///! Language semantics require left-to-right evaluation.
  ForceLeftToRight,
  ///! Language semantics require right-to-left evaluation.
  ForceRightToLeft
};
4771 // Wrapper for function prototype sources. Wraps either a FunctionProtoType or
4772 // an ObjCMethodDecl.
4773 struct PrototypeWrapper
{
4774 llvm::PointerUnion
<const FunctionProtoType
*, const ObjCMethodDecl
*> P
;
4776 PrototypeWrapper(const FunctionProtoType
*FT
) : P(FT
) {}
4777 PrototypeWrapper(const ObjCMethodDecl
*MD
) : P(MD
) {}
4780 void EmitCallArgs(CallArgList
&Args
, PrototypeWrapper Prototype
,
4781 llvm::iterator_range
<CallExpr::const_arg_iterator
> ArgRange
,
4782 AbstractCallee AC
= AbstractCallee(),
4783 unsigned ParamsToSkip
= 0,
4784 EvaluationOrder Order
= EvaluationOrder::Default
);
4786 /// EmitPointerWithAlignment - Given an expression with a pointer type,
4787 /// emit the value and compute our best estimate of the alignment of the
4790 /// \param BaseInfo - If non-null, this will be initialized with
4791 /// information about the source of the alignment and the may-alias
4792 /// attribute. Note that this function will conservatively fall back on
4793 /// the type when it doesn't recognize the expression and may-alias will
4794 /// be set to false.
4796 /// One reasonable way to use this information is when there's a language
4797 /// guarantee that the pointer must be aligned to some stricter value, and
4798 /// we're simply trying to ensure that sufficiently obvious uses of under-
4799 /// aligned objects don't get miscompiled; for example, a placement new
4800 /// into the address of a local variable. In such a case, it's quite
4801 /// reasonable to just ignore the returned alignment when it isn't from an
4802 /// explicit source.
4804 EmitPointerWithAlignment(const Expr
*Addr
, LValueBaseInfo
*BaseInfo
= nullptr,
4805 TBAAAccessInfo
*TBAAInfo
= nullptr,
4806 KnownNonNull_t IsKnownNonNull
= NotKnownNonNull
);
4808 /// If \p E references a parameter with pass_object_size info or a constant
4809 /// array size modifier, emit the object size divided by the size of \p EltTy.
4810 /// Otherwise return null.
4811 llvm::Value
*LoadPassedObjectSize(const Expr
*E
, QualType EltTy
);
4813 void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK
);
4815 struct MultiVersionResolverOption
{
4816 llvm::Function
*Function
;
4818 StringRef Architecture
;
4819 llvm::SmallVector
<StringRef
, 8> Features
;
4821 Conds(StringRef Arch
, ArrayRef
<StringRef
> Feats
)
4822 : Architecture(Arch
), Features(Feats
.begin(), Feats
.end()) {}
4825 MultiVersionResolverOption(llvm::Function
*F
, StringRef Arch
,
4826 ArrayRef
<StringRef
> Feats
)
4827 : Function(F
), Conditions(Arch
, Feats
) {}
4830 // Emits the body of a multiversion function's resolver. Assumes that the
4831 // options are already sorted in the proper order, with the 'default' option
4832 // last (if it exists).
4833 void EmitMultiVersionResolver(llvm::Function
*Resolver
,
4834 ArrayRef
<MultiVersionResolverOption
> Options
);
4836 EmitX86MultiVersionResolver(llvm::Function
*Resolver
,
4837 ArrayRef
<MultiVersionResolverOption
> Options
);
4839 EmitAArch64MultiVersionResolver(llvm::Function
*Resolver
,
4840 ArrayRef
<MultiVersionResolverOption
> Options
);
4843 QualType
getVarArgType(const Expr
*Arg
);
4845 void EmitDeclMetadata();
4847 BlockByrefHelpers
*buildByrefHelpers(llvm::StructType
&byrefType
,
4848 const AutoVarEmission
&emission
);
4850 void AddObjCARCExceptionMetadata(llvm::Instruction
*Inst
);
4852 llvm::Value
*GetValueForARMHint(unsigned BuiltinID
);
4853 llvm::Value
*EmitX86CpuIs(const CallExpr
*E
);
4854 llvm::Value
*EmitX86CpuIs(StringRef CPUStr
);
4855 llvm::Value
*EmitX86CpuSupports(const CallExpr
*E
);
4856 llvm::Value
*EmitX86CpuSupports(ArrayRef
<StringRef
> FeatureStrs
);
4857 llvm::Value
*EmitX86CpuSupports(uint64_t Mask
);
4858 llvm::Value
*EmitX86CpuInit();
4859 llvm::Value
*FormX86ResolverCondition(const MultiVersionResolverOption
&RO
);
4860 llvm::Value
*EmitAArch64CpuInit();
4862 FormAArch64ResolverCondition(const MultiVersionResolverOption
&RO
);
4863 llvm::Value
*EmitAArch64CpuSupports(ArrayRef
<StringRef
> FeatureStrs
);
4867 inline DominatingLLVMValue::saved_type
4868 DominatingLLVMValue::save(CodeGenFunction
&CGF
, llvm::Value
*value
) {
4869 if (!needsSaving(value
)) return saved_type(value
, false);
4871 // Otherwise, we need an alloca.
4872 auto align
= CharUnits::fromQuantity(
4873 CGF
.CGM
.getDataLayout().getPrefTypeAlign(value
->getType()));
4875 CGF
.CreateTempAlloca(value
->getType(), align
, "cond-cleanup.save");
4876 CGF
.Builder
.CreateStore(value
, alloca
);
4878 return saved_type(alloca
.getPointer(), true);
4881 inline llvm::Value
*DominatingLLVMValue::restore(CodeGenFunction
&CGF
,
4883 // If the value says it wasn't saved, trust that it's still dominating.
4884 if (!value
.getInt()) return value
.getPointer();
4886 // Otherwise, it should be an alloca instruction, as set up in save().
4887 auto alloca
= cast
<llvm::AllocaInst
>(value
.getPointer());
4888 return CGF
.Builder
.CreateAlignedLoad(alloca
->getAllocatedType(), alloca
,
4889 alloca
->getAlign());
4892 } // end namespace CodeGen
4894 // Map the LangOption for floating point exception behavior into
4895 // the corresponding enum in the IR.
4896 llvm::fp::ExceptionBehavior
4897 ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind
);
4898 } // end namespace clang