//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the internal per-function state used for llvm translation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H
#define LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H

#include "CGBuilder.h"
#include "CGDebugInfo.h"
#include "CGLoopInfo.h"
#include "CGValue.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "EHScopeStack.h"
#include "VarBypassDetector.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/CurrentSourceLocExprScope.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/StmtOpenACC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/Type.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

namespace llvm {
class CanonicalLoopInfo;
} // namespace llvm

namespace clang {
class CXXDestructorDecl;
class CXXForRangeStmt;
class FunctionProtoType;
class ObjCContainerDecl;
class ObjCInterfaceDecl;
class ObjCImplementationDecl;
class ObjCPropertyImplDecl;
class ObjCForCollectionStmt;
class ObjCAtThrowStmt;
class ObjCAtSynchronizedStmt;
class ObjCAutoreleasePoolStmt;
class OMPUseDevicePtrClause;
class OMPUseDeviceAddrClause;
class OMPExecutableDirective;

namespace analyze_os_log {
class OSLogBufferLayout;
} // namespace analyze_os_log

namespace CodeGen {
class BlockByrefHelpers;
class BlockFieldFlags;
class RegionCodeGenTy;
class TargetCodeGenInfo;
struct OMPTaskDataTy;

/// The kind of evaluation to perform on values of a particular
/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
/// CGExprAgg?
///
/// TODO: should vectors maybe be split out into their own thing?
enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate };
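
// For example: 'int' and pointer types evaluate as TEK_Scalar, '_Complex
// double' as TEK_Complex, and class/struct and array types as TEK_Aggregate.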

#define LIST_SANITIZER_CHECKS                                                  \
  SANITIZER_CHECK(AddOverflow, add_overflow, 0)                                \
  SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0)                  \
  SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0)                             \
  SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0)                          \
  SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0)            \
  SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0)                   \
  SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0)             \
  SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0)                  \
  SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0)                          \
  SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0)                       \
  SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0)                     \
  SANITIZER_CHECK(MissingReturn, missing_return, 0)                            \
  SANITIZER_CHECK(MulOverflow, mul_overflow, 0)                                \
  SANITIZER_CHECK(NegateOverflow, negate_overflow, 0)                          \
  SANITIZER_CHECK(NullabilityArg, nullability_arg, 0)                          \
  SANITIZER_CHECK(NullabilityReturn, nullability_return, 1)                    \
  SANITIZER_CHECK(NonnullArg, nonnull_arg, 0)                                  \
  SANITIZER_CHECK(NonnullReturn, nonnull_return, 1)                            \
  SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0)                               \
  SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0)                        \
  SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0)                    \
  SANITIZER_CHECK(SubOverflow, sub_overflow, 0)                                \
  SANITIZER_CHECK(TypeMismatch, type_mismatch, 1)                              \
  SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0)                \
  SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0)              \
  SANITIZER_CHECK(BoundsSafety, bounds_safety, 0)

enum SanitizerHandler {
#define SANITIZER_CHECK(Enum, Name, Version) Enum,
  LIST_SANITIZER_CHECKS
#undef SANITIZER_CHECK
};
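
// After expansion the enum contains one enumerator per check, i.e. roughly:
//
//   enum SanitizerHandler { AddOverflow, BuiltinUnreachable, CFICheckFail,
//                           ..., BoundsSafety };
//
// Each (Name, Version) pair identifies the corresponding UBSan runtime
// handler (e.g. __ubsan_handle_add_overflow) and its ABI version.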

/// Helper class with most of the code for saving a value for a
/// conditional expression cleanup.
struct DominatingLLVMValue {
  typedef llvm::PointerIntPair<llvm::Value *, 1, bool> saved_type;

  /// Answer whether the given value needs extra work to be saved.
  static bool needsSaving(llvm::Value *value) {
    if (!value)
      return false;

    // If it's not an instruction, we don't need to save.
    if (!isa<llvm::Instruction>(value)) return false;

    // If it's an instruction in the entry block, we don't need to save.
    llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
    return (block != &block->getParent()->getEntryBlock());
  }

  static saved_type save(CodeGenFunction &CGF, llvm::Value *value);
  static llvm::Value *restore(CodeGenFunction &CGF, saved_type value);
};

/// A partial specialization of DominatingValue for llvm::Values that
/// might be llvm::Instructions.
template <class T> struct DominatingPointer<T, true> : DominatingLLVMValue {
  typedef T *type;
  static type restore(CodeGenFunction &CGF, saved_type value) {
    return static_cast<T *>(DominatingLLVMValue::restore(CGF, value));
  }
};

/// A specialization of DominatingValue for Address.
template <> struct DominatingValue<Address> {
  typedef Address type;

  struct saved_type {
    DominatingLLVMValue::saved_type BasePtr;
    llvm::Type *ElementType;
    CharUnits Alignment;
    DominatingLLVMValue::saved_type Offset;
    llvm::PointerType *EffectiveType;
  };

  static bool needsSaving(type value) {
    if (DominatingLLVMValue::needsSaving(value.getBasePointer()) ||
        DominatingLLVMValue::needsSaving(value.getOffset()))
      return true;
    return false;
  }
  static saved_type save(CodeGenFunction &CGF, type value) {
    return {DominatingLLVMValue::save(CGF, value.getBasePointer()),
            value.getElementType(), value.getAlignment(),
            DominatingLLVMValue::save(CGF, value.getOffset()), value.getType()};
  }
  static type restore(CodeGenFunction &CGF, saved_type value) {
    return Address(DominatingLLVMValue::restore(CGF, value.BasePtr),
                   value.ElementType, value.Alignment, CGPointerAuthInfo(),
                   DominatingLLVMValue::restore(CGF, value.Offset));
  }
};

/// A specialization of DominatingValue for RValue.
template <> struct DominatingValue<RValue> {
  typedef RValue type;
  class saved_type {
    enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
                AggregateAddress, ComplexAddress };
    union {
      struct {
        DominatingLLVMValue::saved_type first, second;
      } Vals;
      DominatingValue<Address>::saved_type AggregateAddr;
    };
    LLVM_PREFERRED_TYPE(Kind)
    unsigned K : 3;

    saved_type(DominatingLLVMValue::saved_type Val1, unsigned K)
        : Vals{Val1, DominatingLLVMValue::saved_type()}, K(K) {}

    saved_type(DominatingLLVMValue::saved_type Val1,
               DominatingLLVMValue::saved_type Val2)
        : Vals{Val1, Val2}, K(ComplexAddress) {}

    saved_type(DominatingValue<Address>::saved_type AggregateAddr, unsigned K)
        : AggregateAddr(AggregateAddr), K(K) {}

  public:
    static bool needsSaving(RValue value);
    static saved_type save(CodeGenFunction &CGF, RValue value);
    RValue restore(CodeGenFunction &CGF);

    // implementations in CGCleanup.cpp
  };

  static bool needsSaving(type value) {
    return saved_type::needsSaving(value);
  }
  static saved_type save(CodeGenFunction &CGF, type value) {
    return saved_type::save(CGF, value);
  }
  static type restore(CodeGenFunction &CGF, saved_type value) {
    return value.restore(CGF);
  }
};
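
// A sketch of how these traits are used (illustrative only): a value produced
// inside a conditionally-evaluated expression is first lowered to a saved
// form, then reloaded when the cleanup actually runs:
//
//   DominatingValue<RValue>::saved_type Saved = CGF.saveValueInCond(RV);
//   ...
//   RValue Reloaded = DominatingValue<RValue>::restore(CGF, Saved);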

/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public CodeGenTypeCache {
  CodeGenFunction(const CodeGenFunction &) = delete;
  void operator=(const CodeGenFunction &) = delete;

  friend class CGCXXABI;

public:
  /// A jump destination is an abstract label, branching to which may
  /// require a jump out through normal cleanups.
  struct JumpDest {
    JumpDest() : Block(nullptr), Index(0) {}
    JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
             unsigned Index)
        : Block(Block), ScopeDepth(Depth), Index(Index) {}

    bool isValid() const { return Block != nullptr; }
    llvm::BasicBlock *getBlock() const { return Block; }
    EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
    unsigned getDestIndex() const { return Index; }

    // This should be used cautiously.
    void setScopeDepth(EHScopeStack::stable_iterator depth) {
      ScopeDepth = depth;
    }

  private:
    llvm::BasicBlock *Block;
    EHScopeStack::stable_iterator ScopeDepth;
    unsigned Index;
  };
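
  // Illustrative use (simplified from the loop emitters): a 'break' target is
  // modeled as a JumpDest so that branching to it can first run intervening
  // cleanups:
  //
  //   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
  //   ...
  //   EmitBranchThroughCleanup(LoopExit); // emits cleanups, then branches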

  CodeGenModule &CGM; // Per-module state.
  const TargetInfo &Target;

  // For EH/SEH outlined funclets, this field points to the parent's CGF.
  CodeGenFunction *ParentCGF = nullptr;

  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
  LoopInfoStack LoopStack;
  CGBuilderTy Builder;

  // Stores variables for which we can't generate correct lifetime markers
  // because of jumps.
  VarBypassDetector Bypasses;

  /// List of recently emitted OMPCanonicalLoops.
  ///
  /// Since OMPCanonicalLoops are nested inside other statements (in particular
  /// CapturedStmt generated by OMPExecutableDirective and non-perfectly nested
  /// loops), we cannot directly call OMPEmitOMPCanonicalLoop and receive its
  /// llvm::CanonicalLoopInfo. Instead, we call EmitStmt and any
  /// OMPEmitOMPCanonicalLoop called by it will add its CanonicalLoopInfo to
  /// this stack when done. Entering a new loop requires clearing this list; it
  /// either means we start parsing a new loop nest (in which case the previous
  /// loop nest goes out of scope) or a second loop in the same level in which
  /// case it would be ambiguous into which of the two (or more) loops the loop
  /// nest would extend.
  SmallVector<llvm::CanonicalLoopInfo *, 4> OMPLoopNestStack;

  /// Stack to track the Logical Operator recursion nest for MC/DC.
  SmallVector<const BinaryOperator *, 16> MCDCLogOpStack;

  /// Stack to track the controlled convergence tokens.
  SmallVector<llvm::IntrinsicInst *, 4> ConvergenceTokenStack;

  /// Number of nested loops to be consumed by the last surrounding
  /// loop-associated directive.
  int ExpectedOMPLoopDepth = 0;

  // CodeGen lambda for loops and support for ordered clause
  typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
                                  JumpDest)>
      CodeGenLoopTy;
  typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
                                  const unsigned, const bool)>
      CodeGenOrderedTy;

  // Codegen lambda for loop bounds in worksharing loop constructs
  typedef llvm::function_ref<std::pair<LValue, LValue>(
      CodeGenFunction &, const OMPExecutableDirective &S)>
      CodeGenLoopBoundsTy;

  // Codegen lambda for loop bounds in dispatch-based loop implementation
  typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
      CodeGenFunction &, const OMPExecutableDirective &S, Address LB,
      Address UB)>
      CodeGenDispatchBoundsTy;

  /// CGBuilder insert helper. This function is called after an
  /// instruction is created using Builder.
  void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
                    llvm::BasicBlock::iterator InsertPt) const;

  /// CurFuncDecl - Holds the Decl for the current outermost
  /// non-closure context.
  const Decl *CurFuncDecl = nullptr;
  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
  const Decl *CurCodeDecl = nullptr;
  const CGFunctionInfo *CurFnInfo = nullptr;
  QualType FnRetTy;
  llvm::Function *CurFn = nullptr;

  /// Save Parameter Decl for coroutine.
  llvm::SmallVector<const ParmVarDecl *, 4> FnArgs;

  // Holds coroutine data if the current function is a coroutine. We use a
  // wrapper to manage its lifetime, so that we don't have to define CGCoroData
  // in this header.
  struct CGCoroInfo {
    std::unique_ptr<CGCoroData> Data;
    bool InSuspendBlock = false;
    CGCoroInfo();
    ~CGCoroInfo();
  };
  CGCoroInfo CurCoro;

  bool isCoroutine() const {
    return CurCoro.Data != nullptr;
  }

  bool inSuspendBlock() const {
    return isCoroutine() && CurCoro.InSuspendBlock;
  }

  // Holds FramePtr for await_suspend wrapper generation,
  // so that the __builtin_coro_frame call can be lowered
  // directly to the value of its second argument.
  struct AwaitSuspendWrapperInfo {
    llvm::Value *FramePtr = nullptr;
  };
  AwaitSuspendWrapperInfo CurAwaitSuspendWrapper;

  // Generates a wrapper function for the `llvm.coro.await.suspend.*`
  // intrinsics. It encapsulates SuspendExpr in a function, to separate its
  // body from the main coroutine and avoid miscompilations. The intrinsic is
  // lowered to a call to this function in the CoroSplit pass.
  // The function signature is:
  //   <type> __await_suspend_wrapper_<name>(ptr %awaiter, ptr %hdl)
  // where type is one of (void, i1, ptr).
  llvm::Function *generateAwaitSuspendWrapper(Twine const &CoroName,
                                              Twine const &SuspendPointName,
                                              CoroutineSuspendExpr const &S);

  /// CurGD - The GlobalDecl for the current function being compiled.
  GlobalDecl CurGD;

  /// PrologueCleanupDepth - The cleanup depth enclosing all the
  /// cleanups associated with the parameters.
  EHScopeStack::stable_iterator PrologueCleanupDepth;

  /// ReturnBlock - Unified return block.
  JumpDest ReturnBlock;

  /// ReturnValue - The temporary alloca to hold the return
  /// value. This is invalid iff the function has no return value.
  Address ReturnValue = Address::invalid();

  /// ReturnValuePointer - The temporary alloca to hold a pointer to sret.
  /// This is invalid if sret is not in use.
  Address ReturnValuePointer = Address::invalid();

  /// If a return statement is being visited, this holds the return statement's
  /// result expression.
  const Expr *RetExpr = nullptr;

  /// Return true if a label was seen in the current scope.
  bool hasLabelBeenSeenInCurrentScope() const {
    if (CurLexicalScope)
      return CurLexicalScope->hasLabels();
    return !LabelMap.empty();
  }

  /// AllocaInsertPoint - This is an instruction in the entry block before which
  /// we prefer to insert allocas.
  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;

private:
  /// PostAllocaInsertPt - This is a place in the prologue where code can be
  /// inserted that will be dominated by all the static allocas. This helps
  /// achieve two things:
  ///   1. Contiguity of all static allocas (within the prologue) is maintained.
  ///   2. All other prologue code (which is dominated by static allocas)
  ///      appears in source order immediately after all static allocas.
  ///
  /// PostAllocaInsertPt will be lazily created when it is *really* required.
  llvm::AssertingVH<llvm::Instruction> PostAllocaInsertPt = nullptr;

public:
  /// Return PostAllocaInsertPt. If it is not yet created, then insert it
  /// immediately after AllocaInsertPt.
  llvm::Instruction *getPostAllocaInsertPoint() {
    if (!PostAllocaInsertPt) {
      assert(AllocaInsertPt &&
             "Expected static alloca insertion point at function prologue");
      assert(AllocaInsertPt->getParent()->isEntryBlock() &&
             "EBB should be entry block of the current code gen function");
      PostAllocaInsertPt = AllocaInsertPt->clone();
      PostAllocaInsertPt->setName("postallocapt");
      PostAllocaInsertPt->insertAfter(AllocaInsertPt);
    }

    return PostAllocaInsertPt;
  }

  /// API for captured statement code generation.
  class CGCapturedStmtInfo {
  public:
    explicit CGCapturedStmtInfo(CapturedRegionKind K = CR_Default)
        : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {}
    explicit CGCapturedStmtInfo(const CapturedStmt &S,
                                CapturedRegionKind K = CR_Default)
        : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {

      RecordDecl::field_iterator Field =
          S.getCapturedRecordDecl()->field_begin();
      for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
                                                E = S.capture_end();
           I != E; ++I, ++Field) {
        if (I->capturesThis())
          CXXThisFieldDecl = *Field;
        else if (I->capturesVariable())
          CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
        else if (I->capturesVariableByCopy())
          CaptureFields[I->getCapturedVar()->getCanonicalDecl()] = *Field;
      }
    }

    virtual ~CGCapturedStmtInfo();

    CapturedRegionKind getKind() const { return Kind; }

    virtual void setContextValue(llvm::Value *V) { ThisValue = V; }
    // Retrieve the value of the context parameter.
    virtual llvm::Value *getContextValue() const { return ThisValue; }

    /// Lookup the captured field decl for a variable.
    virtual const FieldDecl *lookup(const VarDecl *VD) const {
      return CaptureFields.lookup(VD->getCanonicalDecl());
    }

    bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; }
    virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }

    static bool classof(const CGCapturedStmtInfo *) {
      return true;
    }

    /// Emit the captured statement body.
    virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) {
      CGF.incrementProfileCounter(S);
      CGF.EmitStmt(S);
    }

    /// Get the name of the capture helper.
    virtual StringRef getHelperName() const { return "__captured_stmt"; }

    /// Get the CaptureFields
    llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
      return CaptureFields;
    }

  private:
    /// The kind of captured statement being generated.
    CapturedRegionKind Kind;

    /// Keep the map between VarDecl and FieldDecl.
    llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;

    /// The base address of the captured record, passed in as the first
    /// argument of the parallel region function.
    llvm::Value *ThisValue;

    /// Captured 'this' type.
    FieldDecl *CXXThisFieldDecl;
  };

  CGCapturedStmtInfo *CapturedStmtInfo = nullptr;

  /// RAII for correct setting/restoring of CapturedStmtInfo.
  class CGCapturedStmtRAII {
  private:
    CodeGenFunction &CGF;
    CGCapturedStmtInfo *PrevCapturedStmtInfo;

  public:
    CGCapturedStmtRAII(CodeGenFunction &CGF,
                       CGCapturedStmtInfo *NewCapturedStmtInfo)
        : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo) {
      CGF.CapturedStmtInfo = NewCapturedStmtInfo;
    }
    ~CGCapturedStmtRAII() { CGF.CapturedStmtInfo = PrevCapturedStmtInfo; }
  };
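
  // Illustrative use: install capture info while emitting a captured region,
  // restoring the previous state on scope exit:
  //
  //   CGCapturedStmtInfo CSI(*CS, CR_Default);
  //   CGCapturedStmtRAII Guard(CGF, &CSI);
  //   CGF.EmitStmt(CS->getCapturedStmt());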

  /// An abstract representation of regular/ObjC call/message targets.
  class AbstractCallee {
    /// The function declaration of the callee.
    const Decl *CalleeDecl;

  public:
    AbstractCallee() : CalleeDecl(nullptr) {}
    AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
    AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
    bool hasFunctionDecl() const {
      return isa_and_nonnull<FunctionDecl>(CalleeDecl);
    }
    const Decl *getDecl() const { return CalleeDecl; }
    unsigned getNumParams() const {
      if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
        return FD->getNumParams();
      return cast<ObjCMethodDecl>(CalleeDecl)->param_size();
    }
    const ParmVarDecl *getParamDecl(unsigned I) const {
      if (const auto *FD = dyn_cast<FunctionDecl>(CalleeDecl))
        return FD->getParamDecl(I);
      return *(cast<ObjCMethodDecl>(CalleeDecl)->param_begin() + I);
    }
  };

  /// Sanitizers enabled for this function.
  SanitizerSet SanOpts;

  /// True if CodeGen currently emits code implementing sanitizer checks.
  bool IsSanitizerScope = false;

  /// RAII object to set/unset CodeGenFunction::IsSanitizerScope.
  class SanitizerScope {
    CodeGenFunction *CGF;

  public:
    SanitizerScope(CodeGenFunction *CGF);
    ~SanitizerScope();
  };

  /// In C++, whether we are code generating a thunk. This controls whether we
  /// should emit cleanups.
  bool CurFuncIsThunk = false;

  /// In ARC, whether we should autorelease the return value.
  bool AutoreleaseResult = false;

  /// Whether we processed a Microsoft-style asm block during CodeGen. These can
  /// potentially set the return value.
  bool SawAsmBlock = false;

  GlobalDecl CurSEHParent;

  /// True if the current function is an outlined SEH helper. This can be a
  /// finally block or filter expression.
  bool IsOutlinedSEHHelper = false;

  /// True if CodeGen currently emits code inside a preserved access index
  /// region.
  bool IsInPreservedAIRegion = false;

  /// True if the current statement has nomerge attribute.
  bool InNoMergeAttributedStmt = false;

  /// True if the current statement has noinline attribute.
  bool InNoInlineAttributedStmt = false;

  /// True if the current statement has always_inline attribute.
  bool InAlwaysInlineAttributedStmt = false;

  // The CallExpr within the current statement that the musttail attribute
  // applies to. nullptr if there is no 'musttail' on the current statement.
  const CallExpr *MustTailCall = nullptr;

  /// Returns true if a function must make progress, which means the
  /// mustprogress attribute can be added.
  bool checkIfFunctionMustProgress() {
    if (CGM.getCodeGenOpts().getFiniteLoops() ==
        CodeGenOptions::FiniteLoopsKind::Never)
      return false;

    // C++11 and later guarantee that a thread eventually will do one of the
    // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
    // - terminate,
    // - make a call to a library I/O function,
    // - perform an access through a volatile glvalue, or
    // - perform a synchronization operation or an atomic operation.
    //
    // Hence each function is 'mustprogress' in C++11 or later.
    return getLangOpts().CPlusPlus11;
  }

  /// Returns true if a loop must make progress, which means the mustprogress
  /// attribute can be added. The expression is the loop condition;
  /// \p HasEmptyBody indicates whether the loop body is known to be empty.
  bool checkIfLoopMustProgress(const Expr *, bool HasEmptyBody);

  const CodeGen::CGBlockInfo *BlockInfo = nullptr;
  llvm::Value *BlockPointer = nullptr;

  llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
  FieldDecl *LambdaThisCaptureField = nullptr;

  /// A mapping from NRVO variables to the flags used to indicate
  /// when the NRVO has been applied to this variable.
  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;

  EHScopeStack EHStack;
  llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;

  // A stack of cleanups which were added to EHStack but have to be deactivated
  // later before being popped or emitted. These are usually deactivated on
  // exiting a `CleanupDeactivationScope` scope. For instance, after a
  // full expression.
  //
  // These are especially useful for correctly emitting cleanups while
  // encountering branches out of expression (through stmt-expr or coroutine
  // suspensions).
  struct DeferredDeactivateCleanup {
    EHScopeStack::stable_iterator Cleanup;
    llvm::Instruction *DominatingIP;
  };
  llvm::SmallVector<DeferredDeactivateCleanup> DeferredDeactivationCleanupStack;

  // Enters a new scope for capturing cleanups which are deferred to be
  // deactivated, all of which will be deactivated once the scope is exited.
  struct CleanupDeactivationScope {
    CodeGenFunction &CGF;
    size_t OldDeactivateCleanupStackSize;
    bool Deactivated;
    CleanupDeactivationScope(CodeGenFunction &CGF)
        : CGF(CGF), OldDeactivateCleanupStackSize(
                        CGF.DeferredDeactivationCleanupStack.size()),
          Deactivated(false) {}

    void ForceDeactivate() {
      assert(!Deactivated && "Deactivating already deactivated scope");
      auto &Stack = CGF.DeferredDeactivationCleanupStack;
      for (size_t I = Stack.size(); I > OldDeactivateCleanupStackSize; I--) {
        CGF.DeactivateCleanupBlock(Stack[I - 1].Cleanup,
                                   Stack[I - 1].DominatingIP);
        Stack[I - 1].DominatingIP->eraseFromParent();
      }
      Stack.resize(OldDeactivateCleanupStackSize);
      Deactivated = true;
    }

    ~CleanupDeactivationScope() {
      if (Deactivated)
        return;
      ForceDeactivate();
    }
  };
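
  // Illustrative flow (SomeCleanupT is a placeholder cleanup class): cleanups
  // pushed with deferred deactivation while this scope is active are all
  // deactivated when the scope is exited:
  //
  //   {
  //     CleanupDeactivationScope Scope(*this);
  //     pushCleanupAndDeferDeactivation<SomeCleanupT>(NormalAndEHCleanup, ...);
  //     ... // emit the full-expression
  //   }     // deferred cleanups deactivated here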

  llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack;

  llvm::Instruction *CurrentFuncletPad = nullptr;

  class CallLifetimeEnd final : public EHScopeStack::Cleanup {
    bool isRedundantBeforeReturn() override { return true; }

    llvm::Value *Addr;
    llvm::Value *Size;

  public:
    CallLifetimeEnd(RawAddress addr, llvm::Value *size)
        : Addr(addr.getPointer()), Size(size) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitLifetimeEnd(Size, Addr);
    }
  };

  /// Header for data within LifetimeExtendedCleanupStack.
  struct LifetimeExtendedCleanupHeader {
    /// The size of the following cleanup object.
    unsigned Size;
    /// The kind of cleanup to push.
    LLVM_PREFERRED_TYPE(CleanupKind)
    unsigned Kind : 31;
    /// Whether this is a conditional cleanup.
    LLVM_PREFERRED_TYPE(bool)
    unsigned IsConditional : 1;

    size_t getSize() const { return Size; }
    CleanupKind getKind() const { return (CleanupKind)Kind; }
    bool isConditional() const { return IsConditional; }
  };

  /// i32s containing the indexes of the cleanup destinations.
  RawAddress NormalCleanupDest = RawAddress::invalid();

  unsigned NextCleanupDestIndex = 1;

  /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
  llvm::BasicBlock *EHResumeBlock = nullptr;

  /// The exception slot. All landing pads write the current exception pointer
  /// into this alloca.
  llvm::Value *ExceptionSlot = nullptr;

  /// The selector slot. Under the MandatoryCleanup model, all landing pads
  /// write the current selector value into this alloca.
  llvm::AllocaInst *EHSelectorSlot = nullptr;

  /// A stack of exception code slots. Entering an __except block pushes a slot
  /// on the stack and leaving pops one. The __exception_code() intrinsic loads
  /// a value from the top of the stack.
  SmallVector<Address, 1> SEHCodeSlotStack;

  /// Value returned by __exception_info intrinsic.
  llvm::Value *SEHInfo = nullptr;

  /// Emits a landing pad for the current EH stack.
  llvm::BasicBlock *EmitLandingPad();

  llvm::BasicBlock *getInvokeDestImpl();

  /// Parent loop-based directive for scan directive.
  const OMPExecutableDirective *OMPParentLoopDirectiveForScan = nullptr;
  llvm::BasicBlock *OMPBeforeScanBlock = nullptr;
  llvm::BasicBlock *OMPAfterScanBlock = nullptr;
  llvm::BasicBlock *OMPScanExitBlock = nullptr;
  llvm::BasicBlock *OMPScanDispatch = nullptr;
  bool OMPFirstScanLoop = false;

  /// Manages parent directive for scan directives.
  class ParentLoopDirectiveForScanRegion {
    CodeGenFunction &CGF;
    const OMPExecutableDirective *ParentLoopDirectiveForScan;

  public:
    ParentLoopDirectiveForScanRegion(
        CodeGenFunction &CGF,
        const OMPExecutableDirective &ParentLoopDirectiveForScan)
        : CGF(CGF),
          ParentLoopDirectiveForScan(CGF.OMPParentLoopDirectiveForScan) {
      CGF.OMPParentLoopDirectiveForScan = &ParentLoopDirectiveForScan;
    }
    ~ParentLoopDirectiveForScanRegion() {
      CGF.OMPParentLoopDirectiveForScan = ParentLoopDirectiveForScan;
    }
  };

  template <class T>
  typename DominatingValue<T>::saved_type saveValueInCond(T value) {
    return DominatingValue<T>::save(*this, value);
  }

  class CGFPOptionsRAII {
  public:
    CGFPOptionsRAII(CodeGenFunction &CGF, FPOptions FPFeatures);
    CGFPOptionsRAII(CodeGenFunction &CGF, const Expr *E);
    ~CGFPOptionsRAII();

  private:
    void ConstructorHelper(FPOptions FPFeatures);
    CodeGenFunction &CGF;
    FPOptions OldFPFeatures;
    llvm::fp::ExceptionBehavior OldExcept;
    llvm::RoundingMode OldRounding;
    std::optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
  };

  FPOptions CurFPFeatures;

  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
  /// rethrows.
  SmallVector<llvm::Value *, 8> ObjCEHValueStack;

  /// A class controlling the emission of a finally block.
  class FinallyInfo {
    /// Where the catchall's edge through the cleanup should go.
    JumpDest RethrowDest;

    /// A function to call to enter the catch.
    llvm::FunctionCallee BeginCatchFn;

    /// An i1 variable indicating whether or not the @finally is
    /// running for an exception.
    llvm::AllocaInst *ForEHVar = nullptr;

    /// An i8* variable into which the exception pointer to rethrow
    /// has been saved.
    llvm::AllocaInst *SavedExnVar = nullptr;

  public:
    void enter(CodeGenFunction &CGF, const Stmt *Finally,
               llvm::FunctionCallee beginCatchFn,
               llvm::FunctionCallee endCatchFn, llvm::FunctionCallee rethrowFn);
    void exit(CodeGenFunction &CGF);
  };

  /// Returns true inside SEH __try blocks.
  bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); }

  /// Returns true while emitting a cleanuppad.
  bool isCleanupPadScope() const {
    return CurrentFuncletPad && isa<llvm::CleanupPadInst>(CurrentFuncletPad);
  }

  /// pushFullExprCleanup - Push a cleanup to be run at the end of the
  /// current full-expression. Safe against the possibility that
  /// we're currently inside a conditionally-evaluated expression.
  template <class T, class... As>
  void pushFullExprCleanup(CleanupKind kind, As... A) {
    // If we're not in a conditional branch, or if none of the
    // arguments requires saving, then use the unconditional cleanup.
    if (!isInConditionalBranch())
      return EHStack.pushCleanup<T>(kind, A...);

    // Stash values in a tuple so we can guarantee the order of saves.
    typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
    SavedTuple Saved{saveValueInCond(A)...};

    typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
    EHStack.pushCleanupTuple<CleanupType>(kind, Saved);
    initFullExprCleanup();
  }
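
  // Illustrative call (DestroyTemp is a placeholder cleanup class): push a
  // cleanup that runs at the end of the full-expression, even when the
  // operands were created on only one branch of a conditional:
  //
  //   CGF.pushFullExprCleanup<DestroyTemp>(NormalAndEHCleanup, TempAddr, Ty);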

  /// Queue a cleanup to be pushed after finishing the current full-expression,
  /// potentially with an active flag.
  template <class T, class... As>
  void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
    if (!isInConditionalBranch())
      return pushCleanupAfterFullExprWithActiveFlag<T>(
          Kind, RawAddress::invalid(), A...);

    RawAddress ActiveFlag = createCleanupActiveFlag();
    assert(!DominatingValue<Address>::needsSaving(ActiveFlag) &&
           "cleanup active flag should never need saving");

    typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
    SavedTuple Saved{saveValueInCond(A)...};

    typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType;
    pushCleanupAfterFullExprWithActiveFlag<CleanupType>(Kind, ActiveFlag, Saved);
  }

  template <class T, class... As>
  void pushCleanupAfterFullExprWithActiveFlag(CleanupKind Kind,
                                              RawAddress ActiveFlag, As... A) {
    LifetimeExtendedCleanupHeader Header = {sizeof(T), Kind,
                                            ActiveFlag.isValid()};

    size_t OldSize = LifetimeExtendedCleanupStack.size();
    LifetimeExtendedCleanupStack.resize(
        LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size +
        (Header.IsConditional ? sizeof(ActiveFlag) : 0));

    static_assert(sizeof(Header) % alignof(T) == 0,
                  "Cleanup will be allocated on misaligned address");
    char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
    new (Buffer) LifetimeExtendedCleanupHeader(Header);
    new (Buffer + sizeof(Header)) T(A...);
    if (Header.IsConditional)
      new (Buffer + sizeof(Header) + sizeof(T)) RawAddress(ActiveFlag);
  }

  // Push a cleanup onto EHStack and deactivate it later. It is usually
  // deactivated when exiting a `CleanupDeactivationScope` (for example: after a
  // full expression).
  template <class T, class... As>
  void pushCleanupAndDeferDeactivation(CleanupKind Kind, As... A) {
    // Placeholder dominating IP for this cleanup.
    llvm::Instruction *DominatingIP =
        Builder.CreateFlagLoad(llvm::Constant::getNullValue(Int8PtrTy));
    EHStack.pushCleanup<T>(Kind, A...);
    DeferredDeactivationCleanupStack.push_back(
        {EHStack.stable_begin(), DominatingIP});
  }

  /// Set up the last cleanup that was pushed as a conditional
  /// full-expression cleanup.
  void initFullExprCleanup() {
    initFullExprCleanupWithFlag(createCleanupActiveFlag());
  }

  void initFullExprCleanupWithFlag(RawAddress ActiveFlag);
  RawAddress createCleanupActiveFlag();

  /// PushDestructorCleanup - Push a cleanup to call the
  /// complete-object destructor of an object of the given type at the
  /// given address. Does nothing if T is not a C++ class type with a
  /// non-trivial destructor.
  void PushDestructorCleanup(QualType T, Address Addr);

  /// PushDestructorCleanup - Push a cleanup to call the
  /// complete-object variant of the given destructor on the object at
  /// the given address.
  void PushDestructorCleanup(const CXXDestructorDecl *Dtor, QualType T,
                             Address Addr);

  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
  /// process all branch fixups.
  void PopCleanupBlock(bool FallThroughIsBranchThrough = false,
                       bool ForDeactivation = false);

  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
  /// The block cannot be reactivated. Pops it if it's the top of the
  /// stack.
  ///
  /// \param DominatingIP - An instruction which is known to
  ///   dominate the current IP (if set) and which lies along
  ///   all paths of execution between the current IP and
  ///   the point at which the cleanup comes into scope.
  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
                              llvm::Instruction *DominatingIP);

  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
  /// Cannot be used to resurrect a deactivated cleanup.
  ///
  /// \param DominatingIP - An instruction which is known to
  ///   dominate the current IP (if set) and which lies along
  ///   all paths of execution between the current IP and
  ///   the point at which the cleanup comes into scope.
  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
                            llvm::Instruction *DominatingIP);

  /// Enters a new scope for capturing cleanups, all of which
  /// will be executed once the scope is exited.
  class RunCleanupsScope {
    EHScopeStack::stable_iterator CleanupStackDepth, OldCleanupScopeDepth;
    size_t LifetimeExtendedCleanupStackSize;
    CleanupDeactivationScope DeactivateCleanups;
    bool OldDidCallStackSave;

  protected:
    bool PerformCleanup;

  private:
    RunCleanupsScope(const RunCleanupsScope &) = delete;
    void operator=(const RunCleanupsScope &) = delete;

  protected:
    CodeGenFunction &CGF;

  public:
    /// Enter a new cleanup scope.
    explicit RunCleanupsScope(CodeGenFunction &CGF)
        : DeactivateCleanups(CGF), PerformCleanup(true), CGF(CGF) {
      CleanupStackDepth = CGF.EHStack.stable_begin();
      LifetimeExtendedCleanupStackSize =
          CGF.LifetimeExtendedCleanupStack.size();
      OldDidCallStackSave = CGF.DidCallStackSave;
      CGF.DidCallStackSave = false;
      OldCleanupScopeDepth = CGF.CurrentCleanupScopeDepth;
      CGF.CurrentCleanupScopeDepth = CleanupStackDepth;
    }

    /// Exit this cleanup scope, emitting any accumulated cleanups.
    ~RunCleanupsScope() {
      if (PerformCleanup)
        ForceCleanup();
    }

    /// Determine whether this scope requires any cleanups.
    bool requiresCleanups() const {
      return CGF.EHStack.stable_begin() != CleanupStackDepth;
    }

    /// Force the emission of cleanups now, instead of waiting
    /// until this object is destroyed.
    /// \param ValuesToReload - A list of values that need to be available at
    /// the insertion point after cleanup emission. If cleanup emission created
    /// a shared cleanup block, these value pointers will be rewritten.
    /// Otherwise, they will not be modified.
    void ForceCleanup(std::initializer_list<llvm::Value **> ValuesToReload = {}) {
      assert(PerformCleanup && "Already forced cleanup");
      CGF.DidCallStackSave = OldDidCallStackSave;
      DeactivateCleanups.ForceDeactivate();
      CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
                           ValuesToReload);
      PerformCleanup = false;
      CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
    }
  };
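
  // Illustrative use: scope the cleanups of a block of statements so that they
  // are emitted when the scope object is destroyed:
  //
  //   {
  //     RunCleanupsScope Scope(*this);
  //     for (const Stmt *Sub : CS->body())
  //       EmitStmt(Sub);
  //   } // accumulated cleanups emitted here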

  // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
  EHScopeStack::stable_iterator CurrentCleanupScopeDepth =
      EHScopeStack::stable_end();

  class LexicalScope : public RunCleanupsScope {
    SourceRange Range;
    SmallVector<const LabelDecl *, 4> Labels;
    LexicalScope *ParentScope;

    LexicalScope(const LexicalScope &) = delete;
    void operator=(const LexicalScope &) = delete;

  public:
    /// Enter a new cleanup scope.
    explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
        : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
      CGF.CurLexicalScope = this;
      if (CGDebugInfo *DI = CGF.getDebugInfo())
        DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
    }

    void addLabel(const LabelDecl *label) {
      assert(PerformCleanup && "adding label to dead scope?");
      Labels.push_back(label);
    }

    /// Exit this cleanup scope, emitting any accumulated
    /// cleanups.
    ~LexicalScope() {
      if (CGDebugInfo *DI = CGF.getDebugInfo())
        DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());

      // If we should perform a cleanup, force them now. Note that
      // this ends the cleanup scope before rescoping any labels.
      if (PerformCleanup) {
        ApplyDebugLocation DL(CGF, Range.getEnd());
        ForceCleanup();
      }
    }

    /// Force the emission of cleanups now, instead of waiting
    /// until this object is destroyed.
    void ForceCleanup() {
      CGF.CurLexicalScope = ParentScope;
      RunCleanupsScope::ForceCleanup();

      if (!Labels.empty())
        rescopeLabels();
    }

    bool hasLabels() const {
      return !Labels.empty();
    }

    void rescopeLabels();
  };

  typedef llvm::DenseMap<const Decl *, Address> DeclMapTy;

  /// The class used to assign some variables temporary addresses.
  class OMPMapVars {
    DeclMapTy SavedLocals;
    DeclMapTy SavedTempAddresses;
    OMPMapVars(const OMPMapVars &) = delete;
    void operator=(const OMPMapVars &) = delete;

  public:
    explicit OMPMapVars() = default;
    ~OMPMapVars() {
      assert(SavedLocals.empty() && "Did not restore original addresses.");
    };

    /// Sets the address of the variable \p LocalVD to be \p TempAddr in
    /// function \p CGF.
    /// \return true if at least one variable was set already, false otherwise.
    bool setVarAddr(CodeGenFunction &CGF, const VarDecl *LocalVD,
                    Address TempAddr) {
      LocalVD = LocalVD->getCanonicalDecl();
      // Only save it once.
      if (SavedLocals.count(LocalVD)) return false;

      // Copy the existing local entry to SavedLocals.
      auto it = CGF.LocalDeclMap.find(LocalVD);
      if (it != CGF.LocalDeclMap.end())
        SavedLocals.try_emplace(LocalVD, it->second);
      else
        SavedLocals.try_emplace(LocalVD, Address::invalid());

      // Generate the private entry.
      QualType VarTy = LocalVD->getType();
      if (VarTy->isReferenceType()) {
        Address Temp = CGF.CreateMemTemp(VarTy);
        CGF.Builder.CreateStore(TempAddr.emitRawPointer(CGF), Temp);
        TempAddr = Temp;
      }
      SavedTempAddresses.try_emplace(LocalVD, TempAddr);

      return true;
    }

    /// Applies new addresses to the list of the variables.
    /// \return true if at least one variable is using new address, false
    /// otherwise.
    bool apply(CodeGenFunction &CGF) {
      copyInto(SavedTempAddresses, CGF.LocalDeclMap);
      SavedTempAddresses.clear();
      return !SavedLocals.empty();
    }

    /// Restores original addresses of the variables.
    void restore(CodeGenFunction &CGF) {
      if (!SavedLocals.empty()) {
        copyInto(SavedLocals, CGF.LocalDeclMap);
        SavedLocals.clear();
      }
    }

  private:
    /// Copy all the entries in the source map over the corresponding
    /// entries in the destination, which must exist.
    static void copyInto(const DeclMapTy &Src, DeclMapTy &Dest) {
      for (auto &Pair : Src) {
        if (!Pair.second.isValid()) {
          Dest.erase(Pair.first);
          continue;
        }

        auto I = Dest.find(Pair.first);
        if (I != Dest.end())
          I->second = Pair.second;
        else
          Dest.insert(Pair);
      }
    }
  };
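
  // Illustrative lifecycle: remap a variable to a private address, emit code
  // under the remapping, then restore the original mapping:
  //
  //   OMPMapVars Vars;
  //   Vars.setVarAddr(CGF, VD, PrivateAddr);
  //   Vars.apply(CGF);    // LocalDeclMap now points at PrivateAddr
  //   ...                 // emit the region body
  //   Vars.restore(CGF);  // original addresses restored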

  /// The scope used to remap some variables as private in the OpenMP loop body
  /// (or other captured region emitted without outlining), and to restore old
  /// vars back on exit.
  class OMPPrivateScope : public RunCleanupsScope {
    OMPMapVars MappedVars;
    OMPPrivateScope(const OMPPrivateScope &) = delete;
    void operator=(const OMPPrivateScope &) = delete;

  public:
    /// Enter a new OpenMP private scope.
    explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}

    /// Registers \p LocalVD variable as a private with \p Addr as the address
    /// of the corresponding private variable.
    /// \return true if the variable is registered as private, false if it has
    /// been privatized already.
    bool addPrivate(const VarDecl *LocalVD, Address Addr) {
      assert(PerformCleanup && "adding private to dead scope");
      return MappedVars.setVarAddr(CGF, LocalVD, Addr);
    }

    /// Privatizes local variables previously registered as private.
    /// Registration is separate from the actual privatization to allow
    /// initializers to use values of the original variables, not the private
    /// ones. This is important, for example, if the private variable is a class
    /// variable initialized by a constructor that references other private
    /// variables. At initialization the original variables must be used, not
    /// the private copies.
    /// \return true if at least one variable was privatized, false otherwise.
    bool Privatize() { return MappedVars.apply(CGF); }

    void ForceCleanup() {
      RunCleanupsScope::ForceCleanup();
      restoreMap();
    }

    /// Exit scope - all the mapped variables are restored.
    ~OMPPrivateScope() {
      if (PerformCleanup)
        ForceCleanup();
    }

    /// Checks if the global variable is captured in current function.
    bool isGlobalVarCaptured(const VarDecl *VD) const {
      VD = VD->getCanonicalDecl();
      return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
    }

    /// Restore all mapped variables w/o clean up. This is useful when we want
    /// to reference the original variables but don't want the clean up because
    /// that could emit lifetime end too early, causing backend issue #56913.
    void restoreMap() { MappedVars.restore(CGF); }
  };
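
  // Illustrative use when emitting an OpenMP construct with private variables:
  //
  //   OMPPrivateScope PrivScope(CGF);
  //   PrivScope.addPrivate(VD, PrivateAddr);
  //   (void)PrivScope.Privatize(); // make the remapping visible
  //   ...                          // emit the construct body
  //                                // ~OMPPrivateScope() restores the variables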

  /// Save/restore original map of previously emitted local vars in case when we
  /// need to duplicate emission of the same code several times in the same
  /// function for OpenMP code.
  class OMPLocalDeclMapRAII {
    CodeGenFunction &CGF;
    DeclMapTy SavedMap;

  public:
    OMPLocalDeclMapRAII(CodeGenFunction &CGF)
        : CGF(CGF), SavedMap(CGF.LocalDeclMap) {}
    ~OMPLocalDeclMapRAII() { SavedMap.swap(CGF.LocalDeclMap); }
  };

  /// Takes the old cleanup stack size and emits the cleanup blocks
  /// that have been added.
  void
  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
                   std::initializer_list<llvm::Value **> ValuesToReload = {});

  /// Takes the old cleanup stack size and emits the cleanup blocks
  /// that have been added, then adds all lifetime-extended cleanups from
  /// the given position to the stack.
  void
  PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
                   size_t OldLifetimeExtendedStackSize,
                   std::initializer_list<llvm::Value **> ValuesToReload = {});

  void ResolveBranchFixups(llvm::BasicBlock *Target);

  /// The given basic block lies in the current EH scope, but may be a
  /// target of a potentially scope-crossing jump; get a stable handle
  /// to which we can perform this jump later.
  JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
    return JumpDest(Target,
                    EHStack.getInnermostNormalCleanup(),
                    NextCleanupDestIndex++);
  }

  /// The given basic block lies in the current EH scope, but may be a
  /// target of a potentially scope-crossing jump; get a stable handle
  /// to which we can perform this jump later.
  JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
    return getJumpDestInCurrentScope(createBasicBlock(Name));
  }

  /// EmitBranchThroughCleanup - Emit a branch from the current insert
  /// block through the normal cleanup handling code (if any) and then
  /// on to \arg Dest.
  void EmitBranchThroughCleanup(JumpDest Dest);

  /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
  /// specified destination obviously has no cleanups to run. 'false' is always
  /// a conservatively correct answer for this method.
  bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;

  /// popCatchScope - Pops the catch scope at the top of the EHScope
  /// stack, emitting any required code (other than the catch handlers
  /// themselves).
  void popCatchScope();

  llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
  llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
  llvm::BasicBlock *
  getFuncletEHDispatchBlock(EHScopeStack::stable_iterator scope);

  /// An object to manage conditionally-evaluated expressions.
  class ConditionalEvaluation {
    llvm::BasicBlock *StartBB;

  public:
    ConditionalEvaluation(CodeGenFunction &CGF)
        : StartBB(CGF.Builder.GetInsertBlock()) {}

    void begin(CodeGenFunction &CGF) {
      assert(CGF.OutermostConditional != this);
      if (!CGF.OutermostConditional)
        CGF.OutermostConditional = this;
    }

    void end(CodeGenFunction &CGF) {
      assert(CGF.OutermostConditional != nullptr);
      if (CGF.OutermostConditional == this)
        CGF.OutermostConditional = nullptr;
    }

    /// Returns a block which will be executed prior to each
    /// evaluation of the conditional code.
    llvm::BasicBlock *getStartingBlock() const {
      return StartBB;
    }
  };

  /// isInConditionalBranch - Return true if we're currently emitting
  /// one branch or the other of a conditional expression.
  bool isInConditionalBranch() const { return OutermostConditional != nullptr; }

  void setBeforeOutermostConditional(llvm::Value *value, Address addr,
                                     CodeGenFunction &CGF) {
    assert(isInConditionalBranch());
    llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
    auto store =
        new llvm::StoreInst(value, addr.emitRawPointer(CGF), &block->back());
    store->setAlignment(addr.getAlignment().getAsAlign());
  }

  /// An RAII object to record that we're evaluating a statement
  /// expression.
  class StmtExprEvaluation {
    CodeGenFunction &CGF;

    /// We have to save the outermost conditional: cleanups in a
    /// statement expression aren't conditional just because the
    /// StmtExpr is.
    ConditionalEvaluation *SavedOutermostConditional;

  public:
    StmtExprEvaluation(CodeGenFunction &CGF)
        : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
      CGF.OutermostConditional = nullptr;
    }

    ~StmtExprEvaluation() {
      CGF.OutermostConditional = SavedOutermostConditional;
      CGF.EnsureInsertPoint();
    }
  };

  /// An object which temporarily prevents a value from being
  /// destroyed by aggressive peephole optimizations that assume that
  /// all uses of a value have been realized in the IR.
  class PeepholeProtection {
    llvm::Instruction *Inst = nullptr;
    friend class CodeGenFunction;

  public:
    PeepholeProtection() = default;
  };

  /// A non-RAII class containing all the information about a bound
  /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
  /// this which makes individual mappings very simple; using this
  /// class directly is useful when you have a variable number of
  /// opaque values or don't want the RAII functionality for some
  /// reason.
  class OpaqueValueMappingData {
    const OpaqueValueExpr *OpaqueValue;
    bool BoundLValue;
    CodeGenFunction::PeepholeProtection Protection;

    OpaqueValueMappingData(const OpaqueValueExpr *ov,
                           bool boundLValue)
        : OpaqueValue(ov), BoundLValue(boundLValue) {}

  public:
    OpaqueValueMappingData() : OpaqueValue(nullptr) {}

    static bool shouldBindAsLValue(const Expr *expr) {
      // gl-values should be bound as l-values for obvious reasons.
      // Records should be bound as l-values because IR generation
      // always keeps them in memory. Expressions of function type
      // act exactly like l-values but are formally required to be
      // r-values in C.
      return expr->isGLValue() ||
             expr->getType()->isFunctionType() ||
             hasAggregateEvaluationKind(expr->getType());
    }

    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
                                       const OpaqueValueExpr *ov,
                                       const Expr *e) {
      if (shouldBindAsLValue(ov))
        return bind(CGF, ov, CGF.EmitLValue(e));
      return bind(CGF, ov, CGF.EmitAnyExpr(e));
    }

    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
                                       const OpaqueValueExpr *ov,
                                       const LValue &lv) {
      assert(shouldBindAsLValue(ov));
      CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
      return OpaqueValueMappingData(ov, true);
    }

    static OpaqueValueMappingData bind(CodeGenFunction &CGF,
                                       const OpaqueValueExpr *ov,
                                       const RValue &rv) {
      assert(!shouldBindAsLValue(ov));
      CGF.OpaqueRValues.insert(std::make_pair(ov, rv));

      OpaqueValueMappingData data(ov, false);

      // Work around an extremely aggressive peephole optimization in
      // EmitScalarConversion which assumes that all other uses of a
      // value are extant.
      data.Protection = CGF.protectFromPeepholes(rv);

      return data;
    }

    bool isValid() const { return OpaqueValue != nullptr; }
    void clear() { OpaqueValue = nullptr; }

    void unbind(CodeGenFunction &CGF) {
      assert(OpaqueValue && "no data to unbind!");

      if (BoundLValue) {
        CGF.OpaqueLValues.erase(OpaqueValue);
      } else {
        CGF.OpaqueRValues.erase(OpaqueValue);
        CGF.unprotectFromPeepholes(Protection);
      }
    }
  };

  /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
  class OpaqueValueMapping {
    CodeGenFunction &CGF;
    OpaqueValueMappingData Data;

  public:
    static bool shouldBindAsLValue(const Expr *expr) {
      return OpaqueValueMappingData::shouldBindAsLValue(expr);
    }

    /// Build the opaque value mapping for the given conditional
    /// operator if it's the GNU ?: extension. This is a common
    /// enough pattern that the convenience operator is really
    /// worthwhile.
    OpaqueValueMapping(CodeGenFunction &CGF,
                       const AbstractConditionalOperator *op) : CGF(CGF) {
      if (isa<ConditionalOperator>(op))
        // Leave Data empty.
        return;

      const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
      Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
                                          e->getCommon());
    }

    /// Build the opaque value mapping for an OpaqueValueExpr whose source
    /// expression is set to the expression the OVE represents.
    OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
        : CGF(CGF) {
      assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
                                    "for OVE with no source expression");
      Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr());
    }

    OpaqueValueMapping(CodeGenFunction &CGF,
                       const OpaqueValueExpr *opaqueValue,
                       LValue lvalue)
        : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
    }

    OpaqueValueMapping(CodeGenFunction &CGF,
                       const OpaqueValueExpr *opaqueValue,
                       RValue rvalue)
        : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
    }

    void pop() {
      Data.unbind(CGF);
      Data.clear();
    }

    ~OpaqueValueMapping() {
      if (Data.isValid()) Data.unbind(CGF);
    }
  };
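
  // Illustrative use for the GNU 'x ?: y' extension: the opaque value for 'x'
  // is bound once and reused by both the condition and the true branch:
  //
  //   OpaqueValueMapping Binding(CGF, E); // E: AbstractConditionalOperator*
  //   ...                                 // emit branches; dtor unbinds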

private:
  CGDebugInfo *DebugInfo;
  /// Used to create unique names for artificial VLA size debug info variables.
  unsigned VLAExprCounter = 0;
  bool DisableDebugInfo = false;

  /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
  /// calling llvm.stacksave for multiple VLAs in the same scope.
  bool DidCallStackSave = false;

  /// IndirectBranch - The first time an indirect goto is seen we create a block
  /// with an indirect branch. Every time we see the address of a label taken,
  /// we add the label to the indirect goto. Every subsequent indirect goto is
  /// codegen'd as a jump to the IndirectBranch's basic block.
  llvm::IndirectBrInst *IndirectBranch = nullptr;

  /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
  /// declarations.
  DeclMapTy LocalDeclMap;

  // Keep track of the cleanups for callee-destructed parameters pushed to the
  // cleanup stack so that they can be deactivated later.
  llvm::DenseMap<const ParmVarDecl *, EHScopeStack::stable_iterator>
      CalleeDestructedParamCleanups;

  /// SizeArguments - If a ParmVarDecl had the pass_object_size attribute, this
  /// will contain a mapping from said ParmVarDecl to its implicit "object_size"
  /// parameter.
  llvm::SmallDenseMap<const ParmVarDecl *, const ImplicitParamDecl *, 2>
      SizeArguments;

  /// Track escaped local variables with auto storage. Used during SEH
  /// outlining to produce a call to llvm.localescape.
  llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;

  /// LabelMap - This keeps track of the LLVM basic block for each C label.
  llvm::DenseMap<const LabelDecl *, JumpDest> LabelMap;

  // BreakContinueStack - This keeps track of where break and continue
  // statements should jump to.
  struct BreakContinue {
    BreakContinue(JumpDest Break, JumpDest Continue)
        : BreakBlock(Break), ContinueBlock(Continue) {}

    JumpDest BreakBlock;
    JumpDest ContinueBlock;
  };
  SmallVector<BreakContinue, 8> BreakContinueStack;

  /// Handles cancellation exit points in OpenMP-related constructs.
  class OpenMPCancelExitStack {
    /// Tracks cancellation exit point and join point for cancel-related exit
    /// and normal exit.
    struct CancelExit {
      CancelExit() = default;
      CancelExit(OpenMPDirectiveKind Kind, JumpDest ExitBlock,
                 JumpDest ContBlock)
          : Kind(Kind), ExitBlock(ExitBlock), ContBlock(ContBlock) {}
      OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
      /// true if the exit block has been emitted already by the special
      /// emitExit() call, false if the default codegen is used.
      bool HasBeenEmitted = false;
      JumpDest ExitBlock;
      JumpDest ContBlock;
    };

    SmallVector<CancelExit, 8> Stack;

  public:
    OpenMPCancelExitStack() : Stack(1) {}
    ~OpenMPCancelExitStack() = default;
    /// Fetches the exit block for the current OpenMP construct.
    JumpDest getExitBlock() const { return Stack.back().ExitBlock; }
    /// Emits exit block with special codegen procedure specific for the related
    /// OpenMP construct + emits code for normal construct cleanup.
    void emitExit(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
                  const llvm::function_ref<void(CodeGenFunction &)> CodeGen) {
      if (Stack.back().Kind == Kind && getExitBlock().isValid()) {
        assert(CGF.getOMPCancelDestination(Kind).isValid());
        assert(CGF.HaveInsertPoint());
        assert(!Stack.back().HasBeenEmitted);
        auto IP = CGF.Builder.saveAndClearIP();
        CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
        CodeGen(CGF);
        CGF.EmitBranch(Stack.back().ContBlock.getBlock());
        CGF.Builder.restoreIP(IP);
        Stack.back().HasBeenEmitted = true;
      }
      CodeGen(CGF);
    }
    /// Enter the cancel supporting \a Kind construct.
    /// \param Kind OpenMP directive that supports cancel constructs.
    /// \param HasCancel true, if the construct has inner cancel directive,
    /// false otherwise.
    void enter(CodeGenFunction &CGF, OpenMPDirectiveKind Kind, bool HasCancel) {
      Stack.push_back({Kind,
                       HasCancel ? CGF.getJumpDestInCurrentScope("cancel.exit")
                                 : JumpDest(),
                       HasCancel ? CGF.getJumpDestInCurrentScope("cancel.cont")
                                 : JumpDest()});
    }
    /// Emits default exit point for the cancel construct (if the special one
    /// has not been used) + join point for cancel/normal exits.
    void exit(CodeGenFunction &CGF) {
      if (getExitBlock().isValid()) {
        assert(CGF.getOMPCancelDestination(Stack.back().Kind).isValid());
        bool HaveIP = CGF.HaveInsertPoint();
        if (!Stack.back().HasBeenEmitted) {
          if (HaveIP)
            CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
          CGF.EmitBlock(Stack.back().ExitBlock.getBlock());
          CGF.EmitBranchThroughCleanup(Stack.back().ContBlock);
        }
        CGF.EmitBlock(Stack.back().ContBlock.getBlock());
        if (!HaveIP) {
          CGF.Builder.CreateUnreachable();
          CGF.Builder.ClearInsertionPoint();
        }
      }
      Stack.pop_back();
    }
  };

  OpenMPCancelExitStack OMPCancelStack;
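
  // Illustrative pairing when emitting a cancellable OpenMP region:
  //
  //   OMPCancelStack.enter(CGF, OMPD_parallel, /*HasCancel=*/true);
  //   ...                       // emit the region body; 'cancel' branches to
  //                             // the "cancel.exit" destination
  //   OMPCancelStack.exit(CGF); // emits the default exit and join points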

  /// Lower the Likelihood knowledge about the \p Cond via the llvm.expect
  /// intrinsic.
  llvm::Value *emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
                                                    Stmt::Likelihood LH);

  CodeGenPGO PGO;

  /// Bitmap used by MC/DC to track condition outcomes of a boolean expression.
  Address MCDCCondBitmapAddr = Address::invalid();

  /// Calculate branch weights appropriate for PGO data
  llvm::MDNode *createProfileWeights(uint64_t TrueCount,
                                     uint64_t FalseCount) const;
  llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights) const;
  llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
                                            uint64_t LoopCount) const;

public:
  /// Increment the profiler's counter for the given statement by \p StepV.
  /// If \p StepV is null, the default increment is 1.
  void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
    if (CGM.getCodeGenOpts().hasProfileClangInstr() &&
        !CurFn->hasFnAttribute(llvm::Attribute::NoProfile) &&
        !CurFn->hasFnAttribute(llvm::Attribute::SkipProfile)) {
      auto AL = ApplyDebugLocation::CreateArtificial(*this);
      PGO.emitCounterSetOrIncrement(Builder, S, StepV);
    }
    PGO.setCurrentStmt(S);
  }

  bool isMCDCCoverageEnabled() const {
    return (CGM.getCodeGenOpts().hasProfileClangInstr() &&
            CGM.getCodeGenOpts().MCDCCoverage &&
            !CurFn->hasFnAttribute(llvm::Attribute::NoProfile));
  }
1644 /// Allocate a temp value on the stack that MCDC can use to track condition
1646 void maybeCreateMCDCCondBitmap() {
1647 if (isMCDCCoverageEnabled()) {
1648 PGO
.emitMCDCParameters(Builder
);
1649 MCDCCondBitmapAddr
=
1650 CreateIRTemp(getContext().UnsignedIntTy
, "mcdc.addr");
1654 bool isBinaryLogicalOp(const Expr
*E
) const {
1655 const BinaryOperator
*BOp
= dyn_cast
<BinaryOperator
>(E
->IgnoreParens());
1656 return (BOp
&& BOp
->isLogicalOp());
1659 /// Zero-init the MCDC temp value.
1660 void maybeResetMCDCCondBitmap(const Expr
*E
) {
1661 if (isMCDCCoverageEnabled() && isBinaryLogicalOp(E
)) {
1662 PGO
.emitMCDCCondBitmapReset(Builder
, E
, MCDCCondBitmapAddr
);
1663 PGO
.setCurrentStmt(E
);
1667 /// Increment the profiler's counter for the given expression by \p StepV.
1668 /// If \p StepV is null, the default increment is 1.
1669 void maybeUpdateMCDCTestVectorBitmap(const Expr
*E
) {
1670 if (isMCDCCoverageEnabled() && isBinaryLogicalOp(E
)) {
1671 PGO
.emitMCDCTestVectorBitmapUpdate(Builder
, E
, MCDCCondBitmapAddr
, *this);
1672 PGO
.setCurrentStmt(E
);
1676 /// Update the MCDC temp value with the condition's evaluated result.
1677 void maybeUpdateMCDCCondBitmap(const Expr
*E
, llvm::Value
*Val
) {
1678 if (isMCDCCoverageEnabled()) {
1679 PGO
.emitMCDCCondBitmapUpdate(Builder
, E
, MCDCCondBitmapAddr
, Val
, *this);
1680 PGO
.setCurrentStmt(E
);
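
  // Illustrative sketch of the MC/DC bitmap lifecycle (assumed emitter shape;
  // `EmitCond` is a hypothetical placeholder for scalar condition emission):
  //
  //   CGF.maybeCreateMCDCCondBitmap();          // once per function
  //   CGF.maybeResetMCDCCondBitmap(E);          // before a boolean expr E
  //   llvm::Value *V = EmitCond(CGF, E);        // evaluate each condition...
  //   CGF.maybeUpdateMCDCCondBitmap(E, V);      // ...recording its outcome
  //   CGF.maybeUpdateMCDCTestVectorBitmap(E);   // after E fully evaluates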
  /// Get the profiler's count for the given statement.
  uint64_t getProfileCount(const Stmt *S) {
    return PGO.getStmtCount(S).value_or(0);
  }

  /// Set the profiler's current count.
  void setCurrentProfileCount(uint64_t Count) {
    PGO.setCurrentRegionCount(Count);
  }

  /// Get the profiler's current count. This is generally the count for the
  /// most recently incremented counter.
  uint64_t getCurrentProfileCount() {
    return PGO.getCurrentRegionCount();
  }

private:
  /// SwitchInsn - This is the nearest current switch instruction. It is null
  /// if the current context is not in a switch.
  llvm::SwitchInst *SwitchInsn = nullptr;
  /// The branch weights of SwitchInsn when doing instrumentation based PGO.
  SmallVector<uint64_t, 16> *SwitchWeights = nullptr;

  /// The likelihood attributes of the SwitchCase.
  SmallVector<Stmt::Likelihood, 16> *SwitchLikelihood = nullptr;

  /// CaseRangeBlock - This block holds the if-condition check for the last
  /// case statement range in the current switch instruction.
  llvm::BasicBlock *CaseRangeBlock = nullptr;

  /// OpaqueLValues - Keeps track of the current set of opaque value
  /// expressions.
  llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
  llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;

  // VLASizeMap - This keeps track of the associated size for each VLA type.
  // We track this by the size expression rather than the type itself because
  // in certain situations, like a const qualifier applied to a VLA typedef,
  // multiple VLA types can share the same size expression.
  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
  // enter/leave scopes.
  llvm::DenseMap<const Expr *, llvm::Value *> VLASizeMap;

  /// A block containing a single 'unreachable' instruction. Created
  /// lazily by getUnreachableBlock().
  llvm::BasicBlock *UnreachableBlock = nullptr;

  /// Count of the number of return expressions in the function.
  unsigned NumReturnExprs = 0;

  /// Count of the number of simple (constant) return expressions in the
  /// function.
  unsigned NumSimpleReturnExprs = 0;

  /// The last regular (non-return) debug location (breakpoint) in the
  /// function.
  SourceLocation LastStopPoint;

public:
  /// Source location information about the default argument or member
  /// initializer expression we're evaluating, if any.
  CurrentSourceLocExprScope CurSourceLocExprScope;
  using SourceLocExprScopeGuard =
      CurrentSourceLocExprScope::SourceLocExprScopeGuard;

  /// A scope within which we are constructing the fields of an object which
  /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
  /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
  class FieldConstructionScope {
  public:
    FieldConstructionScope(CodeGenFunction &CGF, Address This)
        : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
      CGF.CXXDefaultInitExprThis = This;
    }
    ~FieldConstructionScope() {
      CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
    }

  private:
    CodeGenFunction &CGF;
    Address OldCXXDefaultInitExprThis;
  };

  /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
  /// is overridden to be the object under construction.
  class CXXDefaultInitExprScope {
  public:
    CXXDefaultInitExprScope(CodeGenFunction &CGF, const CXXDefaultInitExpr *E)
        : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue),
          OldCXXThisAlignment(CGF.CXXThisAlignment),
          SourceLocScope(E, CGF.CurSourceLocExprScope) {
      CGF.CXXThisValue = CGF.CXXDefaultInitExprThis.getBasePointer();
      CGF.CXXThisAlignment = CGF.CXXDefaultInitExprThis.getAlignment();
    }
    ~CXXDefaultInitExprScope() {
      CGF.CXXThisValue = OldCXXThisValue;
      CGF.CXXThisAlignment = OldCXXThisAlignment;
    }

  public:
    CodeGenFunction &CGF;
    llvm::Value *OldCXXThisValue;
    CharUnits OldCXXThisAlignment;
    SourceLocExprScopeGuard SourceLocScope;
  };

  struct CXXDefaultArgExprScope : SourceLocExprScopeGuard {
    CXXDefaultArgExprScope(CodeGenFunction &CGF, const CXXDefaultArgExpr *E)
        : SourceLocExprScopeGuard(E, CGF.CurSourceLocExprScope) {}
  };
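
  // Illustrative sketch (assumed constructor-emission shape): while emitting
  // a field initializer, a FieldConstructionScope pins 'this' so that any
  // CXXDefaultInitExpr inside the initializer sees the object under
  // construction. `EmitInit` and `FieldAddr` are hypothetical placeholders.
  //
  //   {
  //     CodeGenFunction::FieldConstructionScope FCS(CGF, ThisAddr);
  //     CodeGenFunction::CXXDefaultInitExprScope DIS(CGF, DIE);
  //     EmitInit(CGF, DIE->getExpr(), FieldAddr);  // sees overridden 'this'
  //   }  // both scopes restore the previous state here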
  /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
  /// current loop index is overridden.
  class ArrayInitLoopExprScope {
  public:
    ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
        : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
      CGF.ArrayInitIndex = Index;
    }
    ~ArrayInitLoopExprScope() {
      CGF.ArrayInitIndex = OldArrayInitIndex;
    }

  private:
    CodeGenFunction &CGF;
    llvm::Value *OldArrayInitIndex;
  };

  class InlinedInheritingConstructorScope {
  public:
    InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
        : CGF(CGF), OldCurGD(CGF.CurGD), OldCurFuncDecl(CGF.CurFuncDecl),
          OldCurCodeDecl(CGF.CurCodeDecl),
          OldCXXABIThisDecl(CGF.CXXABIThisDecl),
          OldCXXABIThisValue(CGF.CXXABIThisValue),
          OldCXXThisValue(CGF.CXXThisValue),
          OldCXXABIThisAlignment(CGF.CXXABIThisAlignment),
          OldCXXThisAlignment(CGF.CXXThisAlignment),
          OldReturnValue(CGF.ReturnValue), OldFnRetTy(CGF.FnRetTy),
          OldCXXInheritedCtorInitExprArgs(
              std::move(CGF.CXXInheritedCtorInitExprArgs)) {
      CGF.CurGD = GD;
      CGF.CurFuncDecl = CGF.CurCodeDecl =
          cast<CXXConstructorDecl>(GD.getDecl());
      CGF.CXXABIThisDecl = nullptr;
      CGF.CXXABIThisValue = nullptr;
      CGF.CXXThisValue = nullptr;
      CGF.CXXABIThisAlignment = CharUnits();
      CGF.CXXThisAlignment = CharUnits();
      CGF.ReturnValue = Address::invalid();
      CGF.FnRetTy = QualType();
      CGF.CXXInheritedCtorInitExprArgs.clear();
    }
    ~InlinedInheritingConstructorScope() {
      CGF.CurGD = OldCurGD;
      CGF.CurFuncDecl = OldCurFuncDecl;
      CGF.CurCodeDecl = OldCurCodeDecl;
      CGF.CXXABIThisDecl = OldCXXABIThisDecl;
      CGF.CXXABIThisValue = OldCXXABIThisValue;
      CGF.CXXThisValue = OldCXXThisValue;
      CGF.CXXABIThisAlignment = OldCXXABIThisAlignment;
      CGF.CXXThisAlignment = OldCXXThisAlignment;
      CGF.ReturnValue = OldReturnValue;
      CGF.FnRetTy = OldFnRetTy;
      CGF.CXXInheritedCtorInitExprArgs =
          std::move(OldCXXInheritedCtorInitExprArgs);
    }

  private:
    CodeGenFunction &CGF;
    GlobalDecl OldCurGD;
    const Decl *OldCurFuncDecl;
    const Decl *OldCurCodeDecl;
    ImplicitParamDecl *OldCXXABIThisDecl;
    llvm::Value *OldCXXABIThisValue;
    llvm::Value *OldCXXThisValue;
    CharUnits OldCXXABIThisAlignment;
    CharUnits OldCXXThisAlignment;
    Address OldReturnValue;
    QualType OldFnRetTy;
    CallArgList OldCXXInheritedCtorInitExprArgs;
  };

  // Helper class for the OpenMP IR Builder. Allows reusability of code used
  // for region body and finalization codegen callbacks. This class will also
  // contain privatization functions used by the privatization callbacks.
  //
  // TODO: this is a temporary class for things that are being moved out of
  // CGOpenMPRuntime, new versions of current CodeGenFunction methods, or
  // utility functions for use with the OMPBuilder. Once the move to the
  // OMPBuilder is done, everything here will either become part of
  // CodeGenFunction directly, or a new helper class that will contain
  // functions used by both this and the OMPBuilder.

  struct OMPBuilderCBHelpers {
    OMPBuilderCBHelpers() = delete;
    OMPBuilderCBHelpers(const OMPBuilderCBHelpers &) = delete;
    OMPBuilderCBHelpers &operator=(const OMPBuilderCBHelpers &) = delete;

    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;

    /// Cleanup action for allocate support.
    class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
      llvm::CallInst *RTLFnCI;

    public:
      OMPAllocateCleanupTy(llvm::CallInst *RLFnCI) : RTLFnCI(RLFnCI) {
        RLFnCI->removeFromParent();
      }

      void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
        if (!CGF.HaveInsertPoint())
          return;
        CGF.Builder.Insert(RTLFnCI);
      }
    };
    /// Returns the address of the threadprivate variable for the current
    /// thread. This also creates any necessary OMP runtime calls.
    ///
    /// \param VD VarDecl for the threadprivate variable.
    /// \param VDAddr Address of the VarDecl.
    /// \param Loc The location where the directive was encountered.
    static Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
                                          const VarDecl *VD, Address VDAddr,
                                          SourceLocation Loc);

    /// Gets the OpenMP-specific address of the local variable \p VD.
    static Address getAddressOfLocalVariable(CodeGenFunction &CGF,
                                             const VarDecl *VD);

    /// Get the platform-specific name separator.
    /// \param Parts different parts of the final name that need separation
    /// \param FirstSeparator First separator used between the initial two
    /// parts of the name.
    /// \param Separator separator used between all of the rest consecutive
    /// parts of the name
    static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
                                             StringRef FirstSeparator = ".",
                                             StringRef Separator = ".");
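
    // Illustrative note (assumed values, not taken from this file):
    // FirstSeparator is used for the leading parts and Separator for the
    // remaining ones, so
    //
    //   getNameWithSeparators({"omp", "outlined"}, ".", "_")
    //
    // yields a name of the shape ".omp_outlined" (the exact shape depends on
    // the implementation in CGStmtOpenMP.cpp).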
    /// Emit the Finalization for an OMP region.
    /// \param CGF The CodeGen function this belongs to.
    /// \param IP Insertion point for generating the finalization code.
    static void FinalizeOMPRegion(CodeGenFunction &CGF, InsertPointTy IP) {
      CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
      assert(IP.getBlock()->end() != IP.getPoint() &&
             "OpenMP IR Builder should cause terminated block!");

      llvm::BasicBlock *IPBB = IP.getBlock();
      llvm::BasicBlock *DestBB = IPBB->getUniqueSuccessor();
      assert(DestBB && "Finalization block should have one successor!");

      // erase and replace with cleanup branch.
      IPBB->getTerminator()->eraseFromParent();
      CGF.Builder.SetInsertPoint(IPBB);
      CodeGenFunction::JumpDest Dest = CGF.getJumpDestInCurrentScope(DestBB);
      CGF.EmitBranchThroughCleanup(Dest);
    }

    /// Emit the body of an OMP region.
    /// \param CGF The CodeGen function this belongs to.
    /// \param RegionBodyStmt The body statement for the OpenMP region being
    /// generated.
    /// \param AllocaIP Where to insert alloca instructions.
    /// \param CodeGenIP Where to insert the region code.
    /// \param RegionName Name to be used for new blocks.
    static void EmitOMPInlinedRegionBody(CodeGenFunction &CGF,
                                         const Stmt *RegionBodyStmt,
                                         InsertPointTy AllocaIP,
                                         InsertPointTy CodeGenIP,
                                         Twine RegionName);

    static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP,
                                llvm::BasicBlock &FiniBB, llvm::Function *Fn,
                                ArrayRef<llvm::Value *> Args) {
      llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
      if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
        CodeGenIPBBTI->eraseFromParent();

      CGF.Builder.SetInsertPoint(CodeGenIPBB);

      if (Fn->doesNotThrow())
        CGF.EmitNounwindRuntimeCall(Fn, Args);
      else
        CGF.EmitRuntimeCall(Fn, Args);

      if (CGF.Builder.saveIP().isSet())
        CGF.Builder.CreateBr(&FiniBB);
    }

    /// Emit the body of an OMP region that will be outlined in
    /// OpenMPIRBuilder::finalize().
    /// \param CGF The CodeGen function this belongs to.
    /// \param RegionBodyStmt The body statement for the OpenMP region being
    /// generated.
    /// \param AllocaIP Where to insert alloca instructions.
    /// \param CodeGenIP Where to insert the region code.
    /// \param RegionName Name to be used for new blocks.
    static void EmitOMPOutlinedRegionBody(CodeGenFunction &CGF,
                                          const Stmt *RegionBodyStmt,
                                          InsertPointTy AllocaIP,
                                          InsertPointTy CodeGenIP,
                                          Twine RegionName);

    /// RAII for preserving necessary info during Outlined region body codegen.
    class OutlinedRegionBodyRAII {
      llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
      CodeGenFunction::JumpDest OldReturnBlock;
      CodeGenFunction &CGF;

    public:
      OutlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP,
                             llvm::BasicBlock &RetBB)
          : CGF(cgf) {
        assert(AllocaIP.isSet() &&
               "Must specify Insertion point for allocas of outlined function");
        OldAllocaIP = CGF.AllocaInsertPt;
        CGF.AllocaInsertPt = &*AllocaIP.getPoint();

        OldReturnBlock = CGF.ReturnBlock;
        CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
      }

      ~OutlinedRegionBodyRAII() {
        CGF.AllocaInsertPt = OldAllocaIP;
        CGF.ReturnBlock = OldReturnBlock;
      }
    };

    /// RAII for preserving necessary info during inlined region body codegen.
    class InlinedRegionBodyRAII {
      llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
      CodeGenFunction &CGF;

    public:
      InlinedRegionBodyRAII(CodeGenFunction &cgf, InsertPointTy &AllocaIP,
                            llvm::BasicBlock &FiniBB)
          : CGF(cgf) {
        // The alloca insertion block should be in the entry block of the
        // containing function, so this expects either an empty AllocaIP (in
        // which case the old alloca insertion point is reused) or a new
        // AllocaIP in the same block as the old one.
        assert((!AllocaIP.isSet() ||
                CGF.AllocaInsertPt->getParent() == AllocaIP.getBlock()) &&
               "Insertion point should be in the entry block of containing "
               "function!");
        OldAllocaIP = CGF.AllocaInsertPt;
        if (AllocaIP.isSet())
          CGF.AllocaInsertPt = &*AllocaIP.getPoint();

        // TODO: Remove the call, after making sure the counter is not used by
        //       the EHStack.
        // Since this is an inlined region, it should not modify the
        // ReturnBlock, and should reuse the one for the enclosing outlined
        // region. So, the JumpDest returned by the function is discarded.
        (void)CGF.getJumpDestInCurrentScope(&FiniBB);
      }

      ~InlinedRegionBodyRAII() { CGF.AllocaInsertPt = OldAllocaIP; }
    };
  };
private:
  /// CXXThisDecl - When generating code for a C++ member function,
  /// this will hold the implicit 'this' declaration.
  ImplicitParamDecl *CXXABIThisDecl = nullptr;
  llvm::Value *CXXABIThisValue = nullptr;
  llvm::Value *CXXThisValue = nullptr;
  CharUnits CXXABIThisAlignment;
  CharUnits CXXThisAlignment;

  /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
  /// this expression.
  Address CXXDefaultInitExprThis = Address::invalid();

  /// The current array initialization index when evaluating an
  /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
  llvm::Value *ArrayInitIndex = nullptr;

  /// The values of function arguments to use when evaluating
  /// CXXInheritedCtorInitExprs within this context.
  CallArgList CXXInheritedCtorInitExprArgs;

  /// CXXStructorImplicitParamDecl - When generating code for a constructor or
  /// destructor, this will hold the implicit argument (e.g. VTT).
  ImplicitParamDecl *CXXStructorImplicitParamDecl = nullptr;
  llvm::Value *CXXStructorImplicitParamValue = nullptr;

  /// OutermostConditional - Points to the outermost active
  /// conditional control. This is used so that we know if a
  /// temporary should be destroyed conditionally.
  ConditionalEvaluation *OutermostConditional = nullptr;

  /// The current lexical scope.
  LexicalScope *CurLexicalScope = nullptr;

  /// The current source location that should be used for exception
  /// handling code.
  SourceLocation CurEHLocation;

  /// BlockByrefInfos - For each __block variable, contains
  /// information about the layout of the variable.
  llvm::DenseMap<const ValueDecl *, BlockByrefInfo> BlockByrefInfos;

  /// Used by -fsanitize=nullability-return to determine whether the return
  /// value can be checked.
  llvm::Value *RetValNullabilityPrecondition = nullptr;

  /// Check if -fsanitize=nullability-return instrumentation is required for
  /// this function.
  bool requiresReturnValueNullabilityCheck() const {
    return RetValNullabilityPrecondition;
  }

  /// Used to store precise source locations for return statements by the
  /// runtime return value checks.
  Address ReturnLocation = Address::invalid();

  /// Check if the return value of this function requires sanitization.
  bool requiresReturnValueCheck() const;

  bool isInAllocaArgument(CGCXXABI &ABI, QualType Ty);
  bool hasInAllocaArg(const CXXMethodDecl *MD);

  llvm::BasicBlock *TerminateLandingPad = nullptr;
  llvm::BasicBlock *TerminateHandler = nullptr;
  llvm::SmallVector<llvm::BasicBlock *, 2> TrapBBs;

  /// Terminate funclets keyed by parent funclet pad.
  llvm::MapVector<llvm::Value *, llvm::BasicBlock *> TerminateFunclets;

  /// Largest vector width used in this function. Will be used to create a
  /// function attribute.
  unsigned LargestVectorWidth = 0;

  /// True if we need to emit the lifetime markers. This is initially set in
  /// the constructor, but could be overwritten to true if this is a coroutine.
  bool ShouldEmitLifetimeMarkers;

  /// Add OpenCL kernel arg metadata and the kernel attribute metadata to
  /// the function metadata.
  void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn);

public:
  CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext = false);
  ~CodeGenFunction();

  CodeGenTypes &getTypes() const { return CGM.getTypes(); }
  ASTContext &getContext() const { return CGM.getContext(); }
  CGDebugInfo *getDebugInfo() {
    if (DisableDebugInfo)
      return nullptr;
    return DebugInfo;
  }
  void disableDebugInfo() { DisableDebugInfo = true; }
  void enableDebugInfo() { DisableDebugInfo = false; }

  bool shouldUseFusedARCCalls() {
    return CGM.getCodeGenOpts().OptimizationLevel == 0;
  }

  const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }

  /// Returns a pointer to the function's exception object and selector slot,
  /// which is assigned in every landing pad.
  Address getExceptionSlot();
  Address getEHSelectorSlot();

  /// Returns the contents of the function's exception object and selector
  /// slots.
  llvm::Value *getExceptionFromSlot();
  llvm::Value *getSelectorFromSlot();

  RawAddress getNormalCleanupDestSlot();

  llvm::BasicBlock *getUnreachableBlock() {
    if (!UnreachableBlock) {
      UnreachableBlock = createBasicBlock("unreachable");
      new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
    }
    return UnreachableBlock;
  }

  llvm::BasicBlock *getInvokeDest() {
    if (!EHStack.requiresLandingPad()) return nullptr;
    return getInvokeDestImpl();
  }

  bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }

  const TargetInfo &getTarget() const { return Target; }
  llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
  const TargetCodeGenInfo &getTargetHooks() const {
    return CGM.getTargetCodeGenInfo();
  }
  //===--------------------------------------------------------------------===//
  //                                  Cleanups
  //===--------------------------------------------------------------------===//

  typedef void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty);

  void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
                                        Address arrayEndPointer,
                                        QualType elementType,
                                        CharUnits elementAlignment,
                                        Destroyer *destroyer);
  void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                      llvm::Value *arrayEnd,
                                      QualType elementType,
                                      CharUnits elementAlignment,
                                      Destroyer *destroyer);

  void pushDestroy(QualType::DestructionKind dtorKind,
                   Address addr, QualType type);
  void pushEHDestroy(QualType::DestructionKind dtorKind,
                     Address addr, QualType type);
  void pushDestroy(CleanupKind kind, Address addr, QualType type,
                   Destroyer *destroyer, bool useEHCleanupForArray);
  void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind,
                                       Address addr, QualType type);
  void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr,
                                       QualType type, Destroyer *destroyer,
                                       bool useEHCleanupForArray);
  void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr,
                                   QualType type, Destroyer *destroyer,
                                   bool useEHCleanupForArray);
  void pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
                                   llvm::Value *CompletePtr,
                                   QualType ElementType);
  void pushStackRestore(CleanupKind kind, Address SPMem);
  void pushKmpcAllocFree(CleanupKind Kind,
                         std::pair<llvm::Value *, llvm::Value *> AddrSizePair);
  void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
                   bool useEHCleanupForArray);
  llvm::Function *generateDestroyHelper(Address addr, QualType type,
                                        Destroyer *destroyer,
                                        bool useEHCleanupForArray,
                                        const VarDecl *VD);
  void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
                        QualType elementType, CharUnits elementAlign,
                        Destroyer *destroyer,
                        bool checkZeroLength, bool useEHCleanup);

  Destroyer *getDestroyer(QualType::DestructionKind destructionKind);

  /// Determines whether an EH cleanup is required to destroy a type
  /// with the given destruction kind.
  bool needsEHCleanup(QualType::DestructionKind kind) {
    switch (kind) {
    case QualType::DK_none:
      return false;
    case QualType::DK_cxx_destructor:
    case QualType::DK_objc_weak_lifetime:
    case QualType::DK_nontrivial_c_struct:
      return getLangOpts().Exceptions;
    case QualType::DK_objc_strong_lifetime:
      return getLangOpts().Exceptions &&
             CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
    }
    llvm_unreachable("bad destruction kind");
  }

  CleanupKind getCleanupKind(QualType::DestructionKind kind) {
    return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
  }
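
  // Illustrative sketch (assumed emitter shape): pushing a destructor cleanup
  // for a local of type Ty at address Addr, with the EH flavor chosen from
  // the destruction kind.
  //
  //   if (QualType::DestructionKind DK = Ty.isDestructedType())
  //     CGF.pushDestroy(CGF.getCleanupKind(DK), Addr, Ty,
  //                     CGF.getDestroyer(DK), /*useEHCleanupForArray=*/true);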
  //===--------------------------------------------------------------------===//
  //                                  Objective-C
  //===--------------------------------------------------------------------===//

  void GenerateObjCMethod(const ObjCMethodDecl *OMD);

  void StartObjCMethod(const ObjCMethodDecl *MD, const ObjCContainerDecl *CD);

  /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
  void GenerateObjCGetter(ObjCImplementationDecl *IMP,
                          const ObjCPropertyImplDecl *PID);
  void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                              const ObjCPropertyImplDecl *propImpl,
                              const ObjCMethodDecl *GetterMethodDecl,
                              llvm::Constant *AtomicHelperFn);

  void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
                                  ObjCMethodDecl *MD, bool ctor);

  /// GenerateObjCSetter - Synthesize an Objective-C property setter function
  /// for the given property.
  void GenerateObjCSetter(ObjCImplementationDecl *IMP,
                          const ObjCPropertyImplDecl *PID);
  void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
                              const ObjCPropertyImplDecl *propImpl,
                              llvm::Constant *AtomicHelperFn);

  //===--------------------------------------------------------------------===//
  //                                  Blocks
  //===--------------------------------------------------------------------===//

  /// Emit block literal.
  /// \return an LLVM value which is a pointer to a struct which contains
  /// information about the block, including the block invoke function, the
  /// captured variables, etc.
  llvm::Value *EmitBlockLiteral(const BlockExpr *);

  llvm::Function *GenerateBlockFunction(GlobalDecl GD,
                                        const CGBlockInfo &Info,
                                        const DeclMapTy &ldm,
                                        bool IsLambdaConversionToBlock,
                                        bool BuildGlobalBlock);

  /// Check if \p T is a C++ class that has a destructor that can throw.
  static bool cxxDestructorCanThrow(QualType T);

  llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
  llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
  llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
      const ObjCPropertyImplDecl *PID);
  llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
      const ObjCPropertyImplDecl *PID);
  llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);

  void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags,
                         bool CanThrow);

  class AutoVarEmission;

  void emitByrefStructureInit(const AutoVarEmission &emission);

  /// Enter a cleanup to destroy a __block variable. Note that this
  /// cleanup should be a no-op if the variable hasn't left the stack
  /// yet; if a cleanup is required for the variable itself, that needs
  /// to be done externally.
  ///
  /// \param Kind Cleanup kind.
  ///
  /// \param Addr When \p LoadBlockVarAddr is false, the address of the __block
  /// structure that will be passed to _Block_object_dispose. When
  /// \p LoadBlockVarAddr is true, the address of the field of the block
  /// structure that holds the address of the __block structure.
  ///
  /// \param Flags The flag that will be passed to _Block_object_dispose.
  ///
  /// \param LoadBlockVarAddr Indicates whether we need to emit a load from
  /// \p Addr to get the address of the __block structure.
  void enterByrefCleanup(CleanupKind Kind, Address Addr, BlockFieldFlags Flags,
                         bool LoadBlockVarAddr, bool CanThrow);

  void setBlockContextParameter(const ImplicitParamDecl *D, unsigned argNum,
                                llvm::Value *ptr);

  Address LoadBlockStruct();
  Address GetAddrOfBlockDecl(const VarDecl *var);

  /// BuildBlockByrefAddress - Computes the location of the
  /// data in a variable which is declared as __block.
  Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V,
                                bool followForward = true);
  Address emitBlockByrefAddress(Address baseAddr,
                                const BlockByrefInfo &info,
                                bool followForward,
                                const llvm::Twine &name);

  const BlockByrefInfo &getBlockByrefInfo(const VarDecl *var);

  QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args);

  void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                    const CGFunctionInfo &FnInfo);

  /// Annotate the function with an attribute that disables TSan checking at
  /// runtime.
  void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn);

  /// Emit code for the start of a function.
  /// \param Loc       The location to be associated with the function.
  /// \param StartLoc  The location of the function body.
  void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn,
                     const CGFunctionInfo &FnInfo,
                     const FunctionArgList &Args,
                     SourceLocation Loc = SourceLocation(),
                     SourceLocation StartLoc = SourceLocation());

  static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor);

  void EmitConstructorBody(FunctionArgList &Args);
  void EmitDestructorBody(FunctionArgList &Args);
  void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
  void EmitFunctionBody(const Stmt *Body);
  void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);

  void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
                                  CallArgList &CallArgs,
                                  const CGFunctionInfo *CallOpFnInfo = nullptr,
                                  llvm::Constant *CallOpFn = nullptr);
  void EmitLambdaBlockInvokeBody();
  void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
  void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD,
                                      CallArgList &CallArgs);
  void EmitLambdaInAllocaImplFn(const CXXMethodDecl *CallOp,
                                const CGFunctionInfo **ImplFnInfo,
                                llvm::Function **ImplFn);
  void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD);
  void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV) {
    EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
  }
  void EmitAsanPrologueOrEpilogue(bool Prologue);

  /// Emit the unified return block, trying to avoid its emission when
  /// possible.
  /// \return The debug location of the user-written return statement if the
  /// return block is avoided.
  llvm::DebugLoc EmitReturnBlock();

  /// FinishFunction - Complete IR generation of the current function. It is
  /// legal to call this function even if there is no current insertion point.
  void FinishFunction(SourceLocation EndLoc = SourceLocation());

  void StartThunk(llvm::Function *Fn, GlobalDecl GD,
                  const CGFunctionInfo &FnInfo, bool IsUnprototyped);

  void EmitCallAndReturnForThunk(llvm::FunctionCallee Callee,
                                 const ThunkInfo *Thunk, bool IsUnprototyped);

  void FinishThunk();

  /// Emit a musttail call for a thunk with a potentially adjusted this
  /// pointer.
  void EmitMustTailThunk(GlobalDecl GD, llvm::Value *AdjustedThisPtr,
                         llvm::FunctionCallee Callee);

  /// Generate a thunk for the given method.
  void generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
                     GlobalDecl GD, const ThunkInfo &Thunk,
                     bool IsUnprototyped);

  llvm::Function *GenerateVarArgsThunk(llvm::Function *Fn,
                                       const CGFunctionInfo &FnInfo,
                                       GlobalDecl GD, const ThunkInfo &Thunk);

  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
                        FunctionArgList &Args);

  void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init);
  /// Struct with all information about dynamic [sub]class needed to set vptr.
  struct VPtr {
    BaseSubobject Base;
    const CXXRecordDecl *NearestVBase;
    CharUnits OffsetFromNearestVBase;
    const CXXRecordDecl *VTableClass;
  };

  /// Initialize the vtable pointer of the given subobject.
  void InitializeVTablePointer(const VPtr &vptr);

  typedef llvm::SmallVector<VPtr, 4> VPtrsVector;

  typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
  VPtrsVector getVTablePointers(const CXXRecordDecl *VTableClass);

  void getVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase,
                         CharUnits OffsetFromNearestVBase,
                         bool BaseIsNonVirtualPrimaryBase,
                         const CXXRecordDecl *VTableClass,
                         VisitedVirtualBasesSetTy &VBases, VPtrsVector &vptrs);

  void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);

  // VTableAuthMode - whether we guarantee that loading the
  // vtable is guaranteed to trap on authentication failure,
  // even if the resulting vtable pointer is unused.
  enum class VTableAuthMode {
    Authenticate,
    MustTrap,
    UnsafeUbsanStrip // Should only be used for Vptr UBSan check
  };

  /// GetVTablePtr - Return the Value of the vtable pointer member pointed
  /// to by This.
  llvm::Value *
  GetVTablePtr(Address This, llvm::Type *VTableTy,
               const CXXRecordDecl *VTableClass,
               VTableAuthMode AuthMode = VTableAuthMode::Authenticate);

  enum CFITypeCheckKind {
    CFITCK_VCall,
    CFITCK_NVCall,
    CFITCK_DerivedCast,
    CFITCK_UnrelatedCast,
    CFITCK_ICall,
    CFITCK_NVMFCall,
    CFITCK_VMFCall,
  };

  /// Derived is the presumed address of an object of type T after a
  /// cast. If T is a polymorphic class type, emit a check that the virtual
  /// table for Derived belongs to a class derived from T.
  void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull,
                                 CFITypeCheckKind TCK, SourceLocation Loc);

  /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
  /// If vptr CFI is enabled, emit a check that VTable is valid.
  void EmitVTablePtrCheckForCall(const CXXRecordDecl *RD, llvm::Value *VTable,
                                 CFITypeCheckKind TCK, SourceLocation Loc);

  /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table
  /// for RD using llvm.type.test.
  void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable,
                          CFITypeCheckKind TCK, SourceLocation Loc);

  /// If whole-program virtual table optimization is enabled, emit an
  /// assumption that VTable is a member of RD's type identifier. Or, if vptr
  /// CFI is enabled, emit a check that VTable is a member of RD's type
  /// identifier.
  void EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
                                    llvm::Value *VTable, SourceLocation Loc);

  /// Returns whether we should perform a type checked load when loading a
  /// virtual function for virtual calls to members of RD. This is generally
  /// true when both vcall CFI and whole-program-vtables are enabled.
  bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);

  /// Emit a type checked load from the given vtable.
  llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD,
                                         llvm::Value *VTable,
                                         llvm::Type *VTableTy,
                                         uint64_t VTableByteOffset);

  /// EnterDtorCleanups - Enter the cleanups necessary to complete the
  /// given phase of destruction for a destructor. The end result
  /// should call destructors on members and base classes in reverse
  /// order of their construction.
  void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);

  /// ShouldInstrumentFunction - Return true if the current function should be
  /// instrumented with __cyg_profile_func_* calls.
  bool ShouldInstrumentFunction();

  /// ShouldSkipSanitizerInstrumentation - Return true if the current function
  /// should not be instrumented with sanitizers.
  bool ShouldSkipSanitizerInstrumentation();

  /// ShouldXRayInstrument - Return true if the current function should be
  /// instrumented with XRay nop sleds.
  bool ShouldXRayInstrumentFunction() const;

  /// AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit
  /// XRay custom event handling calls.
  bool AlwaysEmitXRayCustomEvents() const;

  /// AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally
  /// emit XRay typed event handling calls.
  bool AlwaysEmitXRayTypedEvents() const;

  /// Return a type hash constant for a function instrumented by
  /// -fsanitize=function.
  llvm::ConstantInt *getUBSanFunctionTypeHash(QualType T) const;

  /// EmitFunctionProlog - Emit the target specific LLVM code to load the
  /// arguments for the given function. This is also responsible for naming the
  /// LLVM function arguments.
  void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn,
                          const FunctionArgList &Args);

  /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
  /// given temporary.
  void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
                          SourceLocation EndLoc);

  /// Emit a test that checks if the return value \p RV is nonnull.
  void EmitReturnValueCheck(llvm::Value *RV);

  /// EmitStartEHSpec - Emit the start of the exception spec.
  void EmitStartEHSpec(const Decl *D);

  /// EmitEndEHSpec - Emit the end of the exception spec.
  void EmitEndEHSpec(const Decl *D);

  /// getTerminateLandingPad - Return a landing pad that just calls terminate.
  llvm::BasicBlock *getTerminateLandingPad();

  /// getTerminateFunclet - Return a cleanup funclet that just calls
  /// terminate.
  llvm::BasicBlock *getTerminateFunclet();

  /// getTerminateHandler - Return a handler (not a landing pad, just
  /// a catch handler) that just calls terminate. This is used when
  /// a terminate scope encloses a try.
  llvm::BasicBlock *getTerminateHandler();

  llvm::Type *ConvertTypeForMem(QualType T);
  llvm::Type *ConvertType(QualType T);
  llvm::Type *convertTypeForLoadStore(QualType ASTTy,
                                      llvm::Type *LLVMTy = nullptr);
  llvm::Type *ConvertType(const TypeDecl *T) {
    return ConvertType(getContext().getTypeDeclType(T));
  }

  /// LoadObjCSelf - Load the value of self. This function is only valid while
  /// generating code for an Objective-C method.
  llvm::Value *LoadObjCSelf();

  /// TypeOfSelfObject - Return type of object that this self represents.
  QualType TypeOfSelfObject();

  /// getEvaluationKind - Return the TypeEvaluationKind of QualType \c T.
  static TypeEvaluationKind getEvaluationKind(QualType T);

  static bool hasScalarEvaluationKind(QualType T) {
    return getEvaluationKind(T) == TEK_Scalar;
  }

  static bool hasAggregateEvaluationKind(QualType T) {
    return getEvaluationKind(T) == TEK_Aggregate;
  }

  /// createBasicBlock - Create an LLVM basic block.
  llvm::BasicBlock *createBasicBlock(const Twine &name = "",
                                     llvm::Function *parent = nullptr,
                                     llvm::BasicBlock *before = nullptr) {
    return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
  }

  /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
  /// label maps to.
  JumpDest getJumpDestForLabel(const LabelDecl *S);

  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
  /// another basic block, simplify it. This assumes that no other code could
  /// potentially reference the basic block.
  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);

  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
  /// adding a fall-through branch from the current insert block if
  /// necessary. It is legal to call this function even if there is no current
  /// insertion point.
  ///
  /// IsFinished - If true, indicates that the caller has finished emitting
  /// branches to the given block and does not expect to emit code into it.
  /// This means the block can be ignored if it is unreachable.
  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished = false);

  /// EmitBlockAfterUses - Emit the given block somewhere hopefully
  /// near its uses, and leave the insertion point in it.
  void EmitBlockAfterUses(llvm::BasicBlock *BB);

  /// EmitBranch - Emit a branch to the specified basic block from the current
  /// insert block, taking care to avoid creation of branches from dummy
  /// blocks. It is legal to call this function even if there is no current
  /// insertion point.
  ///
  /// This function clears the current insertion point. The caller should
  /// follow calls to this function with calls to Emit*Block prior to
  /// generating new code.
  void EmitBranch(llvm::BasicBlock *Block);

  /// HaveInsertPoint - True if an insertion point is defined. If not, this
  /// indicates that the current code being emitted is unreachable.
  bool HaveInsertPoint() const {
    return Builder.GetInsertBlock() != nullptr;
  }

  /// EnsureInsertPoint - Ensure that an insertion point is defined so that
  /// emitted IR has a place to go. Note that by definition, if this function
  /// creates a block then that block is unreachable; callers may do better to
  /// detect when no insertion point is defined and simply skip IR generation.
  void EnsureInsertPoint() {
    if (!HaveInsertPoint())
      EmitBlock(createBasicBlock());
  }
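
  // Illustrative sketch (assumed emitter shape): the usual pattern for
  // emitting a two-way branch with these helpers. `CondV` is a hypothetical
  // i1 value.
  //
  //   llvm::BasicBlock *ThenBB = CGF.createBasicBlock("if.then");
  //   llvm::BasicBlock *EndBB  = CGF.createBasicBlock("if.end");
  //   CGF.Builder.CreateCondBr(CondV, ThenBB, EndBB);
  //   CGF.EmitBlock(ThenBB);       // switch the insert point to if.then
  //   ...                          // emit the guarded code
  //   CGF.EmitBranch(EndBB);       // branch out; clears the insert point
  //   CGF.EmitBlock(EndBB, /*IsFinished=*/true);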
  /// ErrorUnsupported - Print out an error that codegen doesn't support the
  /// specified stmt yet.
  void ErrorUnsupported(const Stmt *S, const char *Type);

  //===--------------------------------------------------------------------===//
  //                                  Helpers
  //===--------------------------------------------------------------------===//

  Address mergeAddressesInConditionalExpr(Address LHS, Address RHS,
                                          llvm::BasicBlock *LHSBlock,
                                          llvm::BasicBlock *RHSBlock,
                                          llvm::BasicBlock *MergeBlock,
                                          QualType MergedType) {
    Builder.SetInsertPoint(MergeBlock);
    llvm::PHINode *PtrPhi = Builder.CreatePHI(LHS.getType(), 2, "cond");
    PtrPhi->addIncoming(LHS.getBasePointer(), LHSBlock);
    PtrPhi->addIncoming(RHS.getBasePointer(), RHSBlock);
    LHS.replaceBasePointer(PtrPhi);
    LHS.setAlignment(std::min(LHS.getAlignment(), RHS.getAlignment()));
    return LHS;
  }

  /// Construct an address with the natural alignment of T. If a pointer to T
  /// is expected to be signed, the pointer passed to this function must have
  /// been signed, and the returned Address will have the pointer
  /// authentication information needed to authenticate the signed pointer.
  Address makeNaturalAddressForPointer(
      llvm::Value *Ptr, QualType T, CharUnits Alignment = CharUnits::Zero(),
      bool ForPointeeType = false, LValueBaseInfo *BaseInfo = nullptr,
      TBAAAccessInfo *TBAAInfo = nullptr,
      KnownNonNull_t IsKnownNonNull = NotKnownNonNull) {
    if (Alignment.isZero())
      Alignment =
          CGM.getNaturalTypeAlignment(T, BaseInfo, TBAAInfo, ForPointeeType);
    return Address(Ptr, ConvertTypeForMem(T), Alignment,
                   CGM.getPointerAuthInfoForPointeeType(T), /*Offset=*/nullptr,
                   IsKnownNonNull);
  }

  LValue MakeAddrLValue(Address Addr, QualType T,
                        AlignmentSource Source = AlignmentSource::Type) {
    return MakeAddrLValue(Addr, T, LValueBaseInfo(Source),
                          CGM.getTBAAAccessInfo(T));
  }

  LValue MakeAddrLValue(Address Addr, QualType T, LValueBaseInfo BaseInfo,
                        TBAAAccessInfo TBAAInfo) {
    return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
  }

  LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
                        AlignmentSource Source = AlignmentSource::Type) {
    return MakeAddrLValue(makeNaturalAddressForPointer(V, T, Alignment), T,
                          LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
  }

  /// Same as MakeAddrLValue above except that the pointer is known to be
  /// unsigned.
  LValue MakeRawAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
                           AlignmentSource Source = AlignmentSource::Type) {
    Address Addr(V, ConvertTypeForMem(T), Alignment);
    return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
                            CGM.getTBAAAccessInfo(T));
  }

  LValue
  MakeAddrLValueWithoutTBAA(Address Addr, QualType T,
                            AlignmentSource Source = AlignmentSource::Type) {
    return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
                            TBAAAccessInfo());
  }

  /// Given a value of type T* that may not be to a complete object, construct
  /// an l-value with the natural pointee alignment of T.
  LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);

  LValue
  MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T,
                             KnownNonNull_t IsKnownNonNull = NotKnownNonNull);

  /// Same as MakeNaturalAlignPointeeAddrLValue except that the pointer is
  /// known to be unsigned.
  LValue MakeNaturalAlignPointeeRawAddrLValue(llvm::Value *V, QualType T);

  LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T);

  Address EmitLoadOfReference(LValue RefLVal,
                              LValueBaseInfo *PointeeBaseInfo = nullptr,
                              TBAAAccessInfo *PointeeTBAAInfo = nullptr);
  LValue EmitLoadOfReferenceLValue(LValue RefLVal);
  LValue EmitLoadOfReferenceLValue(Address RefAddr, QualType RefTy,
                                   AlignmentSource Source =
                                       AlignmentSource::Type) {
    LValue RefLVal = MakeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source),
                                    CGM.getTBAAAccessInfo(RefTy));
    return EmitLoadOfReferenceLValue(RefLVal);
  }

  /// Load a pointer with type \p PtrTy stored at address \p Ptr.
  /// Note that \p PtrTy is the type of the loaded pointer, not the addresses
  /// it is loaded from.
  Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
                            LValueBaseInfo *BaseInfo = nullptr,
                            TBAAAccessInfo *TBAAInfo = nullptr);
  LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy);
private:
  struct AllocaTracker {
    void Add(llvm::AllocaInst *I) { Allocas.push_back(I); }
    llvm::SmallVector<llvm::AllocaInst *> Take() { return std::move(Allocas); }

  private:
    llvm::SmallVector<llvm::AllocaInst *> Allocas;
  };
  AllocaTracker *Allocas = nullptr;

public:
  // Captures all the allocas created during the scope of its RAII object.
  struct AllocaTrackerRAII {
    AllocaTrackerRAII(CodeGenFunction &CGF)
        : CGF(CGF), OldTracker(CGF.Allocas) {
      CGF.Allocas = &Tracker;
    }
    ~AllocaTrackerRAII() { CGF.Allocas = OldTracker; }

    llvm::SmallVector<llvm::AllocaInst *> Take() { return Tracker.Take(); }

  private:
    CodeGenFunction &CGF;
    AllocaTracker *OldTracker;
    AllocaTracker Tracker;
  };
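
  // Illustrative usage sketch (assumed emitter shape): collect the allocas
  // emitted while generating a subtree, e.g. to move them later.
  //
  //   llvm::SmallVector<llvm::AllocaInst *> NewAllocas;
  //   {
  //     CodeGenFunction::AllocaTrackerRAII Track(CGF);
  //     CGF.EmitStmt(SubStmt);        // any allocas created are recorded
  //     NewAllocas = Track.Take();
  //   }  // the previous tracker (if any) is restored here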
  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
  /// block if \p ArraySize is nullptr, otherwise inserts it at the current
  /// insertion point of the builder. The caller is responsible for setting an
  /// appropriate alignment on the alloca.
  ///
  /// \p ArraySize is the number of array elements to be allocated if it
  /// is not nullptr.
  ///
  /// LangAS::Default is the address space of pointers to local variables and
  /// temporaries, as exposed in the source language. In certain
  /// configurations, this is not the same as the alloca address space, and a
  /// cast is needed to lift the pointer from the alloca AS into
  /// LangAS::Default. This can happen when the target uses a restricted
  /// address space for the stack but the source language requires
  /// LangAS::Default to be a generic address space. The latter condition is
  /// common for most programming languages; OpenCL is an exception in that
  /// LangAS::Default is the private address space, which naturally maps
  /// to the stack.
  ///
  /// Because the address of a temporary is often exposed to the program in
  /// various ways, this function will perform the cast. The original alloca
  /// instruction is returned through \p Alloca if it is not nullptr.
  ///
  /// The cast is not performed in CreateTempAllocaWithoutCast. This is
  /// more efficient if the caller knows that the address will not be exposed.
  llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty, const Twine &Name = "tmp",
                                     llvm::Value *ArraySize = nullptr);
  RawAddress CreateTempAlloca(llvm::Type *Ty, CharUnits align,
                              const Twine &Name = "tmp",
                              llvm::Value *ArraySize = nullptr,
                              RawAddress *Alloca = nullptr);
  RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align,
                                         const Twine &Name = "tmp",
                                         llvm::Value *ArraySize = nullptr);

  /// CreateDefaultAlignTempAlloca - This creates an alloca with the
  /// default ABI alignment of the given LLVM type.
  ///
  /// IMPORTANT NOTE: This is *not* generally the right alignment for
  /// any given AST type that happens to have been lowered to the
  /// given IR type. This should only ever be used for function-local,
  /// IR-driven manipulations like saving and restoring a value. Do
  /// not hand this address off to arbitrary IRGen routines, and especially
  /// do not pass it as an argument to a function that might expect a
  /// properly ABI-aligned value.
  RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                          const Twine &Name = "tmp");

  /// CreateIRTemp - Create a temporary IR object of the given type, with
  /// appropriate alignment. This routine should only be used when a temporary
  /// value needs to be stored into an alloca (for example, to avoid explicit
  /// PHI construction), but the type is the IR type, not the type appropriate
  /// for storing in memory.
  ///
  /// That is, this is exactly equivalent to CreateMemTemp, but calling
  /// ConvertType instead of ConvertTypeForMem.
  RawAddress CreateIRTemp(QualType T, const Twine &Name = "tmp");

  /// CreateMemTemp - Create a temporary memory object of the given type, with
  /// appropriate alignment, and cast it to the default address space. Returns
  /// the original alloca instruction by \p Alloca if it is not nullptr.
  RawAddress CreateMemTemp(QualType T, const Twine &Name = "tmp",
                           RawAddress *Alloca = nullptr);
  RawAddress CreateMemTemp(QualType T, CharUnits Align,
                           const Twine &Name = "tmp",
                           RawAddress *Alloca = nullptr);

  /// CreateMemTemp - Create a temporary memory object of the given type, with
  /// appropriate alignment, without casting it to the default address space.
  RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name = "tmp");
  RawAddress CreateMemTempWithoutCast(QualType T, CharUnits Align,
                                      const Twine &Name = "tmp");

  /// CreateAggTemp - Create a temporary memory object for the given
  /// aggregate type.
  AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp",
                             RawAddress *Alloca = nullptr) {
    return AggValueSlot::forAddr(
        CreateMemTemp(T, Name, Alloca), T.getQualifiers(),
        AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers,
        AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap);
  }
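
  // Illustrative sketch (assumed values): materializing a temporary for a
  // QualType `Ty` and storing a scalar into it. `ScalarV` is a hypothetical
  // value.
  //
  //   RawAddress Tmp = CGF.CreateMemTemp(Ty, "agg.tmp");
  //   CGF.Builder.CreateStore(ScalarV, Tmp);
  //
  // CreateMemTemp picks the alignment from the AST type and casts the alloca
  // to LangAS::Default; CreateMemTempWithoutCast is the choice when the
  // address never escapes and the addrspace cast would be wasted work.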
2875 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
2876 /// expression and compare the result against zero, returning an Int1Ty value.
2877 llvm::Value
*EvaluateExprAsBool(const Expr
*E
);
2879 /// Retrieve the implicit cast expression of the rhs in a binary operator
2880 /// expression by passing pointers to Value and QualType
2881 /// This is used for implicit bitfield conversion checks, which
2882 /// must compare with the value before potential truncation.
2883 llvm::Value
*EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator
*E
,
2884 llvm::Value
**Previous
,
2887 /// Emit a check that an [implicit] conversion of a bitfield. It is not UB,
2888 /// so we use the value after conversion.
2889 void EmitBitfieldConversionCheck(llvm::Value
*Src
, QualType SrcType
,
2890 llvm::Value
*Dst
, QualType DstType
,
2891 const CGBitFieldInfo
&Info
,
2892 SourceLocation Loc
);
2894 /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
2895 void EmitIgnoredExpr(const Expr
*E
);
2897 /// EmitAnyExpr - Emit code to compute the specified expression which can have
2898 /// any type. The result is returned as an RValue struct. If this is an
2899 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
2900 /// the result should be returned.
2902 /// \param ignoreResult True if the resulting value isn't used.
2903 RValue
EmitAnyExpr(const Expr
*E
,
2904 AggValueSlot aggSlot
= AggValueSlot::ignored(),
2905 bool ignoreResult
= false);
2907 // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
2908 // or the value of the expression, depending on how va_list is defined.
2909 Address
EmitVAListRef(const Expr
*E
);
2911 /// Emit a "reference" to a __builtin_ms_va_list; this is
2912 /// always the value of the expression, because a __builtin_ms_va_list is a
2913 /// pointer to a char.
2914 Address
EmitMSVAListRef(const Expr
*E
);
2916 /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
2917 /// always be accessible even if no aggregate location is provided.
2918 RValue
EmitAnyExprToTemp(const Expr
*E
);
2920 /// EmitAnyExprToMem - Emits the code necessary to evaluate an
2921 /// arbitrary expression into the given memory location.
2922 void EmitAnyExprToMem(const Expr
*E
, Address Location
,
2923 Qualifiers Quals
, bool IsInitializer
);
2925 void EmitAnyExprToExn(const Expr
*E
, Address Addr
);
2927 /// EmitExprAsInit - Emits the code necessary to initialize a
2928 /// location in memory with the given initializer.
2929 void EmitExprAsInit(const Expr
*init
, const ValueDecl
*D
, LValue lvalue
,
2930 bool capturedByInit
);
2932 /// hasVolatileMember - returns true if aggregate type has a volatile
2934 bool hasVolatileMember(QualType T
) {
2935 if (const RecordType
*RT
= T
->getAs
<RecordType
>()) {
2936 const RecordDecl
*RD
= cast
<RecordDecl
>(RT
->getDecl());
2937 return RD
->hasVolatileMember();
2942 /// Determine whether a return value slot may overlap some other object.
2943 AggValueSlot::Overlap_t
getOverlapForReturnValue() {
2944 // FIXME: Assuming no overlap here breaks guaranteed copy elision for base
2945 // class subobjects. These cases may need to be revisited depending on the
2946 // resolution of the relevant core issue.
2947 return AggValueSlot::DoesNotOverlap
;
2950 /// Determine whether a field initialization may overlap some other object.
2951 AggValueSlot::Overlap_t
getOverlapForFieldInit(const FieldDecl
*FD
);
2953 /// Determine whether a base class initialization may overlap some other
2955 AggValueSlot::Overlap_t
getOverlapForBaseInit(const CXXRecordDecl
*RD
,
2956 const CXXRecordDecl
*BaseRD
,
2959 /// Emit an aggregate assignment.
2960 void EmitAggregateAssign(LValue Dest
, LValue Src
, QualType EltTy
) {
2961 bool IsVolatile
= hasVolatileMember(EltTy
);
2962 EmitAggregateCopy(Dest
, Src
, EltTy
, AggValueSlot::MayOverlap
, IsVolatile
);
2965 void EmitAggregateCopyCtor(LValue Dest
, LValue Src
,
2966 AggValueSlot::Overlap_t MayOverlap
) {
2967 EmitAggregateCopy(Dest
, Src
, Src
.getType(), MayOverlap
);
2970 /// EmitAggregateCopy - Emit an aggregate copy.
2972 /// \param isVolatile \c true iff either the source or the destination is
2974 /// \param MayOverlap Whether the tail padding of the destination might be
2975 /// occupied by some other object. More efficient code can often be
2976 /// generated if not.
2977 void EmitAggregateCopy(LValue Dest
, LValue Src
, QualType EltTy
,
2978 AggValueSlot::Overlap_t MayOverlap
,
2979 bool isVolatile
= false);
2981 /// GetAddrOfLocalVar - Return the address of a local variable.
2982 Address
GetAddrOfLocalVar(const VarDecl
*VD
) {
2983 auto it
= LocalDeclMap
.find(VD
);
2984 assert(it
!= LocalDeclMap
.end() &&
2985 "Invalid argument to GetAddrOfLocalVar(), no decl!");
2989 /// Given an opaque value expression, return its LValue mapping if it exists,
2990 /// otherwise create one.
2991 LValue
getOrCreateOpaqueLValueMapping(const OpaqueValueExpr
*e
);
2993 /// Given an opaque value expression, return its RValue mapping if it exists,
2994 /// otherwise create one.
2995 RValue
getOrCreateOpaqueRValueMapping(const OpaqueValueExpr
*e
);
2997 /// Get the index of the current ArrayInitLoopExpr, if any.
2998 llvm::Value
*getArrayInitIndex() { return ArrayInitIndex
; }
3000 /// getAccessedFieldNo - Given an encoded value and a result number, return
3001 /// the input field number being accessed.
3002 static unsigned getAccessedFieldNo(unsigned Idx
, const llvm::Constant
*Elts
);
3004 llvm::BlockAddress
*GetAddrOfLabel(const LabelDecl
*L
);
3005 llvm::BasicBlock
*GetIndirectGotoBlock();
3007 /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
3008 static bool IsWrappedCXXThis(const Expr
*E
);
3010 /// EmitNullInitialization - Generate code to set a value of the given type to
3011 /// null, If the type contains data member pointers, they will be initialized
3012 /// to -1 in accordance with the Itanium C++ ABI.
3013 void EmitNullInitialization(Address DestPtr
, QualType Ty
);
3015 /// Emits a call to an LLVM variable-argument intrinsic, either
3016 /// \c llvm.va_start or \c llvm.va_end.
3017 /// \param ArgValue A reference to the \c va_list as emitted by either
3018 /// \c EmitVAListRef or \c EmitMSVAListRef.
3019 /// \param IsStart If \c true, emits a call to \c llvm.va_start; otherwise,
3020 /// calls \c llvm.va_end.
3021 llvm::Value
*EmitVAStartEnd(llvm::Value
*ArgValue
, bool IsStart
);
3023 /// Generate code to get an argument from the passed in pointer
3024 /// and update it accordingly.
3025 /// \param VE The \c VAArgExpr for which to generate code.
3026 /// \param VAListAddr Receives a reference to the \c va_list as emitted by
3027 /// either \c EmitVAListRef or \c EmitMSVAListRef.
3028 /// \returns A pointer to the argument.
3029 // FIXME: We should be able to get rid of this method and use the va_arg
3030 // instruction in LLVM instead once it works well enough.
3031 RValue
EmitVAArg(VAArgExpr
*VE
, Address
&VAListAddr
,
3032 AggValueSlot Slot
= AggValueSlot::ignored());
  /// emitArrayLength - Compute the length of an array, even if it's a
  /// VLA, and drill down to the base element type.
  llvm::Value *emitArrayLength(const ArrayType *arrayType,
                               QualType &baseType, Address &addr);

  /// EmitVLASize - Capture all the sizes for the VLA expressions in
  /// the given variably-modified type and store them in the VLASizeMap.
  ///
  /// This function can be called with a null (unreachable) insert point.
  void EmitVariablyModifiedType(QualType Ty);

  struct VlaSizePair {
    llvm::Value *NumElts;
    QualType Type;

    VlaSizePair(llvm::Value *NE, QualType T) : NumElts(NE), Type(T) {}
  };
  /// Return the number of elements for a single dimension
  /// for the given array type.
  VlaSizePair getVLAElements1D(const VariableArrayType *vla);
  VlaSizePair getVLAElements1D(QualType vla);

  /// Returns an LLVM value that corresponds to the size,
  /// in non-variably-sized elements, of a variable length array type,
  /// plus the largest non-variably-sized element type. Assumes that
  /// the type has already been emitted with EmitVariablyModifiedType.
  VlaSizePair getVLASize(const VariableArrayType *vla);
  VlaSizePair getVLASize(QualType vla);
  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
  /// generating code for a C++ member function.
  llvm::Value *LoadCXXThis() {
    assert(CXXThisValue && "no 'this' value for this function");
    return CXXThisValue;
  }

  Address LoadCXXThisAddress();

  /// LoadCXXVTT - Load the VTT parameter for base constructors/destructors
  /// that have virtual bases.
  // FIXME: Every place that calls LoadCXXVTT is something
  // that needs to be abstracted properly.
  llvm::Value *LoadCXXVTT() {
    assert(CXXStructorImplicitParamValue && "no VTT value for this function");
    return CXXStructorImplicitParamValue;
  }
  /// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
  /// complete class to the given direct base.
  Address
  GetAddressOfDirectBaseInCompleteClass(Address Value,
                                        const CXXRecordDecl *Derived,
                                        const CXXRecordDecl *Base,
                                        bool BaseIsVirtual);

  static bool ShouldNullCheckClassCastValue(const CastExpr *Cast);

  /// GetAddressOfBaseClass - This function will add the necessary delta to the
  /// load of 'this' and returns address of the base class.
  Address GetAddressOfBaseClass(Address Value,
                                const CXXRecordDecl *Derived,
                                CastExpr::path_const_iterator PathBegin,
                                CastExpr::path_const_iterator PathEnd,
                                bool NullCheckValue, SourceLocation Loc);

  Address GetAddressOfDerivedClass(Address Value,
                                   const CXXRecordDecl *Derived,
                                   CastExpr::path_const_iterator PathBegin,
                                   CastExpr::path_const_iterator PathEnd,
                                   bool NullCheckValue);

  /// GetVTTParameter - Return the VTT parameter that should be passed to a
  /// base constructor/destructor with virtual bases.
  /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
  /// to ItaniumCXXABI.cpp together with all the references to VTT.
  llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
                               bool Delegating);
  void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                      CXXCtorType CtorType,
                                      const FunctionArgList &Args,
                                      SourceLocation Loc);
  // It's important not to confuse this and the previous function. Delegating
  // constructors are the C++11 feature. The constructor delegate optimization
  // is used to reduce duplication in the base and complete constructors where
  // they are substantially the same.
  void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                        const FunctionArgList &Args);
  /// Emit a call to an inheriting constructor (that is, one that invokes a
  /// constructor inherited from a base class) by inlining its definition. This
  /// is necessary if the ABI does not support forwarding the arguments to the
  /// base class constructor (because they're variadic or similar).
  void EmitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                               CXXCtorType CtorType,
                                               bool ForVirtualBase,
                                               bool Delegating,
                                               CallArgList &Args);

  /// Emit a call to a constructor inherited from a base class, passing the
  /// current constructor's arguments along unmodified (without even making
  /// a copy).
  void EmitInheritedCXXConstructorCall(const CXXConstructorDecl *D,
                                       bool ForVirtualBase, Address This,
                                       bool InheritedFromVBase,
                                       const CXXInheritedCtorInitExpr *E);

  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
                              bool ForVirtualBase, bool Delegating,
                              AggValueSlot ThisAVS, const CXXConstructExpr *E);

  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
                              bool ForVirtualBase, bool Delegating,
                              Address This, CallArgList &Args,
                              AggValueSlot::Overlap_t Overlap,
                              SourceLocation Loc, bool NewPointerIsChecked);
  /// Emit assumption load for all bases. Must be called only on the
  /// most-derived class and not while the object is under construction.
  void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);

  /// Emit assumption that vptr load == global vtable.
  void EmitVTableAssumptionLoad(const VPtr &vptr, Address This);

  void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                                      Address This, Address Src,
                                      const CXXConstructExpr *E);

  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
                                  const ArrayType *ArrayTy,
                                  Address ArrayPtr,
                                  const CXXConstructExpr *E,
                                  bool NewPointerIsChecked,
                                  bool ZeroInitialization = false);

  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
                                  llvm::Value *NumElements,
                                  Address ArrayPtr,
                                  const CXXConstructExpr *E,
                                  bool NewPointerIsChecked,
                                  bool ZeroInitialization = false);
  static Destroyer destroyCXXObject;

  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
                             bool ForVirtualBase, bool Delegating, Address This,
                             QualType ThisTy);

  void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
                               llvm::Type *ElementTy, Address NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie);

  void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
                        Address Ptr);

  void EmitSehCppScopeBegin();
  void EmitSehCppScopeEnd();
  void EmitSehTryScopeBegin();
  void EmitSehTryScopeEnd();

  llvm::Value *EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr);
  void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);

  llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
  void EmitCXXDeleteExpr(const CXXDeleteExpr *E);

  void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
                      QualType DeleteTy, llvm::Value *NumElements = nullptr,
                      CharUnits CookieSize = CharUnits());

  RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
                                  const CallExpr *TheCallExpr, bool IsDelete);

  llvm::Value *EmitCXXTypeidExpr(const CXXTypeidExpr *E);
  llvm::Value *EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE);
  Address EmitCXXUuidofExpr(const CXXUuidofExpr *E);
  /// Situations in which we might emit a check for the suitability of a
  /// pointer or glvalue. Needs to be kept in sync with ubsan_handlers.cpp in
  /// compiler-rt.
  enum TypeCheckKind {
    /// Checking the operand of a load. Must be suitably sized and aligned.
    TCK_Load,
    /// Checking the destination of a store. Must be suitably sized and aligned.
    TCK_Store,
    /// Checking the bound value in a reference binding. Must be suitably sized
    /// and aligned, but is not required to refer to an object (until the
    /// reference is used), per core issue 453.
    TCK_ReferenceBinding,
    /// Checking the object expression in a non-static data member access. Must
    /// be an object within its lifetime.
    TCK_MemberAccess,
    /// Checking the 'this' pointer for a call to a non-static member function.
    /// Must be an object within its lifetime.
    TCK_MemberCall,
    /// Checking the 'this' pointer for a constructor call.
    TCK_ConstructorCall,
    /// Checking the operand of a static_cast to a derived pointer type. Must be
    /// null or an object within its lifetime.
    TCK_DowncastPointer,
    /// Checking the operand of a static_cast to a derived reference type. Must
    /// be an object within its lifetime.
    TCK_DowncastReference,
    /// Checking the operand of a cast to a base object. Must be suitably sized
    /// and aligned.
    TCK_Upcast,
    /// Checking the operand of a cast to a virtual base object. Must be an
    /// object within its lifetime.
    TCK_UpcastToVirtualBase,
    /// Checking the value assigned to a _Nonnull pointer. Must not be null.
    TCK_NonnullAssign,
    /// Checking the operand of a dynamic_cast or a typeid expression. Must be
    /// null or an object within its lifetime.
    TCK_DynamicOperation
  };
  /// Determine whether the pointer type check \p TCK permits null pointers.
  static bool isNullPointerAllowed(TypeCheckKind TCK);

  /// Determine whether the pointer type check \p TCK requires a vptr check.
  static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty);

  /// Whether any type-checking sanitizers are enabled. If \c false,
  /// calls to EmitTypeCheck can be skipped.
  bool sanitizePerformTypeCheck() const;

  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV,
                     QualType Type, SanitizerSet SkippedChecks = SanitizerSet(),
                     llvm::Value *ArraySize = nullptr) {
    if (!sanitizePerformTypeCheck())
      return;
    EmitTypeCheck(TCK, Loc, LV.emitRawPointer(*this), Type, LV.getAlignment(),
                  SkippedChecks, ArraySize);
  }

  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, Address Addr,
                     QualType Type, CharUnits Alignment = CharUnits::Zero(),
                     SanitizerSet SkippedChecks = SanitizerSet(),
                     llvm::Value *ArraySize = nullptr) {
    if (!sanitizePerformTypeCheck())
      return;
    EmitTypeCheck(TCK, Loc, Addr.emitRawPointer(*this), Type, Alignment,
                  SkippedChecks, ArraySize);
  }

  /// Emit a check that \p V is the address of storage of the
  /// appropriate size and alignment for an object of type \p Type
  /// (or if ArraySize is provided, for an array of that bound).
  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
                     QualType Type, CharUnits Alignment = CharUnits::Zero(),
                     SanitizerSet SkippedChecks = SanitizerSet(),
                     llvm::Value *ArraySize = nullptr);
  /// Emit a check that \p Base points into an array object, which
  /// we can access at index \p Index. \p Accessed should be \c false if
  /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
  void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
                       QualType IndexType, bool Accessed);
  void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
                           llvm::Value *Index, QualType IndexType,
                           QualType IndexedType, bool Accessed);

  // Find a struct's flexible array member and get its offset. It may be
  // embedded inside multiple sub-structs, but must still be the last field.
  const FieldDecl *
  FindFlexibleArrayMemberFieldAndOffset(ASTContext &Ctx, const RecordDecl *RD,
                                        const FieldDecl *FAMDecl,
                                        uint64_t &Offset);

  /// Find the FieldDecl specified in a FAM's "counted_by" attribute. Returns
  /// \p nullptr if either the attribute or the field doesn't exist.
  const FieldDecl *FindCountedByField(const FieldDecl *FD);

  /// Build an expression accessing the "counted_by" field.
  llvm::Value *EmitCountedByFieldExpr(const Expr *Base,
                                      const FieldDecl *FAMDecl,
                                      const FieldDecl *CountDecl);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);
  ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                                         bool isInc, bool isPre);

  /// Converts Location to a DebugLoc, if debug information is enabled.
  llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);

  /// Get the record field index as represented in debug info.
  unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex);
  //===--------------------------------------------------------------------===//
  //                            Declaration Emission
  //===--------------------------------------------------------------------===//

  /// EmitDecl - Emit a declaration.
  ///
  /// This function can be called with a null (unreachable) insert point.
  void EmitDecl(const Decl &D);

  /// EmitVarDecl - Emit a local variable declaration.
  ///
  /// This function can be called with a null (unreachable) insert point.
  void EmitVarDecl(const VarDecl &D);

  void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
                      bool capturedByInit);

  typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
                             llvm::Value *Address);

  /// Determine whether the given initializer is trivial in the sense
  /// that it requires no code to be generated.
  bool isTrivialInitializer(const Expr *Init);

  /// EmitAutoVarDecl - Emit an auto variable declaration.
  ///
  /// This function can be called with a null (unreachable) insert point.
  void EmitAutoVarDecl(const VarDecl &D);
  class AutoVarEmission {
    friend class CodeGenFunction;

    const VarDecl *Variable;

    /// The address of the alloca for languages with explicit address space
    /// (e.g. OpenCL) or alloca casted to generic pointer for address space
    /// agnostic languages (e.g. C++). Invalid if the variable was emitted
    /// as a global constant.
    Address Addr;

    llvm::Value *NRVOFlag;

    /// True if the variable is a __block variable that is captured by an
    /// escaping block.
    bool IsEscapingByRef;

    /// True if the variable is of aggregate type and has a constant
    /// initializer.
    bool IsConstantAggregate;

    /// Non-null if we should use lifetime annotations.
    llvm::Value *SizeForLifetimeMarkers;

    /// Address with original alloca instruction. Invalid if the variable was
    /// emitted as a global constant.
    RawAddress AllocaAddr;

    struct Invalid {};
    AutoVarEmission(Invalid)
        : Variable(nullptr), Addr(Address::invalid()),
          AllocaAddr(RawAddress::invalid()) {}

    AutoVarEmission(const VarDecl &variable)
        : Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
          IsEscapingByRef(false), IsConstantAggregate(false),
          SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {}

    bool wasEmittedAsGlobal() const { return !Addr.isValid(); }

  public:
    static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }

    bool useLifetimeMarkers() const {
      return SizeForLifetimeMarkers != nullptr;
    }
    llvm::Value *getSizeForLifetimeMarkers() const {
      assert(useLifetimeMarkers());
      return SizeForLifetimeMarkers;
    }

    /// Returns the raw, allocated address, which is not necessarily
    /// the address of the object itself. It is casted to default
    /// address space for address space agnostic languages.
    Address getAllocatedAddress() const {
      return Addr;
    }

    /// Returns the address for the original alloca instruction.
    RawAddress getOriginalAllocatedAddress() const { return AllocaAddr; }

    /// Returns the address of the object within this declaration.
    /// Note that this does not chase the forwarding pointer for
    /// __block decls.
    Address getObjectAddress(CodeGenFunction &CGF) const {
      if (!IsEscapingByRef) return Addr;

      return CGF.emitBlockByrefAddress(Addr, Variable, /*forward*/ false);
    }
  };
  AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
  void EmitAutoVarInit(const AutoVarEmission &emission);
  void EmitAutoVarCleanups(const AutoVarEmission &emission);
  void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
                              QualType::DestructionKind dtorKind);
  /// Emits the alloca and debug information for the size expressions for each
  /// dimension of an array. It registers the association of its (1-dimensional)
  /// QualTypes and size expression's debug node, so that CGDebugInfo can
  /// reference this node when creating the DISubrange object to describe the
  /// array's dimensions.
  void EmitAndRegisterVariableArrayDimensions(CGDebugInfo *DI,
                                              const VarDecl &D,
                                              bool EmitDebugInfo);

  void EmitStaticVarDecl(const VarDecl &D,
                         llvm::GlobalValue::LinkageTypes Linkage);
  class ParamValue {
    Address Addr = Address::invalid();
    llvm::Value *Value = nullptr;
    bool IsIndirect = false;

    ParamValue(llvm::Value *V) : Value(V), IsIndirect(false) {}
    ParamValue(Address A) : Addr(A), IsIndirect(true) {}

  public:
    static ParamValue forDirect(llvm::Value *value) {
      return ParamValue(value);
    }
    static ParamValue forIndirect(Address addr) {
      assert(!addr.getAlignment().isZero());
      return ParamValue(addr);
    }

    bool isIndirect() const { return IsIndirect; }
    llvm::Value *getAnyValue() const {
      if (!isIndirect())
        return Value;
      assert(!Addr.hasOffset() && "unexpected offset");
      return Addr.getBasePointer();
    }

    llvm::Value *getDirectValue() const {
      assert(!isIndirect());
      return Value;
    }

    Address getIndirectAddress() const {
      assert(isIndirect());
      return Addr;
    }
  };

  /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
  void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo);
  /// protectFromPeepholes - Protect a value that we're intending to
  /// store to the side, but which will probably be used later, from
  /// aggressive peepholing optimizations that might delete it.
  ///
  /// Pass the result to unprotectFromPeepholes to declare that
  /// protection is no longer required.
  ///
  /// There's no particular reason why this shouldn't apply to
  /// l-values, it's just that no existing peepholes work on pointers.
  PeepholeProtection protectFromPeepholes(RValue rvalue);
  void unprotectFromPeepholes(PeepholeProtection protection);

  void emitAlignmentAssumptionCheck(llvm::Value *Ptr, QualType Ty,
                                    SourceLocation Loc,
                                    SourceLocation AssumptionLoc,
                                    llvm::Value *Alignment,
                                    llvm::Value *OffsetValue,
                                    llvm::Value *TheCheck,
                                    llvm::Instruction *Assumption);

  void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty,
                               SourceLocation Loc, SourceLocation AssumptionLoc,
                               llvm::Value *Alignment,
                               llvm::Value *OffsetValue = nullptr);

  void emitAlignmentAssumption(llvm::Value *PtrValue, const Expr *E,
                               SourceLocation AssumptionLoc,
                               llvm::Value *Alignment,
                               llvm::Value *OffsetValue = nullptr);
  //===--------------------------------------------------------------------===//
  //                             Statement Emission
  //===--------------------------------------------------------------------===//

  /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
  void EmitStopPoint(const Stmt *S);

  /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
  /// this function even if there is no current insertion point.
  ///
  /// This function may clear the current insertion point; callers should use
  /// EnsureInsertPoint if they wish to subsequently generate code without first
  /// calling EmitBlock, EmitBranch, or EmitStmt.
  void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = std::nullopt);

  /// EmitSimpleStmt - Try to emit a "simple" statement which does not
  /// necessarily require an insertion point or debug information; typically
  /// because the statement amounts to a jump or a container of other
  /// statements.
  ///
  /// \return True if the statement was handled.
  bool EmitSimpleStmt(const Stmt *S, ArrayRef<const Attr *> Attrs);

  Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
                           AggValueSlot AVS = AggValueSlot::ignored());
  Address EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                       bool GetLast = false,
                                       AggValueSlot AVS =
                                           AggValueSlot::ignored());

  /// EmitLabel - Emit the block for the given label. It is legal to call this
  /// function even if there is no current insertion point.
  void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.

  void EmitLabelStmt(const LabelStmt &S);
  void EmitAttributedStmt(const AttributedStmt &S);
  void EmitGotoStmt(const GotoStmt &S);
  void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
  void EmitIfStmt(const IfStmt &S);

  void EmitWhileStmt(const WhileStmt &S,
                     ArrayRef<const Attr *> Attrs = std::nullopt);
  void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = std::nullopt);
  void EmitForStmt(const ForStmt &S,
                   ArrayRef<const Attr *> Attrs = std::nullopt);
  void EmitReturnStmt(const ReturnStmt &S);
  void EmitDeclStmt(const DeclStmt &S);
  void EmitBreakStmt(const BreakStmt &S);
  void EmitContinueStmt(const ContinueStmt &S);
  void EmitSwitchStmt(const SwitchStmt &S);
  void EmitDefaultStmt(const DefaultStmt &S, ArrayRef<const Attr *> Attrs);
  void EmitCaseStmt(const CaseStmt &S, ArrayRef<const Attr *> Attrs);
  void EmitCaseStmtRange(const CaseStmt &S, ArrayRef<const Attr *> Attrs);
  void EmitAsmStmt(const AsmStmt &S);

  void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
  void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
  void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
  void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
  void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);

  void EmitCoroutineBody(const CoroutineBodyStmt &S);
  void EmitCoreturnStmt(const CoreturnStmt &S);
  RValue EmitCoawaitExpr(const CoawaitExpr &E,
                         AggValueSlot aggSlot = AggValueSlot::ignored(),
                         bool ignoreResult = false);
  LValue EmitCoawaitLValue(const CoawaitExpr *E);
  RValue EmitCoyieldExpr(const CoyieldExpr &E,
                         AggValueSlot aggSlot = AggValueSlot::ignored(),
                         bool ignoreResult = false);
  LValue EmitCoyieldLValue(const CoyieldExpr *E);
  RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);

  void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
  void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);

  void EmitCXXTryStmt(const CXXTryStmt &S);
  void EmitSEHTryStmt(const SEHTryStmt &S);
  void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
  void EnterSEHTryStmt(const SEHTryStmt &S);
  void ExitSEHTryStmt(const SEHTryStmt &S);
  void VolatilizeTryBlocks(llvm::BasicBlock *BB,
                           llvm::SmallPtrSet<llvm::BasicBlock *, 10> &V);

  void pushSEHCleanup(CleanupKind kind,
                      llvm::Function *FinallyFunc);
  void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
                              const Stmt *OutlinedStmt);

  llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
                                            const SEHExceptStmt &Except);

  llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
                                             const SEHFinallyStmt &Finally);

  void EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
                                llvm::Value *ParentFP,
                                llvm::Value *EntryEBP);
  llvm::Value *EmitSEHExceptionCode();
  llvm::Value *EmitSEHExceptionInfo();
  llvm::Value *EmitSEHAbnormalTermination();
  /// Emit simple code for OpenMP directives in Simd-only mode.
  void EmitSimpleOMPExecutableDirective(const OMPExecutableDirective &D);

  /// Scan the outlined statement for captures from the parent function. For
  /// each capture, mark the capture as escaped and emit a call to
  /// llvm.localrecover. Insert the localrecover result into the LocalDeclMap.
  void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt,
                          bool IsFilter);

  /// Recovers the address of a local in a parent function. ParentVar is the
  /// address of the variable used in the immediate parent function. It can
  /// either be an alloca or a call to llvm.localrecover if there are nested
  /// outlined functions. ParentFP is the frame pointer of the outermost parent
  /// frame.
  Address recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
                                    Address ParentVar,
                                    llvm::Value *ParentFP);

  void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                           ArrayRef<const Attr *> Attrs = std::nullopt);
  /// Controls insertion of cancellation exit blocks in worksharing constructs.
  class OMPCancelStackRAII {
    CodeGenFunction &CGF;

  public:
    OMPCancelStackRAII(CodeGenFunction &CGF, OpenMPDirectiveKind Kind,
                       bool HasCancel)
        : CGF(CGF) {
      CGF.OMPCancelStack.enter(CGF, Kind, HasCancel);
    }
    ~OMPCancelStackRAII() { CGF.OMPCancelStack.exit(CGF); }
  };
  /// Returns calculated size of the specified type.
  llvm::Value *getTypeSize(QualType Ty);
  LValue InitCapturedStruct(const CapturedStmt &S);
  llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
  llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
  Address GenerateCapturedStmtArgument(const CapturedStmt &S);
  llvm::Function *GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                     SourceLocation Loc);
  void GenerateOpenMPCapturedVars(const CapturedStmt &S,
                                  SmallVectorImpl<llvm::Value *> &CapturedVars);
  void emitOMPSimpleStore(LValue LVal, RValue RVal, QualType RValTy,
                          SourceLocation Loc);
  /// Perform element by element copying of arrays with type \a
  /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
  /// generated by \a CopyGen.
  ///
  /// \param DestAddr Address of the destination array.
  /// \param SrcAddr Address of the source array.
  /// \param OriginalType Type of destination and source arrays.
  /// \param CopyGen Copying procedure that copies value of single array element
  /// to another single array element.
  void EmitOMPAggregateAssign(
      Address DestAddr, Address SrcAddr, QualType OriginalType,
      const llvm::function_ref<void(Address, Address)> CopyGen);
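
  // Illustrative sketch (hypothetical): element-wise array copy where CopyGen
  // receives the per-element destination and source addresses.
  //
  //   CGF.EmitOMPAggregateAssign(
  //       DestAddr, SrcAddr, ArrayTy,
  //       [&CGF](Address DestElem, Address SrcElem) {
  //         // emit the copy of one element; a real caller typically emits
  //         // the semantic CopyExpr here
  //       });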
  /// Emit proper copying of data from one variable to another.
  ///
  /// \param OriginalType Original type of the copied variables.
  /// \param DestAddr Destination address.
  /// \param SrcAddr Source address.
  /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
  /// type of the base array element).
  /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
  /// the base array element).
  /// \param Copy Actual copying expression for copying data from \a SrcVD to \a
  /// DestVD.
  void EmitOMPCopy(QualType OriginalType,
                   Address DestAddr, Address SrcAddr,
                   const VarDecl *DestVD, const VarDecl *SrcVD,
                   const Expr *Copy);
  /// Emit atomic update code for constructs: \a X = \a X \a BO \a E or
  /// \a X = \a E \a BO \a E.
  ///
  /// \param X Value to be updated.
  /// \param E Update value.
  /// \param BO Binary operation for update operation.
  /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
  /// expression, false otherwise.
  /// \param AO Atomic ordering of the generated atomic instructions.
  /// \param CommonGen Code generator for complex expressions that cannot be
  /// expressed through atomicrmw instruction.
  /// \returns <true, OldAtomicValue> if simple 'atomicrmw' instruction was
  /// generated, <false, RValue::get(nullptr)> otherwise.
  std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
      LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
      llvm::AtomicOrdering AO, SourceLocation Loc,
      const llvm::function_ref<RValue(RValue)> CommonGen);
  bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                 OMPPrivateScope &PrivateScope);
  void EmitOMPPrivateClause(const OMPExecutableDirective &D,
                            OMPPrivateScope &PrivateScope);
  void EmitOMPUseDevicePtrClause(
      const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
      const llvm::DenseMap<const ValueDecl *, llvm::Value *>
          CaptureDeviceAddrMap);
  void EmitOMPUseDeviceAddrClause(
      const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
      const llvm::DenseMap<const ValueDecl *, llvm::Value *>
          CaptureDeviceAddrMap);
  /// Emit code for copyin clause in \a D directive. The next code is
  /// generated at the start of outlined functions for directives:
  /// \code
  /// threadprivate_var1 = master_threadprivate_var1;
  /// operator=(threadprivate_var2, master_threadprivate_var2);
  /// ...
  /// __kmpc_barrier(&loc, global_tid);
  /// \endcode
  ///
  /// \param D OpenMP directive possibly with 'copyin' clause(s).
  /// \returns true if at least one copyin variable is found, false otherwise.
  bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
  /// Emit initial code for lastprivate variables. If some variable is
  /// not also firstprivate, then the default initialization is used. Otherwise
  /// initialization of this variable is performed by EmitOMPFirstprivateClause
  /// method.
  ///
  /// \param D Directive that may have 'lastprivate' clauses.
  /// \param PrivateScope Private scope for capturing lastprivate variables for
  /// proper codegen in internal captured statement.
  ///
  /// \returns true if there is at least one lastprivate variable, false
  /// otherwise.
  bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
                                    OMPPrivateScope &PrivateScope);
  /// Emit final copying of lastprivate values to original variables at
  /// the end of the worksharing or simd directive.
  ///
  /// \param D Directive that has at least one 'lastprivate' clause.
  /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
  /// it is the last iteration of the loop code in associated directive, or to
  /// 'i1 false' otherwise. If this item is nullptr, no final check is required.
  void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D,
                                     bool NoFinals,
                                     llvm::Value *IsLastIterCond = nullptr);
  /// Emit initial code for linear clauses.
  void EmitOMPLinearClause(const OMPLoopDirective &D,
                           CodeGenFunction::OMPPrivateScope &PrivateScope);
  /// Emit final code for linear clauses.
  /// \param CondGen Optional conditional code for final part of codegen for
  /// linear clause.
  void EmitOMPLinearClauseFinal(
      const OMPLoopDirective &D,
      const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
  /// Emit initial code for reduction variables. Creates reduction copies
  /// and initializes them with the values according to OpenMP standard.
  ///
  /// \param D Directive (possibly) with the 'reduction' clause.
  /// \param PrivateScope Private scope for capturing reduction variables for
  /// proper codegen in internal captured statement.
  ///
  void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
                                  OMPPrivateScope &PrivateScope,
                                  bool ForInscan = false);
  /// Emit final update of reduction values to original variables at
  /// the end of the directive.
  ///
  /// \param D Directive that has at least one 'reduction' clause.
  /// \param ReductionKind The kind of reduction to perform.
  void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D,
                                   const OpenMPDirectiveKind ReductionKind);
  /// Emit initial code for linear variables. Creates private copies
  /// and initializes them with the values according to OpenMP standard.
  ///
  /// \param D Directive (possibly) with the 'linear' clause.
  /// \return true if at least one linear variable is found that should be
  /// initialized with the value of the original variable, false otherwise.
  bool EmitOMPLinearClauseInit(const OMPLoopDirective &D);
  typedef const llvm::function_ref<void(CodeGenFunction & /*CGF*/,
                                        llvm::Function * /*OutlinedFn*/,
                                        const OMPTaskDataTy & /*Data*/)>
      TaskGenTy;
  void EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
                                 const OpenMPDirectiveKind CapturedRegion,
                                 const RegionCodeGenTy &BodyGen,
                                 const TaskGenTy &TaskGen, OMPTaskDataTy &Data);
  struct OMPTargetDataInfo {
    Address BasePointersArray = Address::invalid();
    Address PointersArray = Address::invalid();
    Address SizesArray = Address::invalid();
    Address MappersArray = Address::invalid();
    unsigned NumberOfTargetItems = 0;
    explicit OMPTargetDataInfo() = default;
    OMPTargetDataInfo(Address BasePointersArray, Address PointersArray,
                      Address SizesArray, Address MappersArray,
                      unsigned NumberOfTargetItems)
        : BasePointersArray(BasePointersArray), PointersArray(PointersArray),
          SizesArray(SizesArray), MappersArray(MappersArray),
          NumberOfTargetItems(NumberOfTargetItems) {}
  };
  void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
                                       const RegionCodeGenTy &BodyGen,
                                       OMPTargetDataInfo &InputInfo);
  void processInReduction(const OMPExecutableDirective &S,
                          OMPTaskDataTy &Data,
                          CodeGenFunction &CGF,
                          const CapturedStmt *CS,
                          OMPPrivateScope &Scope);
  void EmitOMPMetaDirective(const OMPMetaDirective &S);
  void EmitOMPParallelDirective(const OMPParallelDirective &S);
  void EmitOMPSimdDirective(const OMPSimdDirective &S);
  void EmitOMPTileDirective(const OMPTileDirective &S);
  void EmitOMPUnrollDirective(const OMPUnrollDirective &S);
  void EmitOMPReverseDirective(const OMPReverseDirective &S);
  void EmitOMPInterchangeDirective(const OMPInterchangeDirective &S);
  void EmitOMPForDirective(const OMPForDirective &S);
  void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
  void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
  void EmitOMPSectionDirective(const OMPSectionDirective &S);
  void EmitOMPSingleDirective(const OMPSingleDirective &S);
  void EmitOMPMasterDirective(const OMPMasterDirective &S);
  void EmitOMPMaskedDirective(const OMPMaskedDirective &S);
  void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
  void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
  void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
  void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
  void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S);
  void EmitOMPTaskDirective(const OMPTaskDirective &S);
  void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
  void EmitOMPErrorDirective(const OMPErrorDirective &S);
  void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
  void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
  void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
  void EmitOMPFlushDirective(const OMPFlushDirective &S);
  void EmitOMPDepobjDirective(const OMPDepobjDirective &S);
  void EmitOMPScanDirective(const OMPScanDirective &S);
  void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
  void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
  void EmitOMPTargetDirective(const OMPTargetDirective &S);
  void EmitOMPTargetDataDirective(const OMPTargetDataDirective &S);
  void EmitOMPTargetEnterDataDirective(const OMPTargetEnterDataDirective &S);
  void EmitOMPTargetExitDataDirective(const OMPTargetExitDataDirective &S);
  void EmitOMPTargetUpdateDirective(const OMPTargetUpdateDirective &S);
  void EmitOMPTargetParallelDirective(const OMPTargetParallelDirective &S);
  void
  EmitOMPTargetParallelForDirective(const OMPTargetParallelForDirective &S);
  void EmitOMPTeamsDirective(const OMPTeamsDirective &S);
  void
  EmitOMPCancellationPointDirective(const OMPCancellationPointDirective &S);
  void EmitOMPCancelDirective(const OMPCancelDirective &S);
  void EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S);
  void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
  void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
  void EmitOMPMasterTaskLoopDirective(const OMPMasterTaskLoopDirective &S);
  void
  EmitOMPMasterTaskLoopSimdDirective(const OMPMasterTaskLoopSimdDirective &S);
  void EmitOMPParallelMasterTaskLoopDirective(
      const OMPParallelMasterTaskLoopDirective &S);
  void EmitOMPParallelMasterTaskLoopSimdDirective(
      const OMPParallelMasterTaskLoopSimdDirective &S);
  void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
  void EmitOMPDistributeParallelForDirective(
      const OMPDistributeParallelForDirective &S);
  void EmitOMPDistributeParallelForSimdDirective(
      const OMPDistributeParallelForSimdDirective &S);
  void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S);
  void EmitOMPTargetParallelForSimdDirective(
      const OMPTargetParallelForSimdDirective &S);
  void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S);
  void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S);
  void
  EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S);
  void EmitOMPTeamsDistributeParallelForSimdDirective(
      const OMPTeamsDistributeParallelForSimdDirective &S);
  void EmitOMPTeamsDistributeParallelForDirective(
      const OMPTeamsDistributeParallelForDirective &S);
  void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S);
  void EmitOMPTargetTeamsDistributeDirective(
      const OMPTargetTeamsDistributeDirective &S);
  void EmitOMPTargetTeamsDistributeParallelForDirective(
      const OMPTargetTeamsDistributeParallelForDirective &S);
  void EmitOMPTargetTeamsDistributeParallelForSimdDirective(
      const OMPTargetTeamsDistributeParallelForSimdDirective &S);
  void EmitOMPTargetTeamsDistributeSimdDirective(
      const OMPTargetTeamsDistributeSimdDirective &S);
  void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S);
  void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S);
  void EmitOMPTargetParallelGenericLoopDirective(
      const OMPTargetParallelGenericLoopDirective &S);
  void EmitOMPTargetTeamsGenericLoopDirective(
      const OMPTargetTeamsGenericLoopDirective &S);
  void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S);
  void EmitOMPInteropDirective(const OMPInteropDirective &S);
  void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S);
  /// Emit device code for the target directive.
  static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
                                          StringRef ParentName,
                                          const OMPTargetDirective &S);

  static void
  EmitOMPTargetParallelDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
                                      const OMPTargetParallelDirective &S);

  /// Emit device code for the target parallel for directive.
  static void EmitOMPTargetParallelForDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetParallelForDirective &S);

  /// Emit device code for the target parallel for simd directive.
  static void EmitOMPTargetParallelForSimdDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetParallelForSimdDirective &S);

  /// Emit device code for the target teams directive.
  static void
  EmitOMPTargetTeamsDeviceFunction(CodeGenModule &CGM, StringRef ParentName,
                                   const OMPTargetTeamsDirective &S);

  /// Emit device code for the target teams distribute directive.
  static void EmitOMPTargetTeamsDistributeDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetTeamsDistributeDirective &S);

  /// Emit device code for the target teams distribute simd directive.
  static void EmitOMPTargetTeamsDistributeSimdDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetTeamsDistributeSimdDirective &S);

  /// Emit device code for the target simd directive.
  static void EmitOMPTargetSimdDeviceFunction(CodeGenModule &CGM,
                                              StringRef ParentName,
                                              const OMPTargetSimdDirective &S);

  /// Emit device code for the target teams distribute parallel for simd
  /// directive.
  static void EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetTeamsDistributeParallelForSimdDirective &S);

  /// Emit device code for the target teams loop directive.
  static void EmitOMPTargetTeamsGenericLoopDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetTeamsGenericLoopDirective &S);

  /// Emit device code for the target parallel loop directive.
  static void EmitOMPTargetParallelGenericLoopDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetParallelGenericLoopDirective &S);

  static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
      CodeGenModule &CGM, StringRef ParentName,
      const OMPTargetTeamsDistributeParallelForDirective &S);
  /// Emit the Stmt \p S and return its topmost canonical loop, if any.
  /// TODO: The \p Depth parameter is not yet implemented and must be 1. In the
  /// future it is meant to be the number of loops expected in the loop nests
  /// (usually specified by the "collapse" clause) that are collapsed to a
  /// single loop by this function.
  llvm::CanonicalLoopInfo *EmitOMPCollapsedCanonicalLoopNest(const Stmt *S,
                                                             int Depth);

  /// Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
  void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S);

  /// Emit inner loop of the worksharing/simd construct.
  ///
  /// \param S Directive, for which the inner loop must be emitted.
  /// \param RequiresCleanup true, if directive has some associated private
  /// variables.
  /// \param LoopCond Boolean condition for loop continuation.
  /// \param IncExpr Increment expression for loop control variable.
  /// \param BodyGen Generator for the inner body of the inner loop.
  /// \param PostIncGen Generator for post-increment code (required for ordered
  /// loop directives).
  void EmitOMPInnerLoop(
      const OMPExecutableDirective &S, bool RequiresCleanup,
      const Expr *LoopCond, const Expr *IncExpr,
      const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
      const llvm::function_ref<void(CodeGenFunction &)> PostIncGen);

  JumpDest getOMPCancelDestination(OpenMPDirectiveKind Kind);
  /// Emit initial code for loop counters of loop-based directives.
  void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S,
                                  OMPPrivateScope &LoopScope);

  /// Helper for the OpenMP loop directives.
  void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);

  /// Emit code for the worksharing loop-based directive.
  /// \return true, if this construct has any lastprivate clause, false -
  /// otherwise.
  bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
                              const CodeGenLoopBoundsTy &CodeGenLoopBounds,
                              const CodeGenDispatchBoundsTy &CGDispatchBounds);

  /// Emit code for the distribute loop-based directive.
  void EmitOMPDistributeLoop(const OMPLoopDirective &S,
                             const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);

  /// Helpers for the OpenMP loop directives.
  void EmitOMPSimdInit(const OMPLoopDirective &D);
  void EmitOMPSimdFinal(
      const OMPLoopDirective &D,
      const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);

  /// Emits the lvalue for the expression with possibly captured variable.
  LValue EmitOMPSharedLValue(const Expr *E);

  /// Helpers for blocks.
  llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
  /// struct with the values to be passed to the OpenMP loop-related functions
  struct OMPLoopArguments {
    /// loop lower bound
    Address LB = Address::invalid();
    /// loop upper bound
    Address UB = Address::invalid();
    /// loop stride
    Address ST = Address::invalid();
    /// isLastIteration argument for runtime functions
    Address IL = Address::invalid();
    /// Chunk value generated by sema
    llvm::Value *Chunk = nullptr;
    /// EnsureUpperBound
    Expr *EUB = nullptr;
    /// IncrementExpression
    Expr *IncExpr = nullptr;
    /// Loop initialization
    Expr *Init = nullptr;
    /// Loop exit condition
    Expr *Cond = nullptr;
    /// Update of LB after a whole chunk has been executed
    Expr *NextLB = nullptr;
    /// Update of UB after a whole chunk has been executed
    Expr *NextUB = nullptr;
    /// Distinguish between the for distribute and sections
    OpenMPDirectiveKind DKind = llvm::omp::OMPD_unknown;
    OMPLoopArguments() = default;
    OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
                     llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
                     Expr *IncExpr = nullptr, Expr *Init = nullptr,
                     Expr *Cond = nullptr, Expr *NextLB = nullptr,
                     Expr *NextUB = nullptr)
        : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
          IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
          NextUB(NextUB) {}
  };
  void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
                        const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
                        const OMPLoopArguments &LoopArgs,
                        const CodeGenLoopTy &CodeGenLoop,
                        const CodeGenOrderedTy &CodeGenOrdered);
  void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
                           bool IsMonotonic, const OMPLoopDirective &S,
                           OMPPrivateScope &LoopScope, bool Ordered,
                           const OMPLoopArguments &LoopArgs,
                           const CodeGenDispatchBoundsTy &CGDispatchBounds);
  void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
                                  const OMPLoopDirective &S,
                                  OMPPrivateScope &LoopScope,
                                  const OMPLoopArguments &LoopArgs,
                                  const CodeGenLoopTy &CodeGenLoopContent);
  /// Emit code for sections directive.
  void EmitSections(const OMPExecutableDirective &S);
  //===--------------------------------------------------------------------===//
  //                             OpenACC Emission
  //===--------------------------------------------------------------------===//
  void EmitOpenACCComputeConstruct(const OpenACCComputeConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // simply emitting its structured block, but in the future we will implement
    // some sort of IR.
    EmitStmt(S.getStructuredBlock());
  }

  void EmitOpenACCLoopConstruct(const OpenACCLoopConstruct &S) {
    // TODO OpenACC: Implement this. It is currently implemented as a 'no-op',
    // simply emitting its loop, but in the future we will implement
    // some sort of IR.
    EmitStmt(S.getLoop());
  }

  //===--------------------------------------------------------------------===//
  //                         LValue Expression Emission
  //===--------------------------------------------------------------------===//
  /// Create a check that a scalar RValue is non-null.
  llvm::Value *EmitNonNullRValueCheck(RValue RV, QualType T);

  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
  RValue GetUndefRValue(QualType Ty);

  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
  /// and issue an ErrorUnsupported style diagnostic (using the
  /// provided Name).
  RValue EmitUnsupportedRValue(const Expr *E,
                               const char *Name);

  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
  /// an ErrorUnsupported style diagnostic (using the provided Name).
  LValue EmitUnsupportedLValue(const Expr *E,
                               const char *Name);

  /// EmitLValue - Emit code to compute a designator that specifies the location
  /// of the expression.
  ///
  /// This can return one of two things: a simple address or a bitfield
  /// reference. In either case, the LLVM Value* in the LValue structure is
  /// guaranteed to be an LLVM pointer type.
  ///
  /// If this returns a bitfield reference, nothing about the pointee type of
  /// the LLVM value is known: For example, it may not be a pointer to an
  /// integer.
  ///
  /// If this returns a normal address, and if the lvalue's C type is fixed
  /// size, this method guarantees that the returned pointer type will point to
  /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
  /// variable length type, this is not possible.
  ///
  LValue EmitLValue(const Expr *E,
                    KnownNonNull_t IsKnownNonNull = NotKnownNonNull);

private:
  LValue EmitLValueHelper(const Expr *E, KnownNonNull_t IsKnownNonNull);

public:
  /// Same as EmitLValue but additionally we generate checking code to
  /// guard against undefined behavior. This is only suitable when we know
  /// that the address will be used to access the object.
  LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
  RValue convertTempToRValue(Address addr, QualType type,
                             SourceLocation Loc);

  void EmitAtomicInit(Expr *E, LValue lvalue);

  bool LValueIsSuitableForInlineAtomic(LValue Src);

  RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
                        AggValueSlot Slot = AggValueSlot::ignored());

  RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
                        llvm::AtomicOrdering AO, bool IsVolatile = false,
                        AggValueSlot slot = AggValueSlot::ignored());

  void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);

  void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
                       bool IsVolatile, bool isInit);

  std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
      LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent,
      bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());
  void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
                        bool IsVolatile);

  /// EmitToMemory - Change a scalar value from its value
  /// representation to its in-memory representation.
  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);

  /// EmitFromMemory - Change a scalar value from its memory
  /// representation to its value representation.
  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);

  /// Check if the scalar \p Value is within the valid range for the given
  /// type \p Ty.
  ///
  /// Returns true if a check is needed (even if the range is unknown).
  bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
                            SourceLocation Loc);
  /// EmitLoadOfScalar - Load a scalar value from an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation.
  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
                                SourceLocation Loc,
                                AlignmentSource Source = AlignmentSource::Type,
                                bool isNontemporal = false) {
    return EmitLoadOfScalar(Addr, Volatile, Ty, Loc, LValueBaseInfo(Source),
                            CGM.getTBAAAccessInfo(Ty), isNontemporal);
  }

  llvm::Value *EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty,
                                SourceLocation Loc, LValueBaseInfo BaseInfo,
                                TBAAAccessInfo TBAAInfo,
                                bool isNontemporal = false);

  /// EmitLoadOfScalar - Load a scalar value from an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation. The l-value must be a simple
  /// l-value.
  llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc);

  /// EmitStoreOfScalar - Store a scalar value to an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation.
  void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
                         bool Volatile, QualType Ty,
                         AlignmentSource Source = AlignmentSource::Type,
                         bool isInit = false, bool isNontemporal = false) {
    EmitStoreOfScalar(Value, Addr, Volatile, Ty, LValueBaseInfo(Source),
                      CGM.getTBAAAccessInfo(Ty), isInit, isNontemporal);
  }

  void EmitStoreOfScalar(llvm::Value *Value, Address Addr,
                         bool Volatile, QualType Ty,
                         LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo,
                         bool isInit = false, bool isNontemporal = false);

  /// EmitStoreOfScalar - Store a scalar value to an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation. The l-value must be a simple
  /// l-value. The isInit flag indicates whether this is an initialization.
  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                         bool isInit = false);
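
  // Illustrative sketch (hypothetical): a scalar round trip through memory.
  // The load/store pair performs the memory-vs-value representation
  // conversion (e.g. i8 in memory vs. i1 for bool).
  //
  //   llvm::Value *V = CGF.EmitLoadOfScalar(SrcLV, Loc);
  //   CGF.EmitStoreOfScalar(V, DestLV, /*isInit=*/false);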
  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
  /// this method emits the address of the lvalue, then loads the result as an
  /// rvalue, returning the rvalue.
  RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
  RValue EmitLoadOfExtVectorElementLValue(LValue V);
  RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
  RValue EmitLoadOfGlobalRegLValue(LValue LV);

  /// Like EmitLoadOfLValue but also handles complex and aggregate types.
  RValue EmitLoadOfAnyValue(LValue V,
                            AggValueSlot Slot = AggValueSlot::ignored(),
                            SourceLocation Loc = {});

  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
  /// lvalue, where both are guaranteed to have the same type, and that type
  /// is 'Ty'.
  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
  void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);

  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
  /// as EmitStoreThroughLValue.
  ///
  /// \param Result [out] - If non-null, this will be set to a Value* for the
  /// bit-field contents after the store, appropriate for use as the result of
  /// an assignment to the bit-field.
  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                      llvm::Value **Result = nullptr);

  /// Emit an l-value for an assignment (simple or compound) of complex type.
  LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
  LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
                                             llvm::Value *&Result);
  // Note: only available for agg return types
  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
  // Note: only available for agg return types
  LValue EmitCallExprLValue(const CallExpr *E);
  // Note: only available for agg return types
  LValue EmitVAArgExprLValue(const VAArgExpr *E);
  LValue EmitDeclRefLValue(const DeclRefExpr *E);
  LValue EmitStringLiteralLValue(const StringLiteral *E);
  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
  LValue EmitPredefinedLValue(const PredefinedExpr *E);
  LValue EmitUnaryOpLValue(const UnaryOperator *E);
  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
                                bool Accessed = false);
  LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E);
  LValue EmitArraySectionExpr(const ArraySectionExpr *E,
                              bool IsLowerBound = true);
  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
  LValue EmitMemberExpr(const MemberExpr *E);
  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
  LValue EmitInitListLValue(const InitListExpr *E);
  void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E);
  LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
  LValue EmitCastLValue(const CastExpr *E);
  LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
  LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);

  Address EmitExtVectorElementLValue(LValue V);

  RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);

  Address EmitArrayToPointerDecay(const Expr *Array,
                                  LValueBaseInfo *BaseInfo = nullptr,
                                  TBAAAccessInfo *TBAAInfo = nullptr);
  class ConstantEmission {
    llvm::PointerIntPair<llvm::Constant *, 1, bool> ValueAndIsReference;
    ConstantEmission(llvm::Constant *C, bool isReference)
        : ValueAndIsReference(C, isReference) {}

  public:
    ConstantEmission() {}
    static ConstantEmission forReference(llvm::Constant *C) {
      return ConstantEmission(C, true);
    }
    static ConstantEmission forValue(llvm::Constant *C) {
      return ConstantEmission(C, false);
    }

    explicit operator bool() const {
      return ValueAndIsReference.getOpaqueValue() != nullptr;
    }

    bool isReference() const { return ValueAndIsReference.getInt(); }
    LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
      assert(isReference());
      return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
                                            refExpr->getType());
    }

    llvm::Constant *getValue() const {
      assert(!isReference());
      return ValueAndIsReference.getPointer();
    }
  };

  ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
  ConstantEmission tryEmitAsConstant(const MemberExpr *ME);
  llvm::Value *emitScalarConstant(const ConstantEmission &Constant, Expr *E);
  RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
                                AggValueSlot slot = AggValueSlot::ignored());
  LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);

  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                              const ObjCIvarDecl *Ivar);
  llvm::Value *EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
                                           const ObjCIvarDecl *Ivar);
  LValue EmitLValueForField(LValue Base, const FieldDecl *Field);
  LValue EmitLValueForLambdaField(const FieldDecl *Field);
  LValue EmitLValueForLambdaField(const FieldDecl *Field,
                                  llvm::Value *ThisValue);

  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
  /// if the Field is a reference, this will return the address of the reference
  /// and not the address of the value stored in the reference.
  LValue EmitLValueForFieldInitialization(LValue Base,
                                          const FieldDecl *Field);

  LValue EmitLValueForIvar(QualType ObjectTy,
                           llvm::Value *Base, const ObjCIvarDecl *Ivar,
                           unsigned CVRQualifiers);

  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
  LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);

  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
  LValue EmitStmtExprLValue(const StmtExpr *E);
  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
  LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
  void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);
  //===--------------------------------------------------------------------===//
  //                         Scalar Expression Emission
  //===--------------------------------------------------------------------===//

  /// EmitCall - Generate a call of the given function, expecting the given
  /// result type, and using the given argument list which specifies both the
  /// LLVM arguments and the types they were derived from.
  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
                  ReturnValueSlot ReturnValue, const CallArgList &Args,
                  llvm::CallBase **callOrInvoke, bool IsMustTail,
                  SourceLocation Loc,
                  bool IsVirtualFunctionPointerThunk = false);
  RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
                  ReturnValueSlot ReturnValue, const CallArgList &Args,
                  llvm::CallBase **callOrInvoke = nullptr,
                  bool IsMustTail = false) {
    return EmitCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke,
                    IsMustTail, SourceLocation());
  }
  RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
                  ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr);
  RValue EmitCallExpr(const CallExpr *E,
                      ReturnValueSlot ReturnValue = ReturnValueSlot());
  RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
  CGCallee EmitCallee(const Expr *E);

  void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
  void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl);
  llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
                                  const Twine &name = "");
  llvm::CallInst *EmitRuntimeCall(llvm::FunctionCallee callee,
                                  ArrayRef<llvm::Value *> args,
                                  const Twine &name = "");
  llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
                                          const Twine &name = "");
  llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
                                          ArrayRef<Address> args,
                                          const Twine &name = "");
  llvm::CallInst *EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
                                          ArrayRef<llvm::Value *> args,
                                          const Twine &name = "");

  SmallVector<llvm::OperandBundleDef, 1>
  getBundlesForFunclet(llvm::Value *Callee);

  llvm::CallBase *EmitCallOrInvoke(llvm::FunctionCallee Callee,
                                   ArrayRef<llvm::Value *> Args,
                                   const Twine &Name = "");
  llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
                                          ArrayRef<llvm::Value *> args,
                                          const Twine &name = "");
  llvm::CallBase *EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
                                          const Twine &name = "");
  void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee,
                                       ArrayRef<llvm::Value *> args);
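
  // A minimal usage sketch for the runtime-call helpers above. The entry
  // point "__example_hook" is hypothetical, not a real Clang runtime function:
  //
  //   llvm::FunctionCallee Hook = CGM.CreateRuntimeFunction(
  //       llvm::FunctionType::get(VoidTy, /*isVarArg=*/false),
  //       "__example_hook");
  //   EmitRuntimeCall(Hook);          // plain call
  //   EmitNounwindRuntimeCall(Hook);  // additionally marks the call nounwind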
  CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
                                     NestedNameSpecifier *Qual,
                                     llvm::Type *Ty);

  CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
                                               CXXDtorType Type,
                                               const CXXRecordDecl *RD);

  bool isPointerKnownNonNull(const Expr *E);
  /// Create the discriminator from the storage address and the entity hash.
  llvm::Value *EmitPointerAuthBlendDiscriminator(llvm::Value *StorageAddress,
                                                 llvm::Value *Discriminator);
  CGPointerAuthInfo EmitPointerAuthInfo(const PointerAuthSchema &Schema,
                                        llvm::Value *StorageAddress,
                                        GlobalDecl SchemaDecl,
                                        QualType SchemaType);

  llvm::Value *EmitPointerAuthSign(const CGPointerAuthInfo &Info,
                                   llvm::Value *Pointer);

  llvm::Value *EmitPointerAuthAuth(const CGPointerAuthInfo &Info,
                                   llvm::Value *Pointer);

  llvm::Value *emitPointerAuthResign(llvm::Value *Pointer, QualType PointerType,
                                     const CGPointerAuthInfo &CurAuthInfo,
                                     const CGPointerAuthInfo &NewAuthInfo,
                                     bool IsKnownNonNull);
  llvm::Value *emitPointerAuthResignCall(llvm::Value *Pointer,
                                         const CGPointerAuthInfo &CurInfo,
                                         const CGPointerAuthInfo &NewInfo);

  void EmitPointerAuthOperandBundle(
      const CGPointerAuthInfo &Info,
      SmallVectorImpl<llvm::OperandBundleDef> &Bundles);

  llvm::Value *authPointerToPointerCast(llvm::Value *ResultPtr,
                                        QualType SourceType, QualType DestType);
  Address authPointerToPointerCast(Address Ptr, QualType SourceType,
                                   QualType DestType);

  Address getAsNaturalAddressOf(Address Addr, QualType PointeeTy);

  llvm::Value *getAsNaturalPointerTo(Address Addr, QualType PointeeType) {
    return getAsNaturalAddressOf(Addr, PointeeType).getBasePointer();
  }
  // Return the copy constructor name with the prefix "__copy_constructor_"
  // removed.
  static std::string getNonTrivialCopyConstructorStr(QualType QT,
                                                     CharUnits Alignment,
                                                     bool IsVolatile,
                                                     ASTContext &Ctx);

  // Return the destructor name with the prefix "__destructor_" removed.
  static std::string getNonTrivialDestructorStr(QualType QT,
                                                CharUnits Alignment,
                                                bool IsVolatile,
                                                ASTContext &Ctx);

  // These functions emit calls to the special functions of non-trivial C
  // structs.
  void defaultInitNonTrivialCStructVar(LValue Dst);
  void callCStructDefaultConstructor(LValue Dst);
  void callCStructDestructor(LValue Dst);
  void callCStructCopyConstructor(LValue Dst, LValue Src);
  void callCStructMoveConstructor(LValue Dst, LValue Src);
  void callCStructCopyAssignmentOperator(LValue Dst, LValue Src);
  void callCStructMoveAssignmentOperator(LValue Dst, LValue Src);
  RValue EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method,
                                     const CGCallee &Callee,
                                     ReturnValueSlot ReturnValue,
                                     llvm::Value *This,
                                     llvm::Value *ImplicitParam,
                                     QualType ImplicitParamTy,
                                     const CallExpr *E, CallArgList *RtlArgs);
  RValue EmitCXXDestructorCall(GlobalDecl Dtor, const CGCallee &Callee,
                               llvm::Value *This, QualType ThisTy,
                               llvm::Value *ImplicitParam,
                               QualType ImplicitParamTy, const CallExpr *E);
  RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
                               ReturnValueSlot ReturnValue);
  RValue EmitCXXMemberOrOperatorMemberCallExpr(const CallExpr *CE,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue,
                                               bool HasQualifier,
                                               NestedNameSpecifier *Qualifier,
                                               bool IsArrow, const Expr *Base);
  // Compute the object pointer.
  Address EmitCXXMemberDataPointerAddress(
      const Expr *E, Address base, llvm::Value *memberPtr,
      const MemberPointerType *memberPtrType,
      LValueBaseInfo *BaseInfo = nullptr, TBAAAccessInfo *TBAAInfo = nullptr);
  RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                      ReturnValueSlot ReturnValue);

  RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                       const CXXMethodDecl *MD,
                                       ReturnValueSlot ReturnValue);
  RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E);

  RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                ReturnValueSlot ReturnValue);

  RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E);
  RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E);
  RValue EmitOpenMPDevicePrintfCallExpr(const CallExpr *E);
  RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
                         const CallExpr *E, ReturnValueSlot ReturnValue);

  RValue emitRotate(const CallExpr *E, bool IsRotateRight);

  /// Emit IR for __builtin_os_log_format.
  RValue emitBuiltinOSLogFormat(const CallExpr &E);

  /// Emit IR for __builtin_is_aligned.
  RValue EmitBuiltinIsAligned(const CallExpr *E);
  /// Emit IR for __builtin_align_up/__builtin_align_down.
  RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp);

  llvm::Function *generateBuiltinOSLogHelperFunction(
      const analyze_os_log::OSLogBufferLayout &Layout,
      CharUnits BufferAlignment);

  RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);

  /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
  /// is unhandled by the current target.
  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                     ReturnValueSlot ReturnValue);
  llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
                                             const llvm::CmpInst::Predicate Fp,
                                             const llvm::CmpInst::Predicate Ip,
                                             const llvm::Twine &Name = "");
  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                  ReturnValueSlot ReturnValue,
                                  llvm::Triple::ArchType Arch);
  llvm::Value *EmitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                     ReturnValueSlot ReturnValue,
                                     llvm::Triple::ArchType Arch);
  llvm::Value *EmitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                     ReturnValueSlot ReturnValue,
                                     llvm::Triple::ArchType Arch);
  llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy,
                                   QualType RTy);
  llvm::Value *EmitCMSEClearRecord(llvm::Value *V, llvm::ArrayType *ATy,
                                   QualType RTy);

  llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
                                         unsigned LLVMIntrinsic,
                                         unsigned AltLLVMIntrinsic,
                                         const char *NameHint,
                                         unsigned Modifier, const CallExpr *E,
                                         SmallVectorImpl<llvm::Value *> &Ops,
                                         Address PtrOp0, Address PtrOp1,
                                         llvm::Triple::ArchType Arch);
  llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
                                          unsigned Modifier, llvm::Type *ArgTy,
                                          const CallExpr *E);
  llvm::Value *EmitNeonCall(llvm::Function *F,
                            SmallVectorImpl<llvm::Value *> &O,
                            const char *name,
                            unsigned shift = 0, bool rightshift = false);
  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx,
                             const llvm::ElementCount &Count);
  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
  llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
                                   bool negateForRightShift);
  llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
                                 llvm::Type *Ty, bool usgn, const char *name);
  llvm::Value *vectorWrapScalar16(llvm::Value *Op);

  /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
  /// access builtin. Only required if it can't be inferred from the base
  /// pointer operand.
  llvm::Type *SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags);
  SmallVector<llvm::Type *, 2>
  getSVEOverloadTypes(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType,
                      ArrayRef<llvm::Value *> Ops);
  llvm::Type *getEltType(const SVETypeFlags &TypeFlags);
  llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
  llvm::ScalableVectorType *getSVEPredType(const SVETypeFlags &TypeFlags);
  llvm::Value *EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
                                    llvm::Type *ReturnType,
                                    ArrayRef<llvm::Value *> Ops);
  llvm::Value *EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
                                  llvm::Type *ReturnType,
                                  ArrayRef<llvm::Value *> Ops);
  llvm::Value *EmitSVEAllTruePred(const SVETypeFlags &TypeFlags);
  llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
  llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
  llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
  llvm::Value *EmitSVEPMull(const SVETypeFlags &TypeFlags,
                            llvm::SmallVectorImpl<llvm::Value *> &Ops,
                            unsigned BuiltinID);
  llvm::Value *EmitSVEMovl(const SVETypeFlags &TypeFlags,
                           llvm::ArrayRef<llvm::Value *> Ops,
                           unsigned BuiltinID);
  llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
                                    llvm::ScalableVectorType *VTy);
  llvm::Value *EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
                                 llvm::SmallVectorImpl<llvm::Value *> &Ops,
                                 unsigned IntID);
  llvm::Value *EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
                                   llvm::SmallVectorImpl<llvm::Value *> &Ops,
                                   unsigned IntID);
  llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy,
                                 SmallVectorImpl<llvm::Value *> &Ops,
                                 unsigned BuiltinID, bool IsZExtReturn);
  llvm::Value *EmitSVEMaskedStore(const CallExpr *,
                                  SmallVectorImpl<llvm::Value *> &Ops,
                                  unsigned BuiltinID);
  llvm::Value *EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
                                   SmallVectorImpl<llvm::Value *> &Ops,
                                   unsigned BuiltinID);
  llvm::Value *EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
                                     SmallVectorImpl<llvm::Value *> &Ops,
                                     unsigned IntID);
  llvm::Value *EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
                                 SmallVectorImpl<llvm::Value *> &Ops,
                                 unsigned IntID);
  llvm::Value *EmitSVEStructStore(const SVETypeFlags &TypeFlags,
                                  SmallVectorImpl<llvm::Value *> &Ops,
                                  unsigned IntID);

  /// FormSVEBuiltinResult - Returns the struct of scalable vectors as a wider
  /// vector. It extracts the scalable vector from the struct and inserts it
  /// into the wider vector. This avoids the error that would otherwise occur
  /// when LLVM has to allocate space for a struct of scalable vectors returned
  /// by a function.
  llvm::Value *FormSVEBuiltinResult(llvm::Value *Call);
  llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);

  llvm::Value *EmitSMELd1St1(const SVETypeFlags &TypeFlags,
                             llvm::SmallVectorImpl<llvm::Value *> &Ops,
                             unsigned IntID);
  llvm::Value *EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
                                llvm::SmallVectorImpl<llvm::Value *> &Ops,
                                unsigned IntID);
  llvm::Value *EmitSMEZero(const SVETypeFlags &TypeFlags,
                           llvm::SmallVectorImpl<llvm::Value *> &Ops,
                           unsigned IntID);
  llvm::Value *EmitSMELdrStr(const SVETypeFlags &TypeFlags,
                             llvm::SmallVectorImpl<llvm::Value *> &Ops,
                             unsigned IntID);

  void GetAArch64SVEProcessedOperands(unsigned BuiltinID, const CallExpr *E,
                                      SmallVectorImpl<llvm::Value *> &Ops,
                                      SVETypeFlags TypeFlags);

  llvm::Value *EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                      llvm::Triple::ArchType Arch);
  llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);

  llvm::Value *BuildVector(ArrayRef<llvm::Value *> Ops);
  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx,
                                           const CallExpr *E);
  llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
                                          const CallExpr *E);
  llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                    ReturnValueSlot ReturnValue);

  void AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
                                      const CallExpr *E);
  void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
                               llvm::AtomicOrdering &AO,
                               llvm::SyncScope::ID &SSID);
  enum class MSVCIntrin;
  llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);

  llvm::Value *EmitBuiltinAvailable(const VersionTuple &Version);

  llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
  llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
  llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
  llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
  llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
  llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
                                         const ObjCMethodDecl *MethodWithObjects);
  llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
  RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
                             ReturnValueSlot Return = ReturnValueSlot());
  /// Retrieves the default cleanup kind for an ARC cleanup.
  /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
  CleanupKind getARCCleanupKind() {
    return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
               ? NormalAndEHCleanup
               : NormalCleanup;
  }
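
  // A minimal sketch of how the cleanup kind is typically consumed (Loc and Ty
  // are assumed locals naming the address and type of a __strong variable;
  // pushDestroy is declared earlier in this class):
  //
  //   pushDestroy(getARCCleanupKind(), Loc, Ty, destroyARCStrongImprecise,
  //               /*useEHCleanupForArray=*/true);
  //
  // Under -fobjc-arc-eh this registers both a normal and an EH cleanup;
  // otherwise only the normal-path destroy is emitted.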
  void EmitARCInitWeak(Address addr, llvm::Value *value);
  void EmitARCDestroyWeak(Address addr);
  llvm::Value *EmitARCLoadWeak(Address addr);
  llvm::Value *EmitARCLoadWeakRetained(Address addr);
  llvm::Value *EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored);
  void emitARCCopyAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
  void emitARCMoveAssignWeak(QualType Ty, Address DstAddr, Address SrcAddr);
  void EmitARCCopyWeak(Address dst, Address src);
  void EmitARCMoveWeak(Address dst, Address src);
  llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
  llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
  llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
                                  bool resultIgnored);
  llvm::Value *EmitARCStoreStrongCall(Address addr, llvm::Value *value,
                                      bool resultIgnored);
  llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
  llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
  llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
  void EmitARCDestroyStrong(Address addr, ARCPreciseLifetime_t precise);
  void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
  llvm::Value *EmitARCAutorelease(llvm::Value *value);
  llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
  llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
  llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
  llvm::Value *EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value);

  llvm::Value *EmitObjCAutorelease(llvm::Value *value, llvm::Type *returnType);
  llvm::Value *EmitObjCRetainNonBlock(llvm::Value *value,
                                      llvm::Type *returnType);
  void EmitObjCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);

  std::pair<LValue, llvm::Value *>
  EmitARCStoreAutoreleasing(const BinaryOperator *e);
  std::pair<LValue, llvm::Value *>
  EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
  std::pair<LValue, llvm::Value *>
  EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored);

  llvm::Value *EmitObjCAlloc(llvm::Value *value, llvm::Type *returnType);
  llvm::Value *EmitObjCAllocWithZone(llvm::Value *value,
                                     llvm::Type *returnType);
  llvm::Value *EmitObjCAllocInit(llvm::Value *value, llvm::Type *resultType);

  llvm::Value *EmitObjCThrowOperand(const Expr *expr);
  llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
  llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);

  llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
  llvm::Value *EmitARCReclaimReturnedObject(const Expr *e,
                                            bool allowUnsafeClaim);
  llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
  llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
  llvm::Value *EmitARCUnsafeUnretainedScalarExpr(const Expr *expr);

  void EmitARCIntrinsicUse(ArrayRef<llvm::Value *> values);

  void EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values);

  static Destroyer destroyARCStrongImprecise;
  static Destroyer destroyARCStrongPrecise;
  static Destroyer destroyARCWeak;
  static Destroyer emitARCIntrinsicUse;
  static Destroyer destroyNonTrivialCStruct;

  void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
  llvm::Value *EmitObjCAutoreleasePoolPush();
  llvm::Value *EmitObjCMRRAutoreleasePoolPush();
  void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
  void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
  /// Emits a reference binding to the passed in expression.
  RValue EmitReferenceBindingToExpr(const Expr *E);

  //===--------------------------------------------------------------------===//
  //                           Expression Emission
  //===--------------------------------------------------------------------===//

  // Expressions are broken into three classes: scalar, complex, aggregate.
  /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
  /// scalar type, returning the result.
  llvm::Value *EmitScalarExpr(const Expr *E, bool IgnoreResultAssign = false);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
                                    QualType DstTy, SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc);

  /// EmitAggExpr - Emit the computation of the specified expression
  /// of aggregate type. The result is computed into the given slot,
  /// which may be null to indicate that the value is not needed.
  void EmitAggExpr(const Expr *E, AggValueSlot AS);

  /// EmitAggExprToLValue - Emit the computation of the specified expression of
  /// aggregate type into a temporary LValue.
  LValue EmitAggExprToLValue(const Expr *E);
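
  // A minimal sketch of routing an expression to the right emitter by its
  // evaluation kind (E is an assumed Expr*; getEvaluationKind and the TEK_*
  // values are declared earlier in this file):
  //
  //   switch (getEvaluationKind(E->getType())) {
  //   case TEK_Scalar:    (void)EmitScalarExpr(E); break;
  //   case TEK_Complex:   (void)EmitComplexExpr(E); break;
  //   case TEK_Aggregate: EmitAggExpr(E, AggValueSlot::ignored()); break;
  //   }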
  enum ExprValueKind { EVK_RValue, EVK_NonRValue };

  /// EmitAggFinalDestCopy - Emit copy of the specified aggregate into
  /// destination address.
  void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src,
                            ExprValueKind SrcKind);

  /// Create a store to \arg DstPtr from \arg Src, truncating the stored value
  /// to at most \arg DstSize bytes.
  void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize,
                          bool DstIsVolatile);

  /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
  /// make sure it survives garbage collection until this point.
  void EmitExtendGCLifetime(llvm::Value *object);

  /// EmitComplexExpr - Emit the computation of the specified expression of
  /// complex type, returning the result.
  ComplexPairTy EmitComplexExpr(const Expr *E,
                                bool IgnoreReal = false,
                                bool IgnoreImag = false);

  /// EmitComplexExprIntoLValue - Emit the given expression of complex
  /// type and place its result into the specified l-value.
  void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);

  /// EmitStoreOfComplex - Store a complex number into the specified l-value.
  void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);

  /// EmitLoadOfComplex - Load a complex number from the specified l-value.
  ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);

  ComplexPairTy EmitPromotedComplexExpr(const Expr *E, QualType PromotionType);
  llvm::Value *EmitPromotedScalarExpr(const Expr *E, QualType PromotionType);
  ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType);
  ComplexPairTy EmitUnPromotedValue(ComplexPairTy result,
                                    QualType PromotionType);

  Address emitAddrOfRealComponent(Address complex, QualType complexType);
  Address emitAddrOfImagComponent(Address complex, QualType complexType);
  /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
  /// global variable that has already been created for it. If the initializer
  /// has a different type than GV does, this may free GV and return a different
  /// one. Otherwise it just returns GV.
  llvm::GlobalVariable *
  AddInitializerToStaticVarDecl(const VarDecl &D, llvm::GlobalVariable *GV);

  // Emit an @llvm.invariant.start call for the given memory region.
  void EmitInvariantStart(llvm::Constant *Addr, CharUnits Size);

  /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
  /// variable with global storage.
  void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV,
                                bool PerformInit);

  llvm::Constant *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
                                   llvm::Constant *Addr);

  llvm::Function *createTLSAtExitStub(const VarDecl &VD,
                                      llvm::FunctionCallee Dtor,
                                      llvm::Constant *Addr,
                                      llvm::FunctionCallee &AtExit);

  /// Call atexit() with a function that passes the given argument to
  /// the given function.
  void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
                                    llvm::Constant *addr);

  /// Registers the dtor using 'llvm.global_dtors' for platforms that do not
  /// support an 'atexit()' function.
  void registerGlobalDtorWithLLVM(const VarDecl &D, llvm::FunctionCallee fn,
                                  llvm::Constant *addr);

  /// Call atexit() with function dtorStub.
  void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);

  /// Call unatexit() with function dtorStub.
  llvm::Value *unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub);

  /// Emit code in this function to perform a guarded variable
  /// initialization. Guarded initializations are used when it's not
  /// possible to prove that an initialization will be done exactly
  /// once, e.g. with a static local variable or a static data member
  /// of a class template.
  void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
                          bool PerformInit);

  enum class GuardKind { VariableGuard, TlsGuard };

  /// Emit a branch to select whether or not to perform guarded initialization.
  void EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
                                llvm::BasicBlock *InitBlock,
                                llvm::BasicBlock *NoInitBlock,
                                GuardKind Kind, const VarDecl *D);

  /// GenerateCXXGlobalInitFunc - Generates code for initializing global
  /// variables.
  void
  GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                            ArrayRef<llvm::Function *> CXXThreadLocals,
                            ConstantAddress Guard = ConstantAddress::invalid());

  /// GenerateCXXGlobalCleanUpFunc - Generates code for cleaning up global
  /// variables.
  void GenerateCXXGlobalCleanUpFunc(
      llvm::Function *Fn,
      ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
                          llvm::Constant *>>
          DtorsOrStermFinalizers);

  void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                        const VarDecl *D,
                                        llvm::GlobalVariable *Addr,
                                        bool PerformInit);

  void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);

  void EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, const Expr *Exp);

  void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);

  RValue EmitAtomicExpr(AtomicExpr *E);
  //===--------------------------------------------------------------------===//
  //                         Annotations Emission
  //===--------------------------------------------------------------------===//

  /// Emit an annotation call (intrinsic).
  llvm::Value *EmitAnnotationCall(llvm::Function *AnnotationFn,
                                  llvm::Value *AnnotatedVal,
                                  StringRef AnnotationStr,
                                  SourceLocation Location,
                                  const AnnotateAttr *Attr);

  /// Emit local annotations for the local variable V, declared by D.
  void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);

  /// Emit field annotations for the given field & value. Returns the
  /// annotation result.
  Address EmitFieldAnnotations(const FieldDecl *D, Address V);
  //===--------------------------------------------------------------------===//
  //                             Internal Helpers
  //===--------------------------------------------------------------------===//

  /// ContainsLabel - Return true if the statement contains a label in it. If
  /// this statement is not executed normally, it not containing a label means
  /// that we can just remove the code.
  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);

  /// containsBreak - Return true if the statement contains a break out of it.
  /// If the statement (recursively) contains a switch or loop with a break
  /// inside of it, this is fine.
  static bool containsBreak(const Stmt *S);

  /// Determine if the given statement might introduce a declaration into the
  /// current scope, by being a (possibly-labelled) DeclStmt.
  static bool mightAddDeclToScope(const Stmt *S);

  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
  /// to a constant, or if it does but contains a label, return false. If it
  /// constant folds return true and set the boolean result in Result.
  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result,
                                    bool AllowLabels = false);

  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
  /// to a constant, or if it does but contains a label, return false. If it
  /// constant folds return true and set the folded value.
  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
                                    bool AllowLabels = false);

  /// Ignore parentheses and logical-NOT to track conditions consistently.
  static const Expr *stripCond(const Expr *C);

  /// isInstrumentedCondition - Determine whether the given condition is an
  /// instrumentable condition (i.e. no "&&" or "||").
  static bool isInstrumentedCondition(const Expr *C);

  /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
  /// increments a profile counter based on the semantics of the given logical
  /// operator opcode. This is used to instrument branch condition coverage
  /// for logical operators.
  void EmitBranchToCounterBlock(const Expr *Cond, BinaryOperator::Opcode LOp,
                                llvm::BasicBlock *TrueBlock,
                                llvm::BasicBlock *FalseBlock,
                                uint64_t TrueCount = 0,
                                Stmt::Likelihood LH = Stmt::LH_None,
                                const Expr *CntrIdx = nullptr);

  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
  /// if statement) to the specified blocks. Based on the condition, this might
  /// try to simplify the codegen of the conditional based on the branch.
  /// TrueCount should be the number of times we expect the condition to
  /// evaluate to true based on PGO data.
  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
                            llvm::BasicBlock *FalseBlock, uint64_t TrueCount,
                            Stmt::Likelihood LH = Stmt::LH_None,
                            const Expr *ConditionalOp = nullptr);
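
  // A minimal sketch of the constant-folding fast path these helpers enable,
  // along the lines of if-statement emission (Cond, ThenS, and ElseS are
  // assumed locals for the condition and the two arms):
  //
  //   bool CondConstant;
  //   if (ConstantFoldsToSimpleInteger(Cond, CondConstant)) {
  //     const Stmt *Dead = CondConstant ? ElseS : ThenS;
  //     if (!Dead || !ContainsLabel(Dead)) {
  //       // Emit only the live arm; the dead arm can be dropped entirely.
  //     }
  //   }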
  /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
  /// nonnull, if \p LHS is marked _Nonnull.
  void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc);

  /// An enumeration which makes it easier to specify whether or not an
  /// operation is a subtraction.
  enum { NotSubtraction = false, IsSubtraction = true };

  /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
  /// detect undefined behavior when the pointer overflow sanitizer is enabled.
  /// \p SignedIndices indicates whether any of the GEP indices are signed.
  /// \p IsSubtraction indicates whether the expression used to form the GEP
  /// is a subtraction.
  llvm::Value *EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr,
                                      ArrayRef<llvm::Value *> IdxList,
                                      bool SignedIndices, bool IsSubtraction,
                                      SourceLocation Loc,
                                      const Twine &Name = "");

  Address EmitCheckedInBoundsGEP(Address Addr, ArrayRef<llvm::Value *> IdxList,
                                 llvm::Type *elementType, bool SignedIndices,
                                 bool IsSubtraction, SourceLocation Loc,
                                 CharUnits Align, const Twine &Name = "");
  /// Specifies which type of sanitizer check to apply when handling a
  /// particular builtin.
  enum BuiltinCheckKind {
    BCK_CTZPassedZero,
    BCK_CLZPassedZero,
  };

  /// Emits an argument for a call to a builtin. If the builtin sanitizer is
  /// enabled, a runtime check specified by \p Kind is also emitted.
  llvm::Value *EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);

  /// Emit a description of a type in a format suitable for passing to
  /// a runtime sanitizer handler.
  llvm::Constant *EmitCheckTypeDescriptor(QualType T);

  /// Convert a value into a format suitable for passing to a runtime
  /// sanitizer handler.
  llvm::Value *EmitCheckValue(llvm::Value *V);

  /// Emit a description of a source location in a format suitable for
  /// passing to a runtime sanitizer handler.
  llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);

  void EmitKCFIOperandBundle(const CGCallee &Callee,
                             SmallVectorImpl<llvm::OperandBundleDef> &Bundles);
  /// Create a basic block that will either trap or call a handler function in
  /// the UBSan runtime with the provided arguments, and create a conditional
  /// branch to it.
  void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
                 SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
                 ArrayRef<llvm::Value *> DynamicArgs);
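
  // A minimal sketch of an EmitCheck call site (Cond, Ty, Val, and Loc are
  // assumed locals; the handler/mask pairing follows the divrem-overflow
  // check):
  //
  //   llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
  //                                   EmitCheckTypeDescriptor(Ty)};
  //   EmitCheck(std::make_pair(Cond, SanitizerKind::IntegerDivideByZero),
  //             SanitizerHandler::DivremOverflow, StaticArgs,
  //             EmitCheckValue(Val));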
  /// Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
  /// if Cond is false.
  void EmitCfiSlowPathCheck(SanitizerMask Kind, llvm::Value *Cond,
                            llvm::ConstantInt *TypeId, llvm::Value *Ptr,
                            ArrayRef<llvm::Constant *> StaticArgs);

  /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
  /// checking is enabled. Otherwise, just emit an unreachable instruction.
  void EmitUnreachable(SourceLocation Loc);

  /// Create a basic block that will call the trap intrinsic, and emit a
  /// conditional branch to it, for the -ftrapv checks.
  void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID);

  /// Emit a call to trap or debugtrap and attach function attribute
  /// "trap-func-name" if specified.
  llvm::CallInst *EmitTrapCall(llvm::Intrinsic::ID IntrID);

  /// Emit a stub for the cross-DSO CFI check function.
  void EmitCfiCheckStub();

  /// Emit a cross-DSO CFI failure handling function.
  void EmitCfiCheckFail();

  /// Create a check for a function parameter that may potentially be
  /// declared as non-null.
  void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc,
                           AbstractCallee AC, unsigned ParmNum);
  void EmitNonNullArgCheck(Address Addr, QualType ArgType,
                           SourceLocation ArgLoc, AbstractCallee AC,
                           unsigned ParmNum);

  /// EmitCallArg - Emit a single call argument.
  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);

  /// EmitDelegateCallArg - We are performing a delegate call; that
  /// is, the current function is delegating to another one. Produce
  /// a r-value suitable for passing the given parameter.
  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
                           SourceLocation loc);

  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
  /// point operation, expressed as the maximum relative error in ulp.
  void SetFPAccuracy(llvm::Value *Val, float Accuracy);

  /// Set the minimum required accuracy of the given sqrt operation
  /// based on CodeGenOpts.
  void SetSqrtFPAccuracy(llvm::Value *Val);

  /// Set the minimum required accuracy of the given division operation based
  /// on CodeGenOpts.
  void SetDivFPAccuracy(llvm::Value *Val);

  /// Set the codegen fast-math flags.
  void SetFastMathFlags(FPOptions FPFeatures);
  // Truncate or extend a boolean vector to the requested number of elements.
  llvm::Value *emitBoolVecConversion(llvm::Value *SrcVec,
                                     unsigned NumElementsDst,
                                     const llvm::Twine &Name = "");

  // Adds a convergence_ctrl token to |Input| and emits the required parent
  // convergence instructions.
  template <typename CallType>
  CallType *addControlledConvergenceToken(CallType *Input) {
    return cast<CallType>(
        addConvergenceControlToken(Input, ConvergenceTokenStack.back()));
  }

  // Emits a convergence_loop instruction for the given |BB|, with |ParentToken|
  // as its parent convergence instruction.
  llvm::IntrinsicInst *emitConvergenceLoopToken(llvm::BasicBlock *BB,
                                                llvm::Value *ParentToken);
  // Adds a convergence_ctrl token with |ParentToken| as parent convergence
  // instruction to the call |Input|.
  llvm::CallBase *addConvergenceControlToken(llvm::CallBase *Input,
                                             llvm::Value *ParentToken);
  // Find the convergence_entry instruction for |F|, or emit one if none
  // exists. Returns the convergence instruction.
  llvm::IntrinsicInst *getOrEmitConvergenceEntryToken(llvm::Function *F);
  // Find the convergence_loop instruction for the loop defined by |LI|, or
  // emit one if none exists. Returns the convergence instruction.
  llvm::IntrinsicInst *getOrEmitConvergenceLoopToken(const LoopInfo *LI);
  llvm::MDNode *getRangeForLoadFromType(QualType Ty);
  void EmitReturnOfRValue(RValue RV, QualType Ty);

  void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);

  llvm::SmallVector<std::pair<llvm::WeakTrackingVH, llvm::Value *>, 4>
      DeferredReplacements;

  /// Set the address of a local variable.
  void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
    assert(!LocalDeclMap.count(VD) && "Decl already exists in LocalDeclMap!");
    LocalDeclMap.insert({VD, Addr});
  }

  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
  ///
  /// \param AI - The first function argument of the expansion.
  void ExpandTypeFromArgs(QualType Ty, LValue Dst,
                          llvm::Function::arg_iterator &AI);

  /// ExpandTypeToArgs - Expand a CallArg \arg Arg, with the LLVM type for \arg
  /// Ty, into individual arguments on the provided vector \arg IRCallArgs,
  /// starting at index \arg IRCallArgPos. See ABIArgInfo::Expand.
  void ExpandTypeToArgs(QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
                        SmallVectorImpl<llvm::Value *> &IRCallArgs,
                        unsigned &IRCallArgPos);
  std::pair<llvm::Value *, llvm::Type *>
  EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
               std::string &ConstraintStr);

  std::pair<llvm::Value *, llvm::Type *>
  EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
                     QualType InputType, std::string &ConstraintStr,
                     SourceLocation Loc);
  /// Attempts to statically evaluate the object size of E. If that
  /// fails, emits code to figure the size of E out for us. This is
  /// pass_object_size aware.
  ///
  /// If EmittedExpr is non-null, this will use that instead of re-emitting E.
  llvm::Value *evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                               llvm::IntegerType *ResType,
                                               llvm::Value *EmittedE,
                                               bool IsDynamic);

  /// Emits the size of E, as required by __builtin_object_size. This
  /// function is aware of pass_object_size parameters, and will act
  /// accordingly if E is a parameter with the pass_object_size attribute.
  llvm::Value *emitBuiltinObjectSize(const Expr *E, unsigned Type,
                                     llvm::IntegerType *ResType,
                                     llvm::Value *EmittedE, bool IsDynamic);

  llvm::Value *emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
                                           llvm::IntegerType *ResType);

  void emitZeroOrPatternForAutoVarInit(QualType type, const VarDecl &D,
                                       Address Loc);

public:
  enum class EvaluationOrder {
    ///! No language constraints on evaluation order.
    Default,
    ///! Language semantics require left-to-right evaluation.
    ForceLeftToRight,
    ///! Language semantics require right-to-left evaluation.
    ForceRightToLeft
  };

  // Wrapper for function prototype sources. Wraps either a FunctionProtoType
  // or an ObjCMethodDecl.
  struct PrototypeWrapper {
    llvm::PointerUnion<const FunctionProtoType *, const ObjCMethodDecl *> P;

    PrototypeWrapper(const FunctionProtoType *FT) : P(FT) {}
    PrototypeWrapper(const ObjCMethodDecl *MD) : P(MD) {}
  };

  void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype,
                    llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
                    AbstractCallee AC = AbstractCallee(),
                    unsigned ParamsToSkip = 0,
                    EvaluationOrder Order = EvaluationOrder::Default);
  /// EmitPointerWithAlignment - Given an expression with a pointer type,
  /// emit the value and compute our best estimate of the alignment of the
  /// pointee.
  ///
  /// \param BaseInfo - If non-null, this will be initialized with
  /// information about the source of the alignment and the may-alias
  /// attribute. Note that this function will conservatively fall back on
  /// the type when it doesn't recognize the expression and may-alias will
  /// be set to false.
  ///
  /// One reasonable way to use this information is when there's a language
  /// guarantee that the pointer must be aligned to some stricter value, and
  /// we're simply trying to ensure that sufficiently obvious uses of under-
  /// aligned objects don't get miscompiled; for example, a placement new
  /// into the address of a local variable. In such a case, it's quite
  /// reasonable to just ignore the returned alignment when it isn't from an
  /// explicit source.
  Address
  EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo = nullptr,
                           TBAAAccessInfo *TBAAInfo = nullptr,
                           KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
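
  // A minimal usage sketch (E is an assumed Expr* of pointer type): emit the
  // pointer once, then build an l-value for the pointee carrying the
  // recovered alignment, base info, and TBAA information.
  //
  //   LValueBaseInfo BaseInfo;
  //   TBAAAccessInfo TBAAInfo;
  //   Address Addr = EmitPointerWithAlignment(E, &BaseInfo, &TBAAInfo);
  //   LValue LV = MakeAddrLValue(Addr, E->getType()->getPointeeType(),
  //                              BaseInfo, TBAAInfo);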
  /// If \p E references a parameter with pass_object_size info or a constant
  /// array size modifier, emit the object size divided by the size of \p EltTy.
  /// Otherwise return null.
  llvm::Value *LoadPassedObjectSize(const Expr *E, QualType EltTy);

  void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK);
  struct MultiVersionResolverOption {
    llvm::Function *Function;
    struct Conds {
      StringRef Architecture;
      llvm::SmallVector<StringRef, 8> Features;

      Conds(StringRef Arch, ArrayRef<StringRef> Feats)
          : Architecture(Arch), Features(Feats.begin(), Feats.end()) {}
    } Conditions;

    MultiVersionResolverOption(llvm::Function *F, StringRef Arch,
                               ArrayRef<StringRef> Feats)
        : Function(F), Conditions(Arch, Feats) {}
  };

  // Emits the body of a multiversion function's resolver. Assumes that the
  // options are already sorted in the proper order, with the 'default' option
  // last (if it exists).
  void EmitMultiVersionResolver(llvm::Function *Resolver,
                                ArrayRef<MultiVersionResolverOption> Options);
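
  // A minimal sketch of building the (pre-sorted) option list; Resolver,
  // AVX2Fn, and DefaultFn are assumed llvm::Function* locals, with the
  // default option placed last as required:
  //
  //   StringRef AVX2[] = {"avx2"};
  //   SmallVector<MultiVersionResolverOption, 2> Options;
  //   Options.emplace_back(AVX2Fn, /*Arch=*/"", AVX2);
  //   Options.emplace_back(DefaultFn, "", ArrayRef<StringRef>());
  //   EmitMultiVersionResolver(Resolver, Options);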
  void
  EmitX86MultiVersionResolver(llvm::Function *Resolver,
                              ArrayRef<MultiVersionResolverOption> Options);
  void
  EmitAArch64MultiVersionResolver(llvm::Function *Resolver,
                                  ArrayRef<MultiVersionResolverOption> Options);
private:
  QualType getVarArgType(const Expr *Arg);

  void EmitDeclMetadata();

  BlockByrefHelpers *buildByrefHelpers(llvm::StructType &byrefType,
                                       const AutoVarEmission &emission);

  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);

  llvm::Value *GetValueForARMHint(unsigned BuiltinID);
  llvm::Value *EmitX86CpuIs(const CallExpr *E);
  llvm::Value *EmitX86CpuIs(StringRef CPUStr);
  llvm::Value *EmitX86CpuSupports(const CallExpr *E);
  llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
  llvm::Value *EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask);
  llvm::Value *EmitX86CpuInit();
  llvm::Value *FormX86ResolverCondition(const MultiVersionResolverOption &RO);
  llvm::Value *EmitAArch64CpuInit();
  llvm::Value *
  FormAArch64ResolverCondition(const MultiVersionResolverOption &RO);
  llvm::Value *EmitAArch64CpuSupports(const CallExpr *E);
  llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs);
};
inline DominatingLLVMValue::saved_type
DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
  if (!needsSaving(value)) return saved_type(value, false);

  // Otherwise, we need an alloca.
  auto align = CharUnits::fromQuantity(
      CGF.CGM.getDataLayout().getPrefTypeAlign(value->getType()));
  Address alloca =
      CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
  CGF.Builder.CreateStore(value, alloca);

  return saved_type(alloca.emitRawPointer(CGF), true);
}

inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
                                                 saved_type value) {
  // If the value says it wasn't saved, trust that it's still dominating.
  if (!value.getInt()) return value.getPointer();

  // Otherwise, it should be an alloca instruction, as set up in save().
  auto alloca = cast<llvm::AllocaInst>(value.getPointer());
  return CGF.Builder.CreateAlignedLoad(alloca->getAllocatedType(), alloca,
                                       alloca->getAlign());
}
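
// A minimal sketch of the save/restore pairing (CGF and Ptr are assumed to be
// an active CodeGenFunction and a dominating llvm::Value*): values captured
// by conditional cleanups are spilled once at the save point, then reloaded
// when the cleanup fires on a path the original definition may not dominate.
//
//   DominatingLLVMValue::saved_type Saved =
//       DominatingLLVMValue::save(CGF, Ptr);
//   // ... later, while emitting the cleanup ...
//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, Saved);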
} // end namespace CodeGen

// Map the LangOption for floating point exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind);

} // end namespace clang

#endif // LLVM_CLANG_LIB_CODEGEN_CODEGENFUNCTION_H