//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
namespace {
struct MemberCallInfo {
  RequiredArgs ReqArgs;
  // Number of prefix arguments for the call. Ignores the `this` pointer.
  unsigned PrefixSize;
};
} // namespace

static MemberCallInfo
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, GlobalDecl GD,
                                  llvm::Value *This, llvm::Value *ImplicitParam,
                                  QualType ImplicitParamTy, const CallExpr *CE,
                                  CallArgList &Args, CallArgList *RtlArgs) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isImplicitObjectMemberFunction() &&
         "Trying to emit a member or operator call expr on a static method!");

  // Push the this ptr.
  const CXXRecordDecl *RD =
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(GD);
  Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
  unsigned PrefixSize = Args.size() - 1;

  // And the rest of the call args.
  if (RtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
    Args.addFrom(*RtlArgs);
  } else if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned ArgsToSkip = 0;
    if (const auto *Op = dyn_cast<CXXOperatorCallExpr>(CE)) {
      if (const auto *M = dyn_cast<CXXMethodDecl>(Op->getCalleeDecl()))
        ArgsToSkip =
            static_cast<unsigned>(!M->isExplicitObjectMemberFunction());
    }
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }

  return {required, PrefixSize};
}
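
// Illustrative example (not part of the upstream file): for an operator call
// such as
//   struct S { S &operator=(const S &); };
//   void f(S &a, const S &b) { a = b; }
// the CXXOperatorCallExpr's first argument is the implicit object 'a', so
// ArgsToSkip is 1 above and only 'b' is emitted as an ordinary call argument;
// 'a' has already been pushed as the 'this' pointer.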
RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
    const CXXMethodDecl *MD, const CGCallee &Callee,
    ReturnValueSlot ReturnValue,
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
    const CallExpr *CE, CallArgList *RtlArgs) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  CallArgList Args;
  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
  return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
                  CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation());
}
RValue CodeGenFunction::EmitCXXDestructorCall(
    GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE) {
  const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());

  assert(!ThisTy.isNull());
  assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
         "Pointer/Object mixup");

  LangAS SrcAS = ThisTy.getAddressSpace();
  LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
  if (SrcAS != DstAS) {
    QualType DstTy = DtorDecl->getThisType();
    llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
    This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, DstAS,
                                                 NewType);
  }

  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, Dtor, This, ImplicitParam,
                                    ImplicitParamTy, CE, Args, nullptr);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
                  ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation{});
}
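
// Illustrative note (an assumption about the motivating targets, not stated
// in the upstream file): the address-space cast above matters for languages
// like OpenCL C++, where an object can live in a named address space (e.g.
// __local) while the destructor's 'this' is qualified for the generic
// address space, so the pointer must be cast before the call.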
RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
    const CXXPseudoDestructorExpr *E) {
  QualType DestroyedType = E->getDestroyedType();
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
    // Automatic Reference Counting:
    //   If the pseudo-expression names a retainable object with weak or
    //   strong lifetime, the object shall be released.
    Expr *BaseExpr = E->getBase();
    Address BaseValue = Address::invalid();
    Qualifiers BaseQuals;

    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
    if (E->isArrow()) {
      BaseValue = EmitPointerWithAlignment(BaseExpr);
      const auto *PTy = BaseExpr->getType()->castAs<PointerType>();
      BaseQuals = PTy->getPointeeType().getQualifiers();
    } else {
      LValue BaseLV = EmitLValue(BaseExpr);
      BaseValue = BaseLV.getAddress();
      QualType BaseTy = BaseExpr->getType();
      BaseQuals = BaseTy.getQualifiers();
    }

    switch (DestroyedType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      EmitARCRelease(Builder.CreateLoad(BaseValue,
                                        DestroyedType.isVolatileQualified()),
                     ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      EmitARCDestroyWeak(BaseValue);
      break;
    }
  } else {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitIgnoredExpr(E->getBase());
  }

  return RValue::get(nullptr);
}
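
// Illustrative example (not part of the upstream file): given
//   typedef int I;
//   void f(I *p) { p->~I(); }
// there is no destructor to run, so only the base expression 'p' is
// evaluated via EmitIgnoredExpr; under ARC, a pseudo-destructor applied to a
// __strong object pointer would instead take the EmitARCRelease path above.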
static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}
// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    CGCallee callee =
        CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
                    ReturnValue);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
}
RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall &&
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->IgnoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return
      // type of MD and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }

  bool TrivialForCodegen =
      MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion());
  bool TrivialAssignment =
      TrivialForCodegen &&
      (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      !MD->getParent()->mayInsertExtraPadding();

  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
  // operator before the LHS.
  CallArgList RtlArgStorage;
  CallArgList *RtlArgs = nullptr;
  LValue TrivialAssignmentRHS;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
    if (OCE->isAssignmentOp()) {
      if (TrivialAssignment) {
        TrivialAssignmentRHS = EmitLValue(CE->getArg(1));
      } else {
        RtlArgs = &RtlArgStorage;
        EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                     drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
                     /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft);
      }
    }
  }

  LValue This;
  if (IsArrow) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
    This = MakeAddrLValue(ThisValue, Base->getType()->getPointeeType(),
                          BaseInfo, TBAAInfo);
  } else {
    This = EmitLValue(Base);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
    // constructing a new complete object of type Ctor.
    assert(!RtlArgs);
    assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
    CallArgList Args;
    commonEmitCXXMemberOrOperatorCall(
        *this, {Ctor, Ctor_Complete}, This.getPointer(*this),
        /*ImplicitParam=*/nullptr,
        /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);

    EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, This.getAddress(), Args,
                           AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
                           /*NewPointerIsChecked=*/false);
    return RValue::get(nullptr);
  }

  if (TrivialForCodegen) {
    if (isa<CXXDestructorDecl>(MD))
      return RValue::get(nullptr);

    if (TrivialAssignment) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      // It's important that we use the result of EmitLValue here rather than
      // emitting call arguments, in order to preserve TBAA information from
      // the RHS.
      LValue RHS = isa<CXXOperatorCallExpr>(CE)
                       ? TrivialAssignmentRHS
                       : EmitLValue(*CE->arg_begin());
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This.getPointer(*this));
    }

    assert(MD->getParent()->mayInsertExtraPadding() &&
           "unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        GlobalDecl(Dtor, Dtor_Complete));
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object
  //   that is not of type X, or of a type derived from X, the behavior is
  //   undefined.
  SourceLocation CallLoc;
  ASTContext &C = getContext();
  if (CE)
    CallLoc = CE->getExprLoc();

  SanitizerSet SkippedChecks;
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    auto *IOA = CMCE->getImplicitObjectArgument();
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
    if (IsImplicitObjectCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
      SkippedChecks.set(SanitizerKind::Null, true);
  }

  if (sanitizePerformTypeCheck())
    EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
                  This.emitRawPointer(*this),
                  C.getRecordType(CalleeDecl->getParent()),
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;

  if (const CXXDestructorDecl *Dtor =
          dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
                                                This.getAddress(),
                                                cast<CXXMemberCallExpr>(CE));
    } else {
      GlobalDecl GD(Dtor, Dtor_Complete);
      CGCallee Callee;
      if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee =
            CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
      else
        Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);

      QualType ThisTy =
          IsArrow ? Base->getType()->getPointeeType() : Base->getType();
      EmitCXXDestructorCall(GD, Callee, This.getPointer(*this), ThisTy,
                            /*ImplicitParam=*/nullptr,
                            /*ImplicitParamTy=*/QualType(), CE);
    }
    return RValue::get(nullptr);
  }

  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
  // 'CalleeDecl' instead.

  CGCallee Callee;
  if (UseVirtualCall) {
    Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable;
      const CXXRecordDecl *RD;
      std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr(
          *this, This.getAddress(), CalleeDecl->getParent());
      EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
    else
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
                              GlobalDecl(DevirtualizedMethod));
  }

  if (MD->isVirtual()) {
    Address NewThisAddr =
        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
            *this, CalleeDecl, This.getAddress(), UseVirtualCall);
    This.setAddress(NewThisAddr);
  }

  return EmitCXXMemberOrOperatorCall(
      CalleeDecl, Callee, ReturnValue, This.getPointer(*this),
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
}
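
// Illustrative example (not part of the upstream file): for
//   struct B { virtual int f(); };
//   struct D final : B { int f() override; };
//   int g(D &d) { return d.f(); }
// getDevirtualizedMethod resolves the call to D::f(), so the code above emits
// a direct call instead of a vtable load, whereas 'B &b = d; b.f();' still
// goes through the virtual-call path.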
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const auto *MPT = MemFnExpr->getType()->castAs<MemberPointerType>();
  const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
  const auto *RD =
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());

  // Emit the 'this' pointer.
  Address This = Address::invalid();
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull);
  else
    This = EmitLValue(BaseExpr, KnownNonNull).getAddress();

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
                QualType(MPT->getClass(), 0));

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *ThisPtrForCall = nullptr;
  CGCallee Callee =
      CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                                      ThisPtrForCall, MemFnPtr,
                                                      MPT);

  CallArgList Args;

  QualType ThisType =
      getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(ThisPtrForCall), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arguments());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
                                                      /*PrefixSize=*/0),
                  Callee, ReturnValue, Args, nullptr, E == MustTailCall,
                  E->getExprLoc());
}
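
// Illustrative example (not part of the upstream file): for
//   struct S { int f(int); };
//   int call(S *s, int (S::*pmf)(int)) { return (s->*pmf)(1); }
// the ABI hook does the heavy lifting; under the Itanium ABI a virtual member
// function pointer stores an adjusted vtable offset, so the emitted code
// branches between a vtable lookup and a plain function pointer before the
// call.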
RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isImplicitObjectMemberFunction() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0));
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}
static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            Address DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = DestPtr.withElementType(CGF.Int8Ty);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits NVSize = Layout.getNonVirtualSize();

  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present; they are initialized by the most derived class before calling the
  // constructor.
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
  Stores.emplace_back(CharUnits::Zero(), NVSize);

  // Each store is split by the existence of a vbptr.
  CharUnits VBPtrWidth = CGF.getPointerSize();
  std::vector<CharUnits> VBPtrOffsets =
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
    // Stop before we hit any virtual base pointers located in virtual bases.
    if (VBPtrOffset >= NVSize)
      break;
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
    CharUnits LastStoreOffset = LastStore.first;
    CharUnits LastStoreSize = LastStore.second;

    CharUnits SplitBeforeOffset = LastStoreOffset;
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
    if (!SplitBeforeSize.isZero())
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);

    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
    assert(!SplitAfterSize.isNegative() && "negative store size!");
    if (!SplitAfterSize.isZero())
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
  if (!NullConstantForBase->isNullValue()) {
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
        CGF.CGM.getModule(), NullConstantForBase->getType(),
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
        NullConstantForBase, Twine());

    CharUnits Align =
        std::max(Layout.getNonVirtualAlignment(), DestPtr.getAlignment());
    NullVariable->setAlignment(Align.getAsAlign());

    Address SrcPtr(NullVariable, CGF.Int8Ty, Align);

    // Get and call the appropriate llvm.memcpy overload.
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemCpy(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
          StoreSizeVal);
    }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  } else {
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemSet(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.getInt8(0), StoreSizeVal);
    }
  }
}
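
// Illustrative example (not part of the upstream file): under the Microsoft
// ABI,
//   struct A { int a; };
//   struct B : virtual A { int b; };
// places a vbptr inside B's non-virtual region. Zero-initializing the B base
// sub-object must not overwrite that slot, which is why the initial
// (0, NVSize) store above is split around every vbptr offset.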
void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructionKind::Delegating:
    case CXXConstructionKind::Complete:
      EmitNullInitialization(Dest.getAddress(), E->getType());
      break;
    case CXXConstructionKind::VirtualBase:
    case CXXConstructionKind::NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
                                      CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    // FIXME: This only handles the simplest case, where the source object
    //        is passed directly as the first argument to the constructor.
    //        This should also handle stepping through implicit casts and
    //        conversion sequences which involve two steps, with a
    //        conversion operator followed by a converting constructor.
    const Expr *SrcObj = E->getArg(0);
    assert(SrcObj->isTemporaryObject(getContext(), CD->getParent()));
    assert(
        getContext().hasSameUnqualifiedType(E->getType(), SrcObj->getType()));
    EmitAggExpr(SrcObj, Dest);
    return;
  }

  if (const ArrayType *arrayType
        = getContext().getAsArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
                               Dest.isSanitizerChecked());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructionKind::Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this.
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructionKind::Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructionKind::VirtualBase:
      ForVirtualBase = true;
      [[fallthrough]];

    case CXXConstructionKind::NonVirtualBase:
      Type = Ctor_Base;
      break;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
  }
}
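
// Illustrative example (not part of the upstream file): for
//   struct S { S(); S(const S &); };
//   S s = S();
// E->isElidable() holds, so the branch above constructs the temporary
// directly into 's' via EmitAggExpr and no copy constructor call is emitted.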
void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
                                                 const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}
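
// Illustrative example (not part of the upstream file): for
//   struct S { ~S(); };
//   S *p = new S[n];
// the Itanium ABI stores an array cookie holding 'n' in front of the
// elements so that 'delete[] p' knows how many destructors to run; the
// allocation size becomes n * sizeof(S) plus the cookie size.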
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements =
      ConstantEmitter(CGF).tryEmitAbstract(*e->getArraySize(), e->getType());
  if (!numElements)
    numElements = CGF.EmitScalarExpr(*e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
          dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countl_zero())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that.
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold =
          llvm::APInt::getOneBitSet(numElementsWidth, sizeWidth);

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Function *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
          llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
              llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Function *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr,
                                    AggValueSlot::Overlap_t MayOverlap) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased,
                              MayOverlap, AggValueSlot::IsNotZeroed,
                              AggValueSlot::IsSanitizerChecked);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}
void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  CleanupDeactivationScope deactivation(*this);
  bool pushedCleanup = false;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
      BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };

  const InitListExpr *ILE = dyn_cast<InitListExpr>(Init);
  const CXXParenListInitExpr *CPLIE = nullptr;
  const StringLiteral *SL = nullptr;
  const ObjCEncodeExpr *OCEE = nullptr;
  const Expr *IgnoreParen = nullptr;
  if (!ILE) {
    IgnoreParen = Init->IgnoreParenImpCasts();
    CPLIE = dyn_cast<CXXParenListInitExpr>(IgnoreParen);
    SL = dyn_cast<StringLiteral>(IgnoreParen);
    OCEE = dyn_cast<ObjCEncodeExpr>(IgnoreParen);
  }

  // If the initializer is an initializer list, first do the explicit elements.
  if (ILE || CPLIE || SL || OCEE) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if ((ILE && ILE->isStringLiteralInit()) || SL || OCEE) {
      if (!ILE)
        Init = IgnoreParen;
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
      AggValueSlot Slot =
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased,
                                AggValueSlot::DoesNotOverlap,
                                AggValueSlot::IsNotZeroed,
                                AggValueSlot::IsSanitizerChecked);
      EmitAggExpr(ILE ? ILE->getInit(0) : Init, Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
              ->getZExtSize();
      CurPtr = Builder.CreateConstInBoundsGEP(
          CurPtr, InitListElements, "string.init.end");

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }

    ArrayRef<const Expr *> InitExprs =
        ILE ? ILE->inits() : CPLIE->getInitExprs();
    InitListElements = InitExprs.size();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = CurPtr.withElementType(ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (DtorKind) {
      AllocaTrackerRAII AllocaTracker(*this);
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      llvm::Instruction *DominatingIP =
          Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy));
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      pushIrregularPartialArrayCleanup(BeginPtr.emitRawPointer(*this),
                                       EndOfInit, ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      cast<EHCleanupScope>(*EHStack.find(EHStack.stable_begin()))
          .AddAuxAllocas(AllocaTracker.Take());
      DeferredDeactivationCleanupStack.push_back(
          {EHStack.stable_begin(), DominatingIP});
      pushedCleanup = true;
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    unsigned i = 0;
    for (const Expr *IE : InitExprs) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, IE, IE->getType(), CurPtr,
                              AggValueSlot::DoesNotOverlap);
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
                                                 CurPtr.emitRawPointer(*this),
                                                 Builder.getSize(1),
                                                 "array.exp.next"),
                       CurPtr.getElementType(),
                       StartAlign.alignmentAtOffset((++i) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE ? ILE->getArrayFiller() : CPLIE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = CurPtr.withElementType(BeginPtr.getElementType());
  }

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    deactivation.ForceDeactivate();
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If new expression did not specify value-initialization, then there
      // is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit.isValid())
      Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
                               /*NewPointerIsChecked*/ true,
                               CCE->requiresZeroInitialization());
    return;
  }

  // If this is value-initialization, we can usually use memset.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");

  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // If we have a struct whose every field is value-initialized, we can
  // usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
    if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
      if (RType->getDecl()->isStruct()) {
        unsigned NumElements = 0;
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
          NumElements = CXXRD->getNumBases();
        for (auto *Field : RType->getDecl()->fields())
          if (!Field->isUnnamedBitField())
            ++NumElements;
        // FIXME: Recurse into nested InitListExprs.
        if (ILE->getNumInits() == NumElements)
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
              --NumElements;
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
          return;
      }
    }
  }

  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr = Builder.CreateInBoundsGEP(
      BeginPtr.getElementType(), BeginPtr.emitRawPointer(*this), NumElements,
      "array.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty = Builder.CreateICmpEQ(CurPtr.emitRawPointer(*this),
                                                EndPtr, "array.isempty");
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
  }

  // Enter the loop.
  EmitBlock(LoopBB);

  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
      Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
  CurPtrPhi->addIncoming(CurPtr.emitRawPointer(*this), EntryBB);

  CurPtr = Address(CurPtrPhi, CurPtr.getElementType(), ElementAlign);

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit.isValid())
    Builder.CreateStore(CurPtr.emitRawPointer(*this), EndOfInit);

  // Enter a partial-destruction Cleanup if necessary.
  if (!pushedCleanup && needsEHCleanup(DtorKind)) {
    llvm::Instruction *DominatingIP =
        Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(Int8PtrTy));
    pushRegularPartialArrayCleanup(BeginPtr.emitRawPointer(*this),
                                   CurPtr.emitRawPointer(*this), ElementType,
                                   ElementAlign, getDestroyer(DtorKind));
    DeferredDeactivationCleanupStack.push_back(
        {EHStack.stable_begin(), DominatingIP});
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
                          AggValueSlot::DoesNotOverlap);

  // Leave the Cleanup if we entered one.
  deactivation.ForceDeactivate();

  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr = Builder.CreateConstInBoundsGEP1_32(
      ElementTy, CurPtr.emitRawPointer(*this), 1, "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());

  EmitBlock(ContBB);
}
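
// Illustrative example (not part of the upstream file): for
//   int *p = new int[8]{1, 2, 3};
// the three explicit initializers are stored one element at a time, and the
// remaining five elements are zeroed with a single memset by
// TryMemsetInitialization; when neither a constructor-call loop nor a memset
// applies, the 'new.loop' blocks above initialize the trailing elements one
// at a time.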
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType, llvm::Type *ElementTy,
                               Address NewPtr, llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  ApplyDebugLocation DL(CGF, E);
  if (E->isArray())
    CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
                                AllocSizeWithoutCookie);
  else if (const Expr *Init = E->getInitializer())
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
                            AggValueSlot::DoesNotOverlap);
}
/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *CalleeDecl,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::CallBase *CallOrInvoke;
  llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
  CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
                       Args, CalleeType, /*ChainCall=*/false),
                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    CallOrInvoke->addFnAttr(llvm::Attribute::Builtin);
  }

  return RV;
}
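
// Illustrative note (not part of the upstream file): because the call is
// marked 'builtin', LLVM may elide a matched allocation, so in
//   int f() { int *p = new int(5); int r = *p; delete p; return r; }
// the optimizer is free to remove the operator new / operator delete pair
// entirely, as C++14 [expr.new]p10 allows.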
RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
                                                 const CallExpr *TheCall,
                                                 bool IsDelete) {
  CallArgList Args;
  EmitCallArgs(Args, Type, TheCall->arguments());
  // Find the allocation or deallocation function that we're calling.
  ASTContext &Ctx = getContext();
  DeclarationName Name = Ctx.DeclarationNames
      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);

  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
        return EmitNewDeleteCall(*this, FD, Type, Args);
  llvm_unreachable("predeclared global operator new/delete is missing");
}
namespace {
/// The parameters to pass to a usual operator delete.
struct UsualDeleteParams {
  bool DestroyingDelete = false;
  bool Size = false;
  bool Alignment = false;
};
} // namespace

static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
  UsualDeleteParams Params;

  const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
  auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();

  // The first argument is always a void*.
  ++AI;

  // The next parameter may be a std::destroying_delete_t.
  if (FD->isDestroyingOperatorDelete()) {
    Params.DestroyingDelete = true;
    assert(AI != AE);
    ++AI;
  }

  // Figure out what other parameters we should be implicitly passing.
  if (AI != AE && (*AI)->isIntegerType()) {
    Params.Size = true;
    ++AI;
  }

  if (AI != AE && (*AI)->isAlignValT()) {
    Params.Alignment = true;
    ++AI;
  }

  assert(AI == AE && "unexpected usual deallocation function parameter");
  return Params;
}
namespace {
  /// A cleanup to call the given 'operator delete' function upon abnormal
  /// exit from a new expression. Templated on a traits type that deals with
  /// ensuring that the arguments dominate the cleanup if necessary.
  template<typename Traits>
  class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
    /// Type used to hold llvm::Value*s.
    typedef typename Traits::ValueTy ValueTy;
    /// Type used to hold RValues.
    typedef typename Traits::RValueTy RValueTy;
    struct PlacementArg {
      RValueTy ArgValue;
      QualType ArgType;
    };

    unsigned NumPlacementArgs : 31;
    LLVM_PREFERRED_TYPE(bool)
    unsigned PassAlignmentToPlacementDelete : 1;
    const FunctionDecl *OperatorDelete;
    ValueTy Ptr;
    ValueTy AllocSize;
    CharUnits AllocAlign;

    PlacementArg *getPlacementArgs() {
      return reinterpret_cast<PlacementArg *>(this + 1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(PlacementArg);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete, ValueTy Ptr,
                        ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
                        CharUnits AllocAlign)
      : NumPlacementArgs(NumPlacementArgs),
        PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
        OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
        AllocAlign(AllocAlign) {}

    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = {Arg, Type};
    }

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
      CallArgList DeleteArgs;

      // The first argument is always a void* (or C* for a destroying operator
      // delete for class type C).
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));

      // Figure out what other parameters we should be implicitly passing.
      UsualDeleteParams Params;
      if (NumPlacementArgs) {
        // A placement deallocation function is implicitly passed an alignment
        // if the placement allocation function was, but is never passed a
        // size.
        Params.Alignment = PassAlignmentToPlacementDelete;
      } else {
        // For a non-placement new-expression, 'operator delete' can take a
        // size and/or an alignment if it has the right parameters.
        Params = getUsualDeleteParams(OperatorDelete);
      }

      assert(!Params.DestroyingDelete &&
             "should not call destroying delete in a new-expression");

      // The second argument can be a std::size_t (for non-placement delete).
      if (Params.Size)
        DeleteArgs.add(Traits::get(CGF, AllocSize),
                       CGF.getContext().getSizeType());

      // The next (second or third) argument can be a std::align_val_t, which
      // is an enum whose underlying type is std::size_t.
      // FIXME: Use the right type as the parameter type. Note that in a call
      // to operator delete(size_t, ...), we may not have it available.
      if (Params.Alignment)
        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
                           CGF.SizeTy, AllocAlign.getQuantity())),
                       CGF.getContext().getSizeType());

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        auto Arg = getPlacementArgs()[I];
        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
      }

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };
} // namespace
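
// Illustrative example (not part of the upstream file; 'Arena' is
// hypothetical): given
//   void *operator new(std::size_t, Arena &);
//   void operator delete(void *, Arena &);
//   T *p = new (arena) T;
// if T's constructor throws, this cleanup calls the matching placement
// operator delete with the saved 'arena' argument so the storage is
// reclaimed.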
1509 /// Enter a cleanup to call 'operator delete' if the initializer in a
1510 /// new-expression throws.
1511 static void EnterNewDeleteCleanup(CodeGenFunction
&CGF
,
1512 const CXXNewExpr
*E
,
1514 llvm::Value
*AllocSize
,
1515 CharUnits AllocAlign
,
1516 const CallArgList
&NewArgs
) {
1517 unsigned NumNonPlacementArgs
= E
->passAlignment() ? 2 : 1;
1519 // If we're not inside a conditional branch, then the cleanup will
1520 // dominate and we can do the easier (and more efficient) thing.
1521 if (!CGF
.isInConditionalBranch()) {
1522 struct DirectCleanupTraits
{
1523 typedef llvm::Value
*ValueTy
;
1524 typedef RValue RValueTy
;
1525 static RValue
get(CodeGenFunction
&, ValueTy V
) { return RValue::get(V
); }
1526 static RValue
get(CodeGenFunction
&, RValueTy V
) { return V
; }
1529 typedef CallDeleteDuringNew
<DirectCleanupTraits
> DirectCleanup
;
1531 DirectCleanup
*Cleanup
= CGF
.EHStack
.pushCleanupWithExtra
<DirectCleanup
>(
1532 EHCleanup
, E
->getNumPlacementArgs(), E
->getOperatorDelete(),
1533 NewPtr
.emitRawPointer(CGF
), AllocSize
, E
->passAlignment(), AllocAlign
);
1534 for (unsigned I
= 0, N
= E
->getNumPlacementArgs(); I
!= N
; ++I
) {
1535 auto &Arg
= NewArgs
[I
+ NumNonPlacementArgs
];
1536 Cleanup
->setPlacementArg(I
, Arg
.getRValue(CGF
), Arg
.Ty
);
  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
      DominatingValue<RValue>::save(CGF, RValue::get(NewPtr, CGF));
  DominatingValue<RValue>::saved_type SavedAllocSize =
      DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  struct ConditionalCleanupTraits {
    typedef DominatingValue<RValue>::saved_type ValueTy;
    typedef DominatingValue<RValue>::saved_type RValueTy;
    static RValue get(CodeGenFunction &CGF, ValueTy V) {
      return V.restore(CGF);
    }
  };
  typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;

  ConditionalCleanup *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
                                              E->getNumPlacementArgs(),
                                              E->getOperatorDelete(),
                                              SavedNewPtr,
                                              SavedAllocSize,
                                              E->passAlignment(),
                                              AllocAlign);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
    auto &Arg = NewArgs[I + NumNonPlacementArgs];
    Cleanup->setPlacementArg(
        I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
  }

  CGF.initFullExprCleanup();
}
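
// Illustration (not from the original source): the cleanup entered above is
// what frees the storage when an initializer throws, e.g.
//   struct X { X() { throw 0; } };
//   X *p = new X; // operator new runs, X::X() throws, operator delete runs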
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  // If there is a brace-initializer or C++20 parenthesized initializer, we
  // cannot allocate fewer elements than there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    const Expr *Init = E->getInitializer();
    const InitListExpr *ILE = dyn_cast<InitListExpr>(Init);
    const CXXParenListInitExpr *CPLIE = dyn_cast<CXXParenListInitExpr>(Init);
    const Expr *IgnoreParen = Init->IgnoreParenImpCasts();
    if ((ILE && ILE->isStringLiteralInit()) ||
        isa<StringLiteral>(IgnoreParen) || isa<ObjCEncodeExpr>(IgnoreParen)) {
      minElements =
          cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
              ->getZExtSize();
    } else if (ILE || CPLIE) {
      minElements = ILE ? ILE->getNumInits() : CPLIE->getInitExprs().size();
    }
  }
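
  // Illustration (not from the original source): for
  //   int *p = new int[n]{1, 2, 3};
  // minElements is 3, and the size computation below must account for the
  // case where n is smaller than that.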
  llvm::Value *numElements = nullptr;
  llvm::Value *allocSizeWithoutCookie = nullptr;
  llvm::Value *allocSize =
      EmitCXXNewAllocSize(*this, E, minElements, numElements,
                          allocSizeWithoutCookie);
  CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);

  // Emit the allocation call. If the allocator is a global placement
  // operator, just "inline" it directly.
  Address allocation = Address::invalid();
  CallArgList allocatorArgs;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(E->getNumPlacementArgs() == 1);
    const Expr *arg = *E->placement_arguments().begin();

    LValueBaseInfo BaseInfo;
    allocation = EmitPointerWithAlignment(arg, &BaseInfo);

    // The pointer expression will, in many cases, be an opaque void*.
    // In these cases, discard the computed alignment and use the
    // formal alignment of the allocated type.
    if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
      allocation.setAlignment(allocAlign);

    // Set up allocatorArgs for the call to operator delete if it's not
    // the reserved global operator.
    if (E->getOperatorDelete() &&
        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
      allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
      allocatorArgs.add(RValue::get(allocation, *this), arg->getType());
    }
  } else {
    const FunctionProtoType *allocatorType =
        allocator->getType()->castAs<FunctionProtoType>();
    unsigned ParamsToSkip = 0;

    // The allocation size is the first argument.
    QualType sizeType = getContext().getSizeType();
    allocatorArgs.add(RValue::get(allocSize), sizeType);
    ++ParamsToSkip;

    if (allocSize != allocSizeWithoutCookie) {
      CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
      allocAlign = std::max(allocAlign, cookieAlign);
    }

    // The allocation alignment may be passed as the second argument.
    if (E->passAlignment()) {
      QualType AlignValT = sizeType;
      if (allocatorType->getNumParams() > 1) {
        AlignValT = allocatorType->getParamType(1);
        assert(getContext().hasSameUnqualifiedType(
                   AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
                   sizeType) &&
               "wrong type for alignment parameter");
        ++ParamsToSkip;
      } else {
        // Corner case, passing alignment to 'operator new(size_t, ...)'.
        assert(allocator->isVariadic() && "can't pass alignment to allocator");
      }
      allocatorArgs.add(
          RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
          AlignValT);
    }

    // FIXME: Why do we not pass a CalleeDecl here?
    EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
                 /*AC*/ AbstractCallee(), /*ParamsToSkip*/ ParamsToSkip);
    RValue RV =
        EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);

    // Set !heapallocsite metadata on the call to operator new.
    if (getDebugInfo())
      if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal()))
        getDebugInfo()->addHeapAllocSiteMetadata(newCall, allocType,
                                                 E->getExprLoc());

    // If this was a call to a global replaceable allocation function that does
    // not take an alignment argument, the allocator is known to produce
    // storage that's suitably aligned for any object that fits, up to a known
    // threshold. Otherwise assume it's suitably aligned for the allocated type.
    CharUnits allocationAlign = allocAlign;
    if (!E->passAlignment() &&
        allocator->isReplaceableGlobalAllocationFunction()) {
      unsigned AllocatorAlign = llvm::bit_floor(std::min<uint64_t>(
          Target.getNewAlign(), getContext().getTypeSize(allocType)));
      allocationAlign = std::max(
          allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
    }

    allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign);
  }
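
  // Illustration (not from the original source): for an over-aligned type,
  //   struct alignas(64) V { char c; };
  //   V *p = new V;
  // passAlignment() is true and the call emitted above is effectively
  //   operator new(sizeof(V), std::align_val_t{64})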
  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec or is the reserved placement new) and we have an
  // interesting initializer or will be running sanitizers on the
  // initialization.
  bool nullCheck = E->shouldNullCheckAllocation() &&
                   (!allocType.isPODType(getContext()) || E->hasInitializer() ||
                    sanitizePerformTypeCheck());
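
  // Illustration (not from the original source): the null check matters for
  //   T *p = new (std::nothrow) T(x);
  // where the allocator may return null, so the initializer must only run on
  // the non-null path set up below.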
  llvm::BasicBlock *nullCheckBB = nullptr;
  llvm::BasicBlock *contBB = nullptr;

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }
  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
                          allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }
  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementTy = ConvertTypeForMem(allocType);
  Address result = allocation.withElementType(elementTy);
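
  // Illustration (not from the original source): for
  //   Obj *p = new Obj[n]; // Obj has a non-trivial destructor
  // ABIs such as Itanium prepend a cookie recording n, so allocSize exceeds
  // allocSizeWithoutCookie and the cookie is initialized here.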
  // Pass the pointer through launder.invariant.group to avoid propagation of
  // vptr information that may be embedded in the previous type.
  // To avoid breaking LTO across different optimization levels, we do this
  // regardless of optimization level.
  if (CGM.getCodeGenOpts().StrictVTablePointers &&
      allocator->isReservedGlobalPlacementOperator())
    result = Builder.CreateLaunderInvariantGroup(result);
  // Emit sanitizer checks for the pointer value now, so that in the case of
  // an array it is checked only once and not at each constructor call. We may
  // have already checked that the pointer is non-null.
  // FIXME: If we have an array cookie and a potentially-throwing allocator,
  // we'll null check the wrong pointer here.
  SanitizerSet SkippedChecks;
  SkippedChecks.set(SanitizerKind::Null, nullCheck);
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
                E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
                result, allocType, result.getAlignment(), SkippedChecks,
                numElements);

  EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
                     allocSizeWithoutCookie);
  llvm::Value *resultPtr = result.emitRawPointer(*this);
  // NewPtr is a pointer to the base element type. If we're
  // allocating an array of arrays, we'll need to cast back to the
  // array pointer type.
  llvm::Type *resultType = ConvertTypeForMem(E->getType());
  if (resultPtr->getType() != resultType)
    resultPtr = Builder.CreateBitCast(resultPtr, resultType);
  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
    PHI->addIncoming(resultPtr, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
                     nullCheckBB);

    resultPtr = PHI;
  }

  return resultPtr;
}
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr, QualType DeleteTy,
                                     llvm::Value *NumElements,
                                     CharUnits CookieSize) {
  assert((!NumElements && CookieSize.isZero()) ||
         DeleteFD->getOverloadedOperator() == OO_Array_Delete);
  const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>();
  CallArgList DeleteArgs;

  auto Params = getUsualDeleteParams(DeleteFD);
  auto ParamTypeIt = DeleteFTy->param_type_begin();

  // Pass the pointer itself.
  QualType ArgTy = *ParamTypeIt++;
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
  // Pass the std::destroying_delete tag if present.
  llvm::AllocaInst *DestroyingDeleteTag = nullptr;
  if (Params.DestroyingDelete) {
    QualType DDTag = *ParamTypeIt++;
    llvm::Type *Ty = getTypes().ConvertType(DDTag);
    CharUnits Align = CGM.getNaturalTypeAlignment(DDTag);
    DestroyingDeleteTag = CreateTempAlloca(Ty, "destroying.delete.tag");
    DestroyingDeleteTag->setAlignment(Align.getAsAlign());
    DeleteArgs.add(
        RValue::getAggregate(Address(DestroyingDeleteTag, Ty, Align)), DDTag);
  }
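
  // Illustration (not from the original source): a destroying operator delete
  // (C++20) is declared as, e.g.
  //   void T::operator delete(T *, std::destroying_delete_t);
  // and runs the destructor itself; the tag argument is the empty object
  // materialized above.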
  // Pass the size if the delete function has a size_t parameter.
  if (Params.Size) {
    QualType SizeType = *ParamTypeIt++;
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
                                               DeleteTypeSize.getQuantity());

    // For array new, multiply by the number of elements.
    if (NumElements)
      Size = Builder.CreateMul(Size, NumElements);

    // If there is a cookie, add the cookie size.
    if (!CookieSize.isZero())
      Size = Builder.CreateAdd(
          Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));

    DeleteArgs.add(RValue::get(Size), SizeType);
  }
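
  // Illustration (not from the original source): with C++14 sized
  // deallocation, 'delete[] p' for p = new Obj[n] calls
  //   operator delete[](ptr, n * sizeof(Obj) + cookie)
  // which is exactly the Size value built above.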
  // Pass the alignment if the delete function has an align_val_t parameter.
  if (Params.Alignment) {
    QualType AlignValType = *ParamTypeIt++;
    CharUnits DeleteTypeAlign =
        getContext().toCharUnitsFromBits(getContext().getTypeAlignIfKnown(
            DeleteTy, true /* NeedsPreferredAlignment */));
    llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
                                                DeleteTypeAlign.getQuantity());
    DeleteArgs.add(RValue::get(Align), AlignValType);
  }
  assert(ParamTypeIt == DeleteFTy->param_type_end() &&
         "unknown parameter to usual delete function");

  // Emit the call to delete.
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);

  // If call argument lowering didn't use the destroying_delete_t alloca,
  // remove it again.
  if (DestroyingDeleteTag && DestroyingDeleteTag->use_empty())
    DestroyingDeleteTag->eraseFromParent();
}
namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete final : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}
void
CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
                                             llvm::Value *CompletePtr,
                                             QualType ElementType) {
  EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
                                        OperatorDelete, ElementType);
}
/// Emit the code for deleting a single object with a destroying operator
/// delete. If the element type has a non-virtual destructor, Ptr has already
/// been converted to the type of the parameter of 'operator delete'. Otherwise
/// Ptr points to an object of the static type.
static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
                                       const CXXDeleteExpr *DE, Address Ptr,
                                       QualType ElementType) {
  auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
  if (Dtor && Dtor->isVirtual())
    CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                Dtor);
  else
    CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.emitRawPointer(CGF),
                       ElementType);
}
/// Emit the code for deleting a single object.
/// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
/// if not.
static bool EmitObjectDelete(CodeGenFunction &CGF,
                             const CXXDeleteExpr *DE,
                             Address Ptr,
                             QualType ElementType,
                             llvm::BasicBlock *UnconditionalDeleteBlock) {
  // C++11 [expr.delete]p3:
  //   If the static type of the object to be deleted is different from its
  //   dynamic type, the static type shall be a base class of the dynamic type
  //   of the object to be deleted and the static type shall have a virtual
  //   destructor or the behavior is undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall, DE->getExprLoc(), Ptr,
                    ElementType);

  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
  assert(!OperatorDelete->isDestroyingOperatorDelete());
  // Find the destructor for the type, if applicable. If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = nullptr;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        bool UseVirtualCall = true;
        const Expr *Base = DE->getArgument();
        if (auto *DevirtualizedDtor =
                dyn_cast_or_null<const CXXDestructorDecl>(
                    Dtor->getDevirtualizedMethod(
                        Base, CGF.CGM.getLangOpts().AppleKext))) {
          UseVirtualCall = false;
          const CXXRecordDecl *DevirtualizedClass =
              DevirtualizedDtor->getParent();
          if (declaresSameEntity(getCXXRecord(Base), DevirtualizedClass)) {
            // Devirtualized to the class of the base type (the type of the
            // whole expression).
            Dtor = DevirtualizedDtor;
          } else {
            // Devirtualized to some other type. Would need to cast the this
            // pointer to that type but we don't have support for that yet, so
            // do a virtual call. FIXME: handle the case where it is
            // devirtualized to the derived type (the type of the inner
            // expression) as in EmitCXXMemberOrOperatorMemberCallExpr.
            UseVirtualCall = true;
          }
        }
        if (UseVirtualCall) {
          CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                      Dtor);
          return false;
        }
      }
    }
  }
  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(
      NormalAndEHCleanup, Ptr.emitRawPointer(CGF), OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr, ElementType);
  else if (auto Lifetime = ElementType.getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }
  // When optimizing for size, call 'operator delete' unconditionally.
  if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) {
    CGF.EmitBlock(UnconditionalDeleteBlock);
    CGF.PopCleanupBlock();
    return true;
  }

  CGF.PopCleanupBlock();
  return false;
}
namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete final : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
                         CookieSize);
    }
  };
}
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            Address deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = nullptr;
  llvm::Value *allocatedPtr = nullptr;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);
  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
    CharUnits elementAlign =
        deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);

    llvm::Value *arrayBegin = deletedPtr.emitRawPointer(CGF);
    llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
        deletedPtr.getElementType(), arrayBegin, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}
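
// Illustration (not from the original source): for 'delete[] p', the cookie
// read above recovers the element count, each element is destroyed via the
// pushed destroyer, and 'operator delete[]' is then called on the original,
// cookie-adjusted allocation even if a destructor throws.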
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  Address Ptr = EmitPointerWithAlignment(Arg);

  // Null check the pointer.
  //
  // We could avoid this null check if we can determine that the object
  // destruction is trivial and doesn't require an array cookie; we can
  // unconditionally perform the operator delete call in that case. For now, we
  // assume that deleted pointers are null rarely enough that it's better to
  // keep the branch. This might be worth revisiting for a -O0 code size win.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);
  Ptr.setKnownNonNull();
  QualType DeleteTy = E->getDestroyedType();

  // A destroying operator delete overrides the entire operation of the
  // delete expression.
  if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
    EmitDestroyingObjectDelete(*this, E, Ptr, DeleteTy);
    EmitBlock(DeleteEnd);
    return;
  }
  // We might be deleting a pointer to array. If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*, 8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, ConvertTypeForMem(DeleteTy),
                                    Ptr.getAlignment(), "del.first");
  }
  assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
    EmitBlock(DeleteEnd);
  } else {
    if (!EmitObjectDelete(*this, E, Ptr, DeleteTy, DeleteEnd))
      EmitBlock(DeleteEnd);
  }
}
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy,
                                         bool HasNullCheck) {
  // Get the vtable pointer.
  Address ThisPtr = CGF.EmitLValue(E).getAddress();

  QualType SrcRecordTy = E->getType();

  // C++ [class.cdtor]p4:
  //   If the operand of typeid refers to the object under construction or
  //   destruction and the static type of the operand is neither the
  //   constructor or destructor's class nor one of its bases, the behavior is
  //   undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
                    ThisPtr, SrcRecordTy);
  // Whether we need an explicit null pointer check. For example, with the
  // Microsoft ABI, if this is a call to __RTtypeid, the null pointer check and
  // exception throw are inside the __RTtypeid(nullptr) call.
  if (HasNullCheck &&
      CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(SrcRecordTy)) {
    llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
    llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
    CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

    CGF.EmitBlock(BadTypeidBlock);
    CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
    CGF.EmitBlock(EndBlock);
  }
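
  // Illustration (not from the original source): per [expr.typeid], when p is
  // a null pointer to a polymorphic type,
  //   typeid(*p)
  // must throw std::bad_typeid instead of dereferencing p; that is the branch
  // emitted above (unless the C++ ABI folds the check into its runtime call).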
  return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
                                        StdTypeInfoPtrTy);
}
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  // Ideally, we would like to use GlobalsInt8PtrTy here, however, we cannot,
  // primarily because the result of applying typeid is a value of type
  // type_info, which is declared & defined by the standard library
  // implementation and expects to operate on the generic (default) AS.
  // https://reviews.llvm.org/D157452 has more context, and a possible solution.
  llvm::Type *PtrTy = Int8PtrTy;
  LangAS GlobAS = CGM.GetGlobalVarAddressSpace(nullptr);

  auto MaybeASCast = [=](auto &&TypeInfo) {
    if (GlobAS == LangAS::Default)
      return TypeInfo;
    return getTargetHooks().performAddrSpaceCast(CGM, TypeInfo, GlobAS,
                                                 LangAS::Default, PtrTy);
  };

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
        CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
    return MaybeASCast(TypeInfo);
  }
  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  // If the operand is already the most derived object, no need to look up the
  // vtable.
  if (E->isPotentiallyEvaluated() && !E->isMostDerived(getContext()))
    return EmitTypeidFromVTable(*this, E->getExprOperand(), PtrTy,
                                E->hasNullCheck());

  QualType OperandTy = E->getExprOperand()->getType();
  return MaybeASCast(CGM.GetAddrOfRTTIDescriptor(OperandTy));
}
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast.
  if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
    return nullptr;

  CGF.Builder.ClearInsertionPoint();
  return llvm::PoisonValue::get(DestLTy);
}
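
// Illustration (not from the original source): the pointer/reference split
// above follows [expr.dynamic.cast]:
//   Derived *pd = dynamic_cast<Derived *>(pb); // failure yields nullptr
//   Derived &rd = dynamic_cast<Derived &>(rb); // failure throws std::bad_cast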
llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
                                              const CXXDynamicCastExpr *DCE) {
  CGM.EmitExplicitCastExprType(DCE, this);
  QualType DestTy = DCE->getTypeAsWritten();

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p7:
  //   If T is "pointer to cv void," then the result is a pointer to the most
  //   derived object pointed to by v.
  bool IsDynamicCastToVoid = DestTy->isVoidPointerType();
  QualType SrcRecordTy;
  QualType DestRecordTy;
  if (IsDynamicCastToVoid) {
    SrcRecordTy = SrcTy->getPointeeType();
    // No DestRecordTy.
  } else if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }
  // C++ [class.cdtor]p5:
  //   If the operand of the dynamic_cast refers to the object under
  //   construction or destruction and the static type of the operand is not a
  //   pointer to or object of the constructor or destructor's own class or one
  //   of its bases, the dynamic_cast results in undefined behavior.
  EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr, SrcRecordTy);

  if (DCE->isAlwaysNull()) {
    if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy)) {
      // Expression emission is expected to retain a valid insertion point.
      if (!Builder.GetInsertBlock())
        EmitBlock(createBasicBlock("dynamic_cast.unreachable"));
      return T;
    }
  }
  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");

  // If the destination is effectively final, the cast succeeds if and only
  // if the dynamic type of the pointer is exactly the destination type.
  bool IsExact = !IsDynamicCastToVoid &&
                 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
                 DestRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() &&
                 CGM.getCXXABI().shouldEmitExactDynamicCast(DestRecordTy);
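
  // Illustration (not from the original source): given
  //   struct Final final : Base { };
  // a dynamic_cast<Final *>(pb) can be emitted as a direct vptr comparison
  // against Final's vtable, with no runtime-library call.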
  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the
  //   result is the null pointer value of type T.
  bool ShouldNullCheckSrcValue =
      IsExact || CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(
                     SrcTy->isPointerType(), SrcRecordTy);

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  llvm::Value *Value;
  if (IsDynamicCastToVoid) {
    Value = CGM.getCXXABI().emitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy);
  } else if (IsExact) {
    // If the destination type is effectively final, this pointer points to the
    // right type if and only if its vptr has the right value.
    Value = CGM.getCXXABI().emitExactDynamicCast(
        *this, ThisAddr, SrcRecordTy, DestTy, DestRecordTy, CastEnd, CastNull);
  } else {
    assert(DestRecordTy->isRecordType() &&
           "destination type must be a record type!");
    Value = CGM.getCXXABI().emitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
                                                DestTy, DestRecordTy, CastEnd);

    CastNotNull = Builder.GetInsertBlock();
  }
  llvm::Value *NullValue = nullptr;
  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    NullValue = EmitDynamicCastToNull(*this, DestTy);
    CastNull = Builder.GetInsertBlock();

    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(NullValue, CastNull);

    Value = PHI;
  }

  return Value;
}