//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

namespace {
struct MemberCallInfo {
  RequiredArgs ReqArgs;
  // Number of prefix arguments for the call. Ignores the 'this' pointer.
  unsigned PrefixSize;
};
}

static MemberCallInfo
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
                                  llvm::Value *This, llvm::Value *ImplicitParam,
                                  QualType ImplicitParamTy, const CallExpr *CE,
                                  CallArgList &Args, CallArgList *RtlArgs) {
  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
         isa<CXXOperatorCallExpr>(CE));
  assert(MD->isInstance() &&
         "Trying to emit a member or operator call expr on a static method!");

  // Push the this ptr.
  const CXXRecordDecl *RD =
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
  Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
  unsigned PrefixSize = Args.size() - 1;

  // And the rest of the call args.
  if (RtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
    Args.addFrom(*RtlArgs);
  } else if (CE) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
                     CE->getDirectCallee());
  } else {
    assert(
        FPT->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }
  return {required, PrefixSize};
}

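// Illustrative note (not in the original source): for a plain call `a.f(x)`,
// Args ends up as [this, x] and PrefixSize is 0, since PrefixSize counts only
// the implicit arguments beyond the 'this' pointer itself. When a structor
// needs a VTT, Args becomes [this, VTT, ...] and PrefixSize is 1.
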
RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
    const CXXMethodDecl *MD, const CGCallee &Callee,
    ReturnValueSlot ReturnValue,
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
    const CallExpr *CE, CallArgList *RtlArgs) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  CallArgList Args;
  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
  return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
                  CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation());
}

RValue CodeGenFunction::EmitCXXDestructorCall(
    GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE) {
  const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());

  assert(!ThisTy.isNull());
  assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
         "Pointer/Object mixup");

  LangAS SrcAS = ThisTy.getAddressSpace();
  LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
  if (SrcAS != DstAS) {
    QualType DstTy = DtorDecl->getThisType();
    llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
    This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, DstAS,
                                                 NewType);
  }

  CallArgList Args;
  commonEmitCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
                                    ImplicitParamTy, CE, Args, nullptr);
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
                  ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall,
                  CE ? CE->getExprLoc() : SourceLocation{});
}

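// Illustrative note (not in the original source): the address-space cast
// above matters on targets with non-trivial language address spaces (e.g.
// OpenCL C++ or AMDGPU), where the object being destroyed may live in one
// address space while the destructor's 'this' is qualified with another;
// 'This' must be cast to the destructor's expected address space before the
// call.
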
RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
    const CXXPseudoDestructorExpr *E) {
  QualType DestroyedType = E->getDestroyedType();
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
    // Automatic Reference Counting:
    //   If the pseudo-expression names a retainable object with weak or
    //   strong lifetime, the object shall be released.
    Expr *BaseExpr = E->getBase();
    Address BaseValue = Address::invalid();
    Qualifiers BaseQuals;

    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
    if (E->isArrow()) {
      BaseValue = EmitPointerWithAlignment(BaseExpr);
      const auto *PTy = BaseExpr->getType()->castAs<PointerType>();
      BaseQuals = PTy->getPointeeType().getQualifiers();
    } else {
      LValue BaseLV = EmitLValue(BaseExpr);
      BaseValue = BaseLV.getAddress(*this);
      QualType BaseTy = BaseExpr->getType();
      BaseQuals = BaseTy.getQualifiers();
    }

    switch (DestroyedType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      EmitARCRelease(Builder.CreateLoad(BaseValue,
                                        DestroyedType.isVolatileQualified()),
                     ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      EmitARCDestroyWeak(BaseValue);
      break;
    }
  } else {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitIgnoredExpr(E->getBase());
  }

  return RValue::get(nullptr);
}

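// Illustrative example (not in the original source): in
//   template <typename T> void destroy(T *p) { p->~T(); }
// an instantiation with T = int makes the pseudo-destructor call a no-op
// beyond evaluating 'p', while an ARC-retainable T with __strong lifetime
// takes the release path in the lifetime switch above.
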
static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    CGCallee callee =
        CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
                    ReturnValue);
  }

  bool HasQualifier = ME->hasQualifier();
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
  bool IsArrow = ME->isArrow();
  const Expr *Base = ME->getBase();

  return EmitCXXMemberOrOperatorMemberCallExpr(
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
}

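// Illustrative note (not in the original source): the dispatch above means
//   a.f(1);        // MemberExpr callee -> member/operator call path below
//   (a.*pmf)(1);   // BinaryOperator callee -> EmitCXXMemberPointerCallExpr
//   a.g(1);        // static member g -> ordinary direct EmitCall
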
RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
    const Expr *Base) {
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));

  // Compute the object pointer.
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;

  const CXXMethodDecl *DevirtualizedMethod = nullptr;
  if (CanUseVirtualCall &&
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->IgnoreParenBaseCasts();
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
        MD->getReturnType().getCanonicalType())
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return
      // type of MD and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
      DevirtualizedMethod = nullptr;
    else if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = nullptr;
    }
  }

  bool TrivialForCodegen =
      MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion());
  bool TrivialAssignment =
      TrivialForCodegen &&
      (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      !MD->getParent()->mayInsertExtraPadding();

  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
  // operator before the LHS.
  CallArgList RtlArgStorage;
  CallArgList *RtlArgs = nullptr;
  LValue TrivialAssignmentRHS;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
    if (OCE->isAssignmentOp()) {
      if (TrivialAssignment) {
        TrivialAssignmentRHS = EmitLValue(CE->getArg(1));
      } else {
        RtlArgs = &RtlArgStorage;
        EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                     drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
                     /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
      }
    }
  }

  LValue This;
  if (IsArrow) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
    This = MakeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo);
  } else {
    This = EmitLValue(Base);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
    // constructing a new complete object of type Ctor.
    assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
    CallArgList Args;
    commonEmitCXXMemberOrOperatorCall(
        *this, Ctor, This.getPointer(*this), /*ImplicitParam=*/nullptr,
        /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);

    EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, This.getAddress(*this), Args,
                           AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
                           /*NewPointerIsChecked=*/false);
    return RValue::get(nullptr);
  }

  if (TrivialForCodegen) {
    if (isa<CXXDestructorDecl>(MD))
      return RValue::get(nullptr);

    if (TrivialAssignment) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      // It's important that we use the result of EmitLValue here rather than
      // emitting call arguments, in order to preserve TBAA information from
      // the RHS.
      LValue RHS = isa<CXXOperatorCallExpr>(CE)
                       ? TrivialAssignmentRHS
                       : EmitLValue(*CE->arg_begin());
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This.getPointer(*this));
    }

    assert(MD->getParent()->mayInsertExtraPadding() &&
           "unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl =
      DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = nullptr;
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
        GlobalDecl(Dtor, Dtor_Complete));
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object
  //   that is not of type X, or of a type derived from X, the behavior is
  //   undefined.
  SourceLocation CallLoc;
  ASTContext &C = getContext();
  if (CE)
    CallLoc = CE->getExprLoc();

  SanitizerSet SkippedChecks;
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    auto *IOA = CMCE->getImplicitObjectArgument();
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
    if (IsImplicitObjectCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
      SkippedChecks.set(SanitizerKind::Null, true);
  }
  EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
                This.getPointer(*this),
                C.getRecordType(CalleeDecl->getParent()),
                /*Alignment=*/CharUnits::Zero(), SkippedChecks);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
                                                This.getAddress(*this),
                                                cast<CXXMemberCallExpr>(CE));
    } else {
      GlobalDecl GD(Dtor, Dtor_Complete);
      CGCallee Callee;
      if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
        Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
      else if (!DevirtualizedMethod)
        Callee =
            CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
      else
        Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);

      QualType ThisTy =
          IsArrow ? Base->getType()->getPointeeType() : Base->getType();
      EmitCXXDestructorCall(GD, Callee, This.getPointer(*this), ThisTy,
                            /*ImplicitParam=*/nullptr,
                            /*ImplicitParamTy=*/QualType(), CE);
    }
    return RValue::get(nullptr);
  }

  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
  // 'CalleeDecl' instead.

  CGCallee Callee;
  if (UseVirtualCall) {
    Callee = CGCallee::forVirtual(CE, MD, This.getAddress(*this), Ty);
  } else {
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
        MD->getParent()->isDynamicClass()) {
      llvm::Value *VTable;
      const CXXRecordDecl *RD;
      std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr(
          *this, This.getAddress(*this), CalleeDecl->getParent());
      EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
    }

    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
    else if (!DevirtualizedMethod)
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
    else
      Callee =
          CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
                              GlobalDecl(DevirtualizedMethod));
  }

  if (MD->isVirtual()) {
    Address NewThisAddr =
        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
            *this, CalleeDecl, This.getAddress(*this), UseVirtualCall);
    This.setAddress(NewThisAddr);
  }

  return EmitCXXMemberOrOperatorCall(
      CalleeDecl, Callee, ReturnValue, This.getPointer(*this),
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
}

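// Illustrative example of the devirtualization above (not in the original
// source):
//   struct B { virtual void f(); };
//   struct D final : B { void f() override; };
//   void g(D *p) { p->f(); }
// Because D is final, the best dynamic type is known, so the call is emitted
// as a direct call to D::f() instead of a virtual dispatch through the vtable.
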
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const auto *MPT = MemFnExpr->getType()->castAs<MemberPointerType>();
  const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
  const auto *RD =
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());

  // Emit the 'this' pointer.
  Address This = Address::invalid();
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitPointerWithAlignment(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress(*this);

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
                QualType(MPT->getClass(), 0));

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *ThisPtrForCall = nullptr;
  CGCallee Callee =
      CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
                                                      ThisPtrForCall, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
      getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(ThisPtrForCall), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arguments());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
                                                      /*PrefixSize=*/0),
                  Callee, ReturnValue, Args, nullptr, E == MustTailCall,
                  E->getExprLoc());
}

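// Illustrative example (not in the original source):
//   struct S { int f(int); };
//   int call(S &s, int (S::*pmf)(int)) { return (s.*pmf)(42); }
// The C++ ABI decides how the member-function pointer encodes virtualness
// (e.g. classic Itanium marks virtual members via the low bit of the pointer
// field) and produces both the adjusted 'this' and the callee used above.
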
RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  return EmitCXXMemberOrOperatorMemberCallExpr(
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
      /*IsArrow=*/false, E->getArg(0));
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            Address DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits NVSize = Layout.getNonVirtualSize();

  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present; they are initialized by the most derived class before calling the
  // constructor.
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
  Stores.emplace_back(CharUnits::Zero(), NVSize);

  // Each store is split by the existence of a vbptr.
  CharUnits VBPtrWidth = CGF.getPointerSize();
  std::vector<CharUnits> VBPtrOffsets =
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
    // Stop before we hit any virtual base pointers located in virtual bases.
    if (VBPtrOffset >= NVSize)
      break;
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
    CharUnits LastStoreOffset = LastStore.first;
    CharUnits LastStoreSize = LastStore.second;

    CharUnits SplitBeforeOffset = LastStoreOffset;
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
    if (!SplitBeforeSize.isZero())
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);

    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
    assert(!SplitAfterSize.isNegative() && "negative store size!");
    if (!SplitAfterSize.isZero())
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
  if (!NullConstantForBase->isNullValue()) {
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
        CGF.CGM.getModule(), NullConstantForBase->getType(),
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
        NullConstantForBase, Twine());

    CharUnits Align =
        std::max(Layout.getNonVirtualAlignment(), DestPtr.getAlignment());
    NullVariable->setAlignment(Align.getAsAlign());

    Address SrcPtr =
        Address(CGF.EmitCastToVoidPtr(NullVariable), CGF.Int8Ty, Align);

    // Get and call the appropriate llvm.memcpy overload.
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemCpy(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
          StoreSizeVal);
    }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  } else {
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
      CharUnits StoreOffset = Store.first;
      CharUnits StoreSize = Store.second;
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
      CGF.Builder.CreateMemSet(
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
          CGF.Builder.getInt8(0), StoreSizeVal);
    }
  }
}

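// Illustrative note (not in the original source): the store splitting above
// is relevant mainly to the Microsoft C++ ABI, where a base subobject may
// contain vbptrs that the most-derived constructor has already installed;
// zeroing straight across them would clobber those pointers, so the base is
// cleared in chunks on either side of each vbptr instead.
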
void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddress(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
                                      CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    // FIXME: This only handles the simplest case, where the source object
    //        is passed directly as the first argument to the constructor.
    //        This should also handle stepping through implicit casts and
    //        conversion sequences which involve two steps, with a
    //        conversion operator followed by a converting constructor.
    const Expr *SrcObj = E->getArg(0);
    assert(SrcObj->isTemporaryObject(getContext(), CD->getParent()));
    assert(
        getContext().hasSameUnqualifiedType(E->getType(), SrcObj->getType()));
    EmitAggExpr(SrcObj, Dest);
    return;
  }

  if (const ArrayType *arrayType
        = getContext().getAsArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
                               Dest.isSanitizerChecked());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      [[fallthrough]];

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
      break;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
  }
}

void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
                                                 const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

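// Illustrative note (not in the original source): under the Itanium C++ ABI,
//   Foo *p = new Foo[n];   // Foo has a non-trivial destructor
// allocates extra "cookie" bytes in front of the array to record n, so that
// delete[] p knows how many destructors to run; the ABI object computes the
// exact cookie size above.
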
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements =
    ConstantEmitter(CGF).tryEmitAbstract(*e->getArraySize(), e->getType());
  if (!numElements)
    numElements = CGF.EmitScalarExpr(*e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Function *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Function *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr,
                                    AggValueSlot::Overlap_t MayOverlap) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased,
                              MayOverlap, AggValueSlot::IsNotZeroed,
                              AggValueSlot::IsSanitizerChecked);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  EHScopeStack::stable_iterator Cleanup;
  llvm::Instruction *CleanupDominator = nullptr;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };

  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if (ILE->isStringLiteralInit()) {
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
      AggValueSlot Slot =
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased,
                                AggValueSlot::DoesNotOverlap,
                                AggValueSlot::IsNotZeroed,
                                AggValueSlot::IsSanitizerChecked);
      EmitAggExpr(ILE->getInit(0), Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
      CurPtr = Builder.CreateConstInBoundsGEP(
          CurPtr, InitListElements, "string.init.end");

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }

    InitListElements = ILE->getNumInits();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (needsEHCleanup(DtorKind)) {
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
      pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
                                       ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      Cleanup = EHStack.stable_begin();
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        auto *FinishedPtr =
            Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
        Builder.CreateStore(FinishedPtr, EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
                              ILE->getInit(i)->getType(), CurPtr,
                              AggValueSlot::DoesNotOverlap);
      CurPtr = Address(Builder.CreateInBoundsGEP(
                           CurPtr.getElementType(), CurPtr.getPointer(),
                           Builder.getSize(1), "array.exp.next"),
                       CurPtr.getElementType(),
                       StartAlign.alignmentAtOffset((i + 1) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = Builder.CreateElementBitCast(CurPtr, BeginPtr.getElementType());
  }

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    // If there was a Cleanup, deactivate it.
    if (CleanupDominator)
      DeactivateCleanupBlock(Cleanup, CleanupDominator);
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If new expression did not specify value-initialization, then there
      // is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit.isValid())
      Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
                               /*NewPointerIsChecked*/true,
                               CCE->requiresZeroInitialization());
    return;
  }

  // If this is value-initialization, we can usually use memset.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");

  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // If we have a struct whose every field is value-initialized, we can
  // usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
    if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
      if (RType->getDecl()->isStruct()) {
        unsigned NumElements = 0;
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
          NumElements = CXXRD->getNumBases();
        for (auto *Field : RType->getDecl()->fields())
          if (!Field->isUnnamedBitfield())
            ++NumElements;
        // FIXME: Recurse into nested InitListExprs.
        if (ILE->getNumInits() == NumElements)
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
              --NumElements;
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
          return;
      }
    }
  }

  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr =
    Builder.CreateInBoundsGEP(BeginPtr.getElementType(), BeginPtr.getPointer(),
                              NumElements, "array.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
  }

  // Enter the loop.
  EmitBlock(LoopBB);

  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
      Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
  CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);

  CurPtr = Address(CurPtrPhi, CurPtr.getElementType(), ElementAlign);

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit.isValid())
    Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

  // Enter a partial-destruction Cleanup if necessary.
  if (!CleanupDominator && needsEHCleanup(DtorKind)) {
    pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
                                   ElementType, ElementAlign,
                                   getDestroyer(DtorKind));
    Cleanup = EHStack.stable_begin();
    CleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
                          AggValueSlot::DoesNotOverlap);

  // Leave the Cleanup if we entered one.
  if (CleanupDominator) {
    DeactivateCleanupBlock(Cleanup, CleanupDominator);
    CleanupDominator->eraseFromParent();
  }

  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr =
      Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
                                         "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());

  EmitBlock(ContBB);
}

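// Illustrative example (not in the original source): for
//   new int[n]{1, 2, 3}
// the three explicit initializers are stored one by one, and the remaining
// n - 3 elements are zeroed by the single memset from TryMemsetInitialization
// rather than by an element-at-a-time loop.
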
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType, llvm::Type *ElementTy,
                               Address NewPtr, llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  ApplyDebugLocation DL(CGF, E);
  if (E->isArray())
    CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
                                AllocSizeWithoutCookie);
  else if (const Expr *Init = E->getInitializer())
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
                            AggValueSlot::DoesNotOverlap);
}

/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *CalleeDecl,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::CallBase *CallOrInvoke;
  llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
  CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
                       Args, CalleeType, /*ChainCall=*/false),
                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    CallOrInvoke->addFnAttr(llvm::Attribute::Builtin);
  }

  return RV;
}

RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
                                                 const CallExpr *TheCall,
                                                 bool IsDelete) {
  CallArgList Args;
  EmitCallArgs(Args, Type, TheCall->arguments());
  // Find the allocation or deallocation function that we're calling.
  ASTContext &Ctx = getContext();
  DeclarationName Name = Ctx.DeclarationNames
      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);

  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
        return EmitNewDeleteCall(*this, FD, Type, Args);
  llvm_unreachable("predeclared global operator new/delete is missing");
}

namespace {
/// The parameters to pass to a usual operator delete.
struct UsualDeleteParams {
  bool DestroyingDelete = false;
  bool Size = false;
  bool Alignment = false;
};
}

static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
  UsualDeleteParams Params;

  const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
  auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();

  // The first argument is always a void*.
  ++AI;

  // The next parameter may be a std::destroying_delete_t.
  if (FD->isDestroyingOperatorDelete()) {
    Params.DestroyingDelete = true;
    assert(AI != AE);
    ++AI;
  }

  // Figure out what other parameters we should be implicitly passing.
  if (AI != AE && (*AI)->isIntegerType()) {
    Params.Size = true;
    ++AI;
  }

  if (AI != AE && (*AI)->isAlignValT()) {
    Params.Alignment = true;
    ++AI;
  }

  assert(AI == AE && "unexpected usual deallocation function parameter");
  return Params;
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon abnormal
  /// exit from a new expression. Templated on a traits type that deals with
  /// ensuring that the arguments dominate the cleanup if necessary.
  template<typename Traits>
  class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
    /// Type used to hold llvm::Value*s.
    typedef typename Traits::ValueTy ValueTy;
    /// Type used to hold RValues.
    typedef typename Traits::RValueTy RValueTy;
    struct PlacementArg {
      RValueTy ArgValue;
      QualType ArgType;
    };

    unsigned NumPlacementArgs : 31;
    unsigned PassAlignmentToPlacementDelete : 1;
    const FunctionDecl *OperatorDelete;
    ValueTy Ptr;
    ValueTy AllocSize;
    CharUnits AllocAlign;

    PlacementArg *getPlacementArgs() {
      return reinterpret_cast<PlacementArg *>(this + 1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(PlacementArg);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete, ValueTy Ptr,
                        ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
                        CharUnits AllocAlign)
      : NumPlacementArgs(NumPlacementArgs),
        PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
        OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
        AllocAlign(AllocAlign) {}

    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = {Arg, Type};
    }

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
      CallArgList DeleteArgs;

      // The first argument is always a void* (or C* for a destroying operator
      // delete for class type C).
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));

      // Figure out what other parameters we should be implicitly passing.
      UsualDeleteParams Params;
      if (NumPlacementArgs) {
        // A placement deallocation function is implicitly passed an alignment
        // if the placement allocation function was, but is never passed a size.
        Params.Alignment = PassAlignmentToPlacementDelete;
      } else {
        // For a non-placement new-expression, 'operator delete' can take a
        // size and/or an alignment if it has the right parameters.
        Params = getUsualDeleteParams(OperatorDelete);
      }

      assert(!Params.DestroyingDelete &&
             "should not call destroying delete in a new-expression");

      // The second argument can be a std::size_t (for non-placement delete).
      if (Params.Size)
        DeleteArgs.add(Traits::get(CGF, AllocSize),
                       CGF.getContext().getSizeType());

      // The next (second or third) argument can be a std::align_val_t, which
      // is an enum whose underlying type is std::size_t.
      // FIXME: Use the right type as the parameter type. Note that in a call
      // to operator delete(size_t, ...), we may not have it available.
      if (Params.Alignment)
        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
                           CGF.SizeTy, AllocAlign.getQuantity())),
                       CGF.getContext().getSizeType());

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        auto Arg = getPlacementArgs()[I];
        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
      }

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  Address NewPtr,
                                  llvm::Value *AllocSize,
                                  CharUnits AllocAlign,
                                  const CallArgList &NewArgs) {
  unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;

  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    struct DirectCleanupTraits {
      typedef llvm::Value *ValueTy;
      typedef RValue RValueTy;
      static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
      static RValue get(CodeGenFunction &, RValueTy V) { return V; }
    };

    typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;

    DirectCleanup *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
                                           E->getNumPlacementArgs(),
                                           E->getOperatorDelete(),
                                           NewPtr.getPointer(),
                                           AllocSize,
                                           E->passAlignment(),
                                           AllocAlign);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
      auto &Arg = NewArgs[I + NumNonPlacementArgs];
      Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
    }

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  struct ConditionalCleanupTraits {
    typedef DominatingValue<RValue>::saved_type ValueTy;
    typedef DominatingValue<RValue>::saved_type RValueTy;
    static RValue get(CodeGenFunction &CGF, ValueTy V) {
      return V.restore(CGF);
    }
  };
  typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;

  ConditionalCleanup *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
                                              E->getNumPlacementArgs(),
                                              E->getOperatorDelete(),
                                              SavedNewPtr,
                                              SavedAllocSize,
                                              E->passAlignment(),
                                              AllocAlign);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
    auto &Arg = NewArgs[I + NumNonPlacementArgs];
    Cleanup->setPlacementArg(
        I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
  }

  CGF.initFullExprCleanup();
}

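// Illustrative note (not in the original source): for
//   p = new (a, b) T(...);
// if T's constructor throws, the cleanup pushed above calls the matching
// placement 'operator delete(void*, A, B)' with the saved placement
// arguments, as C++ [expr.new] requires.
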
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();

  // If there is a brace-initializer, cannot allocate fewer elements than inits.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
    if (ILE && ILE->isStringLiteralInit())
      minElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
    else if (ILE)
      minElements = ILE->getNumInits();
  }

  llvm::Value *numElements = nullptr;
  llvm::Value *allocSizeWithoutCookie = nullptr;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);
  CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);

  // Emit the allocation call. If the allocator is a global placement
  // operator, just "inline" it directly.
  Address allocation = Address::invalid();
  CallArgList allocatorArgs;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(E->getNumPlacementArgs() == 1);
    const Expr *arg = *E->placement_arguments().begin();

    LValueBaseInfo BaseInfo;
    allocation = EmitPointerWithAlignment(arg, &BaseInfo);

    // The pointer expression will, in many cases, be an opaque void*.
    // In these cases, discard the computed alignment and use the
    // formal alignment of the allocated type.
    if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
      allocation = allocation.withAlignment(allocAlign);

    // Set up allocatorArgs for the call to operator delete if it's not
    // the reserved global operator.
    if (E->getOperatorDelete() &&
        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
      allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
      allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
    }
  } else {
    const FunctionProtoType *allocatorType =
      allocator->getType()->castAs<FunctionProtoType>();
    unsigned ParamsToSkip = 0;

    // The allocation size is the first argument.
    QualType sizeType = getContext().getSizeType();
    allocatorArgs.add(RValue::get(allocSize), sizeType);
    ++ParamsToSkip;

    if (allocSize != allocSizeWithoutCookie) {
      CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
      allocAlign = std::max(allocAlign, cookieAlign);
    }

    // The allocation alignment may be passed as the second argument.
    if (E->passAlignment()) {
      QualType AlignValT = sizeType;
      if (allocatorType->getNumParams() > 1) {
        AlignValT = allocatorType->getParamType(1);
        assert(getContext().hasSameUnqualifiedType(
                   AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
                   sizeType) &&
               "wrong type for alignment parameter");
        ++ParamsToSkip;
      } else {
        // Corner case, passing alignment to 'operator new(size_t, ...)'.
        assert(allocator->isVariadic() && "can't pass alignment to allocator");
      }
      allocatorArgs.add(
          RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
          AlignValT);
    }

    // FIXME: Why do we not pass a CalleeDecl here?
    EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
                 /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);
    RValue RV =
      EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);

    // Set !heapallocsite metadata on the call to operator new.
    if (getDebugInfo())
      if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal()))
        getDebugInfo()->addHeapAllocSiteMetadata(newCall, allocType,
                                                 E->getExprLoc());

    // If this was a call to a global replaceable allocation function that does
    // not take an alignment argument, the allocator is known to produce
    // storage that's suitably aligned for any object that fits, up to a known
    // threshold. Otherwise assume it's suitably aligned for the allocated type.
    CharUnits allocationAlign = allocAlign;
    if (!E->passAlignment() &&
        allocator->isReplaceableGlobalAllocationFunction()) {
      unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
          Target.getNewAlign(), getContext().getTypeSize(allocType)));
      allocationAlign = std::max(
          allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
    }

    allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec or is the reserved placement new) and we have an
  // interesting initializer or will be running sanitizers on the
  // initialization.
  bool nullCheck = E->shouldNullCheckAllocation() &&
                   (!allocType.isPODType(getContext()) || E->hasInitializer() ||
                    sanitizePerformTypeCheck());

  llvm::BasicBlock *nullCheckBB = nullptr;
  llvm::BasicBlock *contBB = nullptr;

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull =
      Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
                          allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementTy = ConvertTypeForMem(allocType);
  Address result = Builder.CreateElementBitCast(allocation, elementTy);

  // Passing pointer through launder.invariant.group to avoid propagation of
  // vptrs information which may be included in previous type.
  // To not break LTO with different optimizations levels, we do it regardless
  // of optimization level.
  if (CGM.getCodeGenOpts().StrictVTablePointers &&
      allocator->isReservedGlobalPlacementOperator())
    result = Builder.CreateLaunderInvariantGroup(result);

  // Emit sanitizer checks for pointer value now, so that in the case of an
  // array it was checked only once and not at each constructor call. We may
  // have already checked that the pointer is non-null.
  // FIXME: If we have an array cookie and a potentially-throwing allocator,
  // we'll null check the wrong pointer here.
  SanitizerSet SkippedChecks;
  SkippedChecks.set(SanitizerKind::Null, nullCheck);
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
                E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
                result.getPointer(), allocType, result.getAlignment(),
                SkippedChecks, numElements);

  EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
                     allocSizeWithoutCookie);
  llvm::Value *resultPtr = result.getPointer();
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type. If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (resultPtr->getType() != resultType)
      resultPtr = Builder.CreateBitCast(resultPtr, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
    PHI->addIncoming(resultPtr, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
                     nullCheckBB);

    resultPtr = PHI;
  }

  return resultPtr;
}
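
// Illustrative sketch (not part of the original source): the null check and
// PHI above correspond to a non-throwing allocator, e.g.
//
//   #include <new>
//   struct S { S(); };               // hypothetical type
//   S *p = new (std::nothrow) S;     // allocator may return null
//
// The initializer is emitted only in the "new.notnull" block; "new.cont"
// merges the initialized pointer with the null result.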
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr, QualType DeleteTy,
                                     llvm::Value *NumElements,
                                     CharUnits CookieSize) {
  assert((!NumElements && CookieSize.isZero()) ||
         DeleteFD->getOverloadedOperator() == OO_Array_Delete);

  const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>();
  CallArgList DeleteArgs;

  auto Params = getUsualDeleteParams(DeleteFD);
  auto ParamTypeIt = DeleteFTy->param_type_begin();

  // Pass the pointer itself.
  QualType ArgTy = *ParamTypeIt++;
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  // Pass the std::destroying_delete tag if present.
  llvm::AllocaInst *DestroyingDeleteTag = nullptr;
  if (Params.DestroyingDelete) {
    QualType DDTag = *ParamTypeIt++;
    llvm::Type *Ty = getTypes().ConvertType(DDTag);
    CharUnits Align = CGM.getNaturalTypeAlignment(DDTag);
    DestroyingDeleteTag = CreateTempAlloca(Ty, "destroying.delete.tag");
    DestroyingDeleteTag->setAlignment(Align.getAsAlign());
    DeleteArgs.add(
        RValue::getAggregate(Address(DestroyingDeleteTag, Ty, Align)), DDTag);
  }

  // Pass the size if the delete function has a size_t parameter.
  if (Params.Size) {
    QualType SizeType = *ParamTypeIt++;
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
                                               DeleteTypeSize.getQuantity());

    // For array new, multiply by the number of elements.
    if (NumElements)
      Size = Builder.CreateMul(Size, NumElements);

    // If there is a cookie, add the cookie size.
    if (!CookieSize.isZero())
      Size = Builder.CreateAdd(
          Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));

    DeleteArgs.add(RValue::get(Size), SizeType);
  }

  // Pass the alignment if the delete function has an align_val_t parameter.
  if (Params.Alignment) {
    QualType AlignValType = *ParamTypeIt++;
    CharUnits DeleteTypeAlign =
        getContext().toCharUnitsFromBits(getContext().getTypeAlignIfKnown(
            DeleteTy, true /* NeedsPreferredAlignment */));
    llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
                                                DeleteTypeAlign.getQuantity());
    DeleteArgs.add(RValue::get(Align), AlignValType);
  }

  assert(ParamTypeIt == DeleteFTy->param_type_end() &&
         "unknown parameter to usual delete function");

  // Emit the call to delete.
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);

  // If call argument lowering didn't use the destroying_delete_t alloca,
  // remove it again.
  if (DestroyingDeleteTag && DestroyingDeleteTag->use_empty())
    DestroyingDeleteTag->eraseFromParent();
}
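
// Illustrative sketch (not part of the original source): the parameters of a
// usual deallocation function are matched in the order checked above, e.g.
//
//   #include <new>
//   void operator delete(void *p, std::size_t sz, std::align_val_t al);
//
// such a function receives the pointer first, then the object (or
// array-plus-cookie) size, then the preferred alignment of the deleted type.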
namespace {
/// Calls the given 'operator delete' on a single object.
struct CallObjectDelete final : EHScopeStack::Cleanup {
  llvm::Value *Ptr;
  const FunctionDecl *OperatorDelete;
  QualType ElementType;

  CallObjectDelete(llvm::Value *Ptr,
                   const FunctionDecl *OperatorDelete,
                   QualType ElementType)
    : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
  }
};
}

void
CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
                                             llvm::Value *CompletePtr,
                                             QualType ElementType) {
  EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
                                        OperatorDelete, ElementType);
}
/// Emit the code for deleting a single object with a destroying operator
/// delete. If the element type has a non-virtual destructor, Ptr has already
/// been converted to the type of the parameter of 'operator delete'. Otherwise
/// Ptr points to an object of the static type.
static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
                                       const CXXDeleteExpr *DE, Address Ptr,
                                       QualType ElementType) {
  auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
  if (Dtor && Dtor->isVirtual())
    CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                Dtor);
  else
    CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.getPointer(), ElementType);
}
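
// Illustrative sketch (not part of the original source): a destroying
// operator delete (C++20) takes over destruction as well, e.g.
//
//   #include <new>
//   struct T {                       // hypothetical type
//     ~T();
//     void operator delete(T *p, std::destroying_delete_t);
//   };
//
// For 'delete p' on a T*, no separate destructor call is emitted on this
// path; the operator delete itself is responsible for destroying *p.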
/// Emit the code for deleting a single object.
/// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
/// if not.
static bool EmitObjectDelete(CodeGenFunction &CGF,
                             const CXXDeleteExpr *DE,
                             Address Ptr,
                             QualType ElementType,
                             llvm::BasicBlock *UnconditionalDeleteBlock) {
  // C++11 [expr.delete]p3:
  //   If the static type of the object to be deleted is different from its
  //   dynamic type, the static type shall be a base class of the dynamic type
  //   of the object to be deleted and the static type shall have a virtual
  //   destructor or the behavior is undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
                    DE->getExprLoc(), Ptr.getPointer(),
                    ElementType);

  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
  assert(!OperatorDelete->isDestroyingOperatorDelete());

  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = nullptr;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        bool UseVirtualCall = true;
        const Expr *Base = DE->getArgument();
        if (auto *DevirtualizedDtor =
                dyn_cast_or_null<const CXXDestructorDecl>(
                    Dtor->getDevirtualizedMethod(
                        Base, CGF.CGM.getLangOpts().AppleKext))) {
          UseVirtualCall = false;
          const CXXRecordDecl *DevirtualizedClass =
              DevirtualizedDtor->getParent();
          if (declaresSameEntity(getCXXRecord(Base), DevirtualizedClass)) {
            // Devirtualized to the class of the base type (the type of the
            // whole expression).
            Dtor = DevirtualizedDtor;
          } else {
            // Devirtualized to some other type. Would need to cast the this
            // pointer to that type but we don't have support for that yet, so
            // do a virtual call. FIXME: handle the case where it is
            // devirtualized to the derived type (the type of the inner
            // expression) as in EmitCXXMemberOrOperatorMemberCallExpr.
            UseVirtualCall = true;
          }
        }
        if (UseVirtualCall) {
          CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                      Dtor);
          return false;
        }
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr.getPointer(),
                                            OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr, ElementType);
  else if (auto Lifetime = ElementType.getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  // When optimizing for size, call 'operator delete' unconditionally.
  if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) {
    CGF.EmitBlock(UnconditionalDeleteBlock);
    CGF.PopCleanupBlock();
    return true;
  }

  CGF.PopCleanupBlock();
  return false;
}
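
// Illustrative sketch (not part of the original source): the
// devirtualization logic above distinguishes cases such as
//
//   struct Base { virtual ~Base(); };        // hypothetical hierarchy
//   struct Derived final : Base { ~Derived(); };
//   void f(Base *b, Derived *d) {
//     delete b;   // dynamic type unknown: emits a virtual deleting call
//     delete d;   // 'final' allows the destructor call to be devirtualized
//   }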
namespace {
/// Calls the given 'operator delete' on an array of objects.
struct CallArrayDelete final : EHScopeStack::Cleanup {
  llvm::Value *Ptr;
  const FunctionDecl *OperatorDelete;
  llvm::Value *NumElements;
  QualType ElementType;
  CharUnits CookieSize;

  CallArrayDelete(llvm::Value *Ptr,
                  const FunctionDecl *OperatorDelete,
                  llvm::Value *NumElements,
                  QualType ElementType,
                  CharUnits CookieSize)
    : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
      ElementType(ElementType), CookieSize(CookieSize) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
                       CookieSize);
  }
};
}
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            Address deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = nullptr;
  llvm::Value *allocatedPtr = nullptr;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
    CharUnits elementAlign =
      deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);

    llvm::Value *arrayBegin = deletedPtr.getPointer();
    llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
        deletedPtr.getElementType(), arrayBegin, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}
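
// Illustrative sketch (not part of the original source): for
//
//   struct S { ~S(); };          // hypothetical type
//   S *p = new S[n];             // element count is stored in a cookie
//   delete [] p;                 // ReadArrayCookie recovers the count here
//
// the recovered element count drives the destructor loop above, and the
// cookie size is folded back into the arguments passed to operator delete.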
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  Address Ptr = EmitPointerWithAlignment(Arg);

  // Null check the pointer.
  //
  // We could avoid this null check if we can determine that the object
  // destruction is trivial and doesn't require an array cookie; we can
  // unconditionally perform the operator delete call in that case. For now, we
  // assume that deleted pointers are null rarely enough that it's better to
  // keep the branch. This might be worth revisiting for a -O0 code size win.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  QualType DeleteTy = E->getDestroyedType();

  // A destroying operator delete overrides the entire operation of the
  // delete expression.
  if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
    EmitDestroyingObjectDelete(*this, E, Ptr, DeleteTy);
    EmitBlock(DeleteEnd);
    return;
  }

  // We might be deleting a pointer to array. If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getElementType(),
                                            Ptr.getPointer(), GEP, "del.first"),
                  ConvertTypeForMem(DeleteTy), Ptr.getAlignment());
  }

  assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
    EmitBlock(DeleteEnd);
  } else {
    if (!EmitObjectDelete(*this, E, Ptr, DeleteTy, DeleteEnd))
      EmitBlock(DeleteEnd);
  }
}
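
// Illustrative sketch (not part of the original source): the unpeeling loop
// above handles deletion of a pointer to array, e.g.
//
//   using A = int[3][7];         // hypothetical alias
//   A *p = new A[n];             // IR element type: [3 x [7 x i32]]
//   delete [] p;                 // GEPs down to the first i32 element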
static bool isGLValueFromPointerDeref(const Expr *E) {
  E = E->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(E)) {
    if (!CE->getSubExpr()->isGLValue())
      return false;
    return isGLValueFromPointerDeref(CE->getSubExpr());
  }

  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return isGLValueFromPointerDeref(OVE->getSourceExpr());

  if (const auto *BO = dyn_cast<BinaryOperator>(E))
    if (BO->getOpcode() == BO_Comma)
      return isGLValueFromPointerDeref(BO->getRHS());

  if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
    return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
           isGLValueFromPointerDeref(ACO->getFalseExpr());

  // C++11 [expr.sub]p1:
  //   The expression E1[E2] is identical (by definition) to *((E1)+(E2))
  if (isa<ArraySubscriptExpr>(E))
    return true;

  if (const auto *UO = dyn_cast<UnaryOperator>(E))
    if (UO->getOpcode() == UO_Deref)
      return true;

  return false;
}
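
// Illustrative sketch (not part of the original source): assuming 'p' and
// 'q' are pointers to a polymorphic class, the recursion above classifies
// all of the following operands as pointer dereferences:
//
//   typeid(*p);               // direct dereference
//   typeid(p[0]);             // subscript: defined as *((p)+(0))
//   typeid(cond ? *p : *q);   // both arms are dereferences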
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  Address ThisPtr = CGF.EmitLValue(E).getAddress(CGF);

  QualType SrcRecordTy = E->getType();

  // C++ [class.cdtor]p4:
  //   If the operand of typeid refers to the object under construction or
  //   destruction and the static type of the operand is neither the
  //   constructor or destructor’s class nor one of its bases, the behavior is
  //   undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
                    ThisPtr.getPointer(), SrcRecordTy);

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  //
  // However, this paragraph's intent is not clear. We choose a very generous
  // interpretation which implores us to consider comma operators, conditional
  // operators, parentheses and other such constructs.
  if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
          isGLValueFromPointerDeref(E), SrcRecordTy)) {
    llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
    llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
    CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

    CGF.EmitBlock(BadTypeidBlock);
    CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
    CGF.EmitBlock(EndBlock);
  }

  return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
                                        StdTypeInfoPtrTy);
}
*CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr
*E
) {
2193 llvm::Type
*StdTypeInfoPtrTy
=
2194 ConvertType(E
->getType())->getPointerTo();
2196 if (E
->isTypeOperand()) {
2197 llvm::Constant
*TypeInfo
=
2198 CGM
.GetAddrOfRTTIDescriptor(E
->getTypeOperand(getContext()));
2199 return Builder
.CreateBitCast(TypeInfo
, StdTypeInfoPtrTy
);
2202 // C++ [expr.typeid]p2:
2203 // When typeid is applied to a glvalue expression whose type is a
2204 // polymorphic class type, the result refers to a std::type_info object
2205 // representing the type of the most derived object (that is, the dynamic
2206 // type) to which the glvalue refers.
2207 // If the operand is already most derived object, no need to look up vtable.
2208 if (E
->isPotentiallyEvaluated() && !E
->isMostDerived(getContext()))
2209 return EmitTypeidFromVTable(*this, E
->getExprOperand(),
2212 QualType OperandTy
= E
->getExprOperand()->getType();
2213 return Builder
.CreateBitCast(CGM
.GetAddrOfRTTIDescriptor(OperandTy
),
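
// Illustrative sketch (not part of the original source): only a potentially
// evaluated operand that may not be the most derived object needs the
// vtable lookup above, e.g.
//
//   struct Poly { virtual ~Poly(); };   // hypothetical type
//   Poly p;
//   typeid(int);          // type operand: constant RTTI descriptor
//   typeid(p);            // most derived object known: constant descriptor
//   typeid(*make_poly()); // dynamic type: loaded through the vtable
//
// where 'make_poly' is a hypothetical function returning Poly*.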
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
    return nullptr;

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}
llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
                                              const CXXDynamicCastExpr *DCE) {
  CGM.EmitExplicitCastExprType(DCE, this);
  QualType DestTy = DCE->getTypeAsWritten();

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p7:
  //   If T is "pointer to cv void," then the result is a pointer to the most
  //   derived object pointed to by v.
  const PointerType *DestPTy = DestTy->getAs<PointerType>();

  bool isDynamicCastToVoid;
  QualType SrcRecordTy;
  QualType DestRecordTy;
  if (DestPTy) {
    isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    isDynamicCastToVoid = false;
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  // C++ [class.cdtor]p5:
  //   If the operand of the dynamic_cast refers to the object under
  //   construction or destruction and the static type of the operand is not a
  //   pointer to or object of the constructor or destructor’s own class or one
  //   of its bases, the dynamic_cast results in undefined behavior.
  EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
                SrcRecordTy);

  if (DCE->isAlwaysNull())
    if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
      return T;

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue =
      CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
                                                         SrcRecordTy);

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  llvm::Value *Value;
  if (isDynamicCastToVoid) {
    Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
                                                  DestTy);
  } else {
    assert(DestRecordTy->isRecordType() &&
           "destination type must be a record type!");
    Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
                                                DestTy, DestRecordTy, CastEnd);
    CastNotNull = Builder.GetInsertBlock();
  }

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}
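
// Illustrative sketch (not part of the original source): the null-check
// blocks above exist only for the pointer form of the cast, e.g.
//
//   struct Base { virtual ~Base(); };   // hypothetical hierarchy
//   struct Derived : Base {};
//   void g(Base *b) {
//     Derived *d = dynamic_cast<Derived *>(b);  // null in, null out
//     Derived &r = dynamic_cast<Derived &>(*b); // failure throws std::bad_cast
//   }
//
// In the pointer case, the "dynamic_cast.null" edge feeds a null value into
// the PHI at "dynamic_cast.end"; the reference form is not null-checked.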