1 //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This contains code dealing with code generation of C++ expressions
11 //===----------------------------------------------------------------------===//
13 #include "CGCUDARuntime.h"
15 #include "CGDebugInfo.h"
16 #include "CGObjCRuntime.h"
17 #include "CodeGenFunction.h"
18 #include "ConstantEmitter.h"
19 #include "TargetInfo.h"
20 #include "clang/Basic/CodeGenOptions.h"
21 #include "clang/CodeGen/CGFunctionInfo.h"
22 #include "llvm/IR/Intrinsics.h"
24 using namespace clang
;
25 using namespace CodeGen
;
28 struct MemberCallInfo
{
30 // Number of prefix arguments for the call. Ignores the `this` pointer.
36 commonEmitCXXMemberOrOperatorCall(CodeGenFunction
&CGF
, GlobalDecl GD
,
37 llvm::Value
*This
, llvm::Value
*ImplicitParam
,
38 QualType ImplicitParamTy
, const CallExpr
*CE
,
39 CallArgList
&Args
, CallArgList
*RtlArgs
) {
40 auto *MD
= cast
<CXXMethodDecl
>(GD
.getDecl());
42 assert(CE
== nullptr || isa
<CXXMemberCallExpr
>(CE
) ||
43 isa
<CXXOperatorCallExpr
>(CE
));
44 assert(MD
->isInstance() &&
45 "Trying to emit a member or operator call expr on a static method!");
48 const CXXRecordDecl
*RD
=
49 CGF
.CGM
.getCXXABI().getThisArgumentTypeForMethod(GD
);
50 Args
.add(RValue::get(This
), CGF
.getTypes().DeriveThisType(RD
, MD
));
52 // If there is an implicit parameter (e.g. VTT), emit it.
54 Args
.add(RValue::get(ImplicitParam
), ImplicitParamTy
);
57 const FunctionProtoType
*FPT
= MD
->getType()->castAs
<FunctionProtoType
>();
58 RequiredArgs required
= RequiredArgs::forPrototypePlus(FPT
, Args
.size());
59 unsigned PrefixSize
= Args
.size() - 1;
61 // And the rest of the call args.
63 // Special case: if the caller emitted the arguments right-to-left already
64 // (prior to emitting the *this argument), we're done. This happens for
65 // assignment operators.
66 Args
.addFrom(*RtlArgs
);
68 // Special case: skip first argument of CXXOperatorCall (it is "this").
69 unsigned ArgsToSkip
= isa
<CXXOperatorCallExpr
>(CE
) ? 1 : 0;
70 CGF
.EmitCallArgs(Args
, FPT
, drop_begin(CE
->arguments(), ArgsToSkip
),
71 CE
->getDirectCallee());
74 FPT
->getNumParams() == 0 &&
75 "No CallExpr specified for function with non-zero number of arguments");
77 return {required
, PrefixSize
};
80 RValue
CodeGenFunction::EmitCXXMemberOrOperatorCall(
81 const CXXMethodDecl
*MD
, const CGCallee
&Callee
,
82 ReturnValueSlot ReturnValue
,
83 llvm::Value
*This
, llvm::Value
*ImplicitParam
, QualType ImplicitParamTy
,
84 const CallExpr
*CE
, CallArgList
*RtlArgs
) {
85 const FunctionProtoType
*FPT
= MD
->getType()->castAs
<FunctionProtoType
>();
87 MemberCallInfo CallInfo
= commonEmitCXXMemberOrOperatorCall(
88 *this, MD
, This
, ImplicitParam
, ImplicitParamTy
, CE
, Args
, RtlArgs
);
89 auto &FnInfo
= CGM
.getTypes().arrangeCXXMethodCall(
90 Args
, FPT
, CallInfo
.ReqArgs
, CallInfo
.PrefixSize
);
91 return EmitCall(FnInfo
, Callee
, ReturnValue
, Args
, nullptr,
92 CE
&& CE
== MustTailCall
,
93 CE
? CE
->getExprLoc() : SourceLocation());
96 RValue
CodeGenFunction::EmitCXXDestructorCall(
97 GlobalDecl Dtor
, const CGCallee
&Callee
, llvm::Value
*This
, QualType ThisTy
,
98 llvm::Value
*ImplicitParam
, QualType ImplicitParamTy
, const CallExpr
*CE
) {
99 const CXXMethodDecl
*DtorDecl
= cast
<CXXMethodDecl
>(Dtor
.getDecl());
101 assert(!ThisTy
.isNull());
102 assert(ThisTy
->getAsCXXRecordDecl() == DtorDecl
->getParent() &&
103 "Pointer/Object mixup");
105 LangAS SrcAS
= ThisTy
.getAddressSpace();
106 LangAS DstAS
= DtorDecl
->getMethodQualifiers().getAddressSpace();
107 if (SrcAS
!= DstAS
) {
108 QualType DstTy
= DtorDecl
->getThisType();
109 llvm::Type
*NewType
= CGM
.getTypes().ConvertType(DstTy
);
110 This
= getTargetHooks().performAddrSpaceCast(*this, This
, SrcAS
, DstAS
,
115 commonEmitCXXMemberOrOperatorCall(*this, Dtor
, This
, ImplicitParam
,
116 ImplicitParamTy
, CE
, Args
, nullptr);
117 return EmitCall(CGM
.getTypes().arrangeCXXStructorDeclaration(Dtor
), Callee
,
118 ReturnValueSlot(), Args
, nullptr, CE
&& CE
== MustTailCall
,
119 CE
? CE
->getExprLoc() : SourceLocation
{});
122 RValue
CodeGenFunction::EmitCXXPseudoDestructorExpr(
123 const CXXPseudoDestructorExpr
*E
) {
124 QualType DestroyedType
= E
->getDestroyedType();
125 if (DestroyedType
.hasStrongOrWeakObjCLifetime()) {
126 // Automatic Reference Counting:
127 // If the pseudo-expression names a retainable object with weak or
128 // strong lifetime, the object shall be released.
129 Expr
*BaseExpr
= E
->getBase();
130 Address BaseValue
= Address::invalid();
131 Qualifiers BaseQuals
;
133 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
135 BaseValue
= EmitPointerWithAlignment(BaseExpr
);
136 const auto *PTy
= BaseExpr
->getType()->castAs
<PointerType
>();
137 BaseQuals
= PTy
->getPointeeType().getQualifiers();
139 LValue BaseLV
= EmitLValue(BaseExpr
);
140 BaseValue
= BaseLV
.getAddress(*this);
141 QualType BaseTy
= BaseExpr
->getType();
142 BaseQuals
= BaseTy
.getQualifiers();
145 switch (DestroyedType
.getObjCLifetime()) {
146 case Qualifiers::OCL_None
:
147 case Qualifiers::OCL_ExplicitNone
:
148 case Qualifiers::OCL_Autoreleasing
:
151 case Qualifiers::OCL_Strong
:
152 EmitARCRelease(Builder
.CreateLoad(BaseValue
,
153 DestroyedType
.isVolatileQualified()),
157 case Qualifiers::OCL_Weak
:
158 EmitARCDestroyWeak(BaseValue
);
162 // C++ [expr.pseudo]p1:
163 // The result shall only be used as the operand for the function call
164 // operator (), and the result of such a call has type void. The only
165 // effect is the evaluation of the postfix-expression before the dot or
167 EmitIgnoredExpr(E
->getBase());
170 return RValue::get(nullptr);
173 static CXXRecordDecl
*getCXXRecord(const Expr
*E
) {
174 QualType T
= E
->getType();
175 if (const PointerType
*PTy
= T
->getAs
<PointerType
>())
176 T
= PTy
->getPointeeType();
177 const RecordType
*Ty
= T
->castAs
<RecordType
>();
178 return cast
<CXXRecordDecl
>(Ty
->getDecl());
181 // Note: This function also emit constructor calls to support a MSVC
182 // extensions allowing explicit constructor function call.
183 RValue
CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr
*CE
,
184 ReturnValueSlot ReturnValue
) {
185 const Expr
*callee
= CE
->getCallee()->IgnoreParens();
187 if (isa
<BinaryOperator
>(callee
))
188 return EmitCXXMemberPointerCallExpr(CE
, ReturnValue
);
190 const MemberExpr
*ME
= cast
<MemberExpr
>(callee
);
191 const CXXMethodDecl
*MD
= cast
<CXXMethodDecl
>(ME
->getMemberDecl());
193 if (MD
->isStatic()) {
194 // The method is static, emit it as we would a regular call.
196 CGCallee::forDirect(CGM
.GetAddrOfFunction(MD
), GlobalDecl(MD
));
197 return EmitCall(getContext().getPointerType(MD
->getType()), callee
, CE
,
201 bool HasQualifier
= ME
->hasQualifier();
202 NestedNameSpecifier
*Qualifier
= HasQualifier
? ME
->getQualifier() : nullptr;
203 bool IsArrow
= ME
->isArrow();
204 const Expr
*Base
= ME
->getBase();
206 return EmitCXXMemberOrOperatorMemberCallExpr(
207 CE
, MD
, ReturnValue
, HasQualifier
, Qualifier
, IsArrow
, Base
);
210 RValue
CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
211 const CallExpr
*CE
, const CXXMethodDecl
*MD
, ReturnValueSlot ReturnValue
,
212 bool HasQualifier
, NestedNameSpecifier
*Qualifier
, bool IsArrow
,
214 assert(isa
<CXXMemberCallExpr
>(CE
) || isa
<CXXOperatorCallExpr
>(CE
));
216 // Compute the object pointer.
217 bool CanUseVirtualCall
= MD
->isVirtual() && !HasQualifier
;
219 const CXXMethodDecl
*DevirtualizedMethod
= nullptr;
220 if (CanUseVirtualCall
&&
221 MD
->getDevirtualizedMethod(Base
, getLangOpts().AppleKext
)) {
222 const CXXRecordDecl
*BestDynamicDecl
= Base
->getBestDynamicClassType();
223 DevirtualizedMethod
= MD
->getCorrespondingMethodInClass(BestDynamicDecl
);
224 assert(DevirtualizedMethod
);
225 const CXXRecordDecl
*DevirtualizedClass
= DevirtualizedMethod
->getParent();
226 const Expr
*Inner
= Base
->IgnoreParenBaseCasts();
227 if (DevirtualizedMethod
->getReturnType().getCanonicalType() !=
228 MD
->getReturnType().getCanonicalType())
229 // If the return types are not the same, this might be a case where more
230 // code needs to run to compensate for it. For example, the derived
231 // method might return a type that inherits form from the return
232 // type of MD and has a prefix.
233 // For now we just avoid devirtualizing these covariant cases.
234 DevirtualizedMethod
= nullptr;
235 else if (getCXXRecord(Inner
) == DevirtualizedClass
)
236 // If the class of the Inner expression is where the dynamic method
237 // is defined, build the this pointer from it.
239 else if (getCXXRecord(Base
) != DevirtualizedClass
) {
240 // If the method is defined in a class that is not the best dynamic
241 // one or the one of the full expression, we would have to build
242 // a derived-to-base cast to compute the correct this pointer, but
243 // we don't have support for that yet, so do a virtual call.
244 DevirtualizedMethod
= nullptr;
248 bool TrivialForCodegen
=
249 MD
->isTrivial() || (MD
->isDefaulted() && MD
->getParent()->isUnion());
250 bool TrivialAssignment
=
252 (MD
->isCopyAssignmentOperator() || MD
->isMoveAssignmentOperator()) &&
253 !MD
->getParent()->mayInsertExtraPadding();
255 // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
256 // operator before the LHS.
257 CallArgList RtlArgStorage
;
258 CallArgList
*RtlArgs
= nullptr;
259 LValue TrivialAssignmentRHS
;
260 if (auto *OCE
= dyn_cast
<CXXOperatorCallExpr
>(CE
)) {
261 if (OCE
->isAssignmentOp()) {
262 if (TrivialAssignment
) {
263 TrivialAssignmentRHS
= EmitLValue(CE
->getArg(1));
265 RtlArgs
= &RtlArgStorage
;
266 EmitCallArgs(*RtlArgs
, MD
->getType()->castAs
<FunctionProtoType
>(),
267 drop_begin(CE
->arguments(), 1), CE
->getDirectCallee(),
268 /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft
);
275 LValueBaseInfo BaseInfo
;
276 TBAAAccessInfo TBAAInfo
;
277 Address ThisValue
= EmitPointerWithAlignment(Base
, &BaseInfo
, &TBAAInfo
);
278 This
= MakeAddrLValue(ThisValue
, Base
->getType(), BaseInfo
, TBAAInfo
);
280 This
= EmitLValue(Base
);
283 if (const CXXConstructorDecl
*Ctor
= dyn_cast
<CXXConstructorDecl
>(MD
)) {
284 // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
285 // constructing a new complete object of type Ctor.
287 assert(ReturnValue
.isNull() && "Constructor shouldn't have return value");
289 commonEmitCXXMemberOrOperatorCall(
290 *this, {Ctor
, Ctor_Complete
}, This
.getPointer(*this),
291 /*ImplicitParam=*/nullptr,
292 /*ImplicitParamTy=*/QualType(), CE
, Args
, nullptr);
294 EmitCXXConstructorCall(Ctor
, Ctor_Complete
, /*ForVirtualBase=*/false,
295 /*Delegating=*/false, This
.getAddress(*this), Args
,
296 AggValueSlot::DoesNotOverlap
, CE
->getExprLoc(),
297 /*NewPointerIsChecked=*/false);
298 return RValue::get(nullptr);
301 if (TrivialForCodegen
) {
302 if (isa
<CXXDestructorDecl
>(MD
))
303 return RValue::get(nullptr);
305 if (TrivialAssignment
) {
306 // We don't like to generate the trivial copy/move assignment operator
307 // when it isn't necessary; just produce the proper effect here.
308 // It's important that we use the result of EmitLValue here rather than
309 // emitting call arguments, in order to preserve TBAA information from
311 LValue RHS
= isa
<CXXOperatorCallExpr
>(CE
)
312 ? TrivialAssignmentRHS
313 : EmitLValue(*CE
->arg_begin());
314 EmitAggregateAssign(This
, RHS
, CE
->getType());
315 return RValue::get(This
.getPointer(*this));
318 assert(MD
->getParent()->mayInsertExtraPadding() &&
319 "unknown trivial member function");
322 // Compute the function type we're calling.
323 const CXXMethodDecl
*CalleeDecl
=
324 DevirtualizedMethod
? DevirtualizedMethod
: MD
;
325 const CGFunctionInfo
*FInfo
= nullptr;
326 if (const auto *Dtor
= dyn_cast
<CXXDestructorDecl
>(CalleeDecl
))
327 FInfo
= &CGM
.getTypes().arrangeCXXStructorDeclaration(
328 GlobalDecl(Dtor
, Dtor_Complete
));
330 FInfo
= &CGM
.getTypes().arrangeCXXMethodDeclaration(CalleeDecl
);
332 llvm::FunctionType
*Ty
= CGM
.getTypes().GetFunctionType(*FInfo
);
334 // C++11 [class.mfct.non-static]p2:
335 // If a non-static member function of a class X is called for an object that
336 // is not of type X, or of a type derived from X, the behavior is undefined.
337 SourceLocation CallLoc
;
338 ASTContext
&C
= getContext();
340 CallLoc
= CE
->getExprLoc();
342 SanitizerSet SkippedChecks
;
343 if (const auto *CMCE
= dyn_cast
<CXXMemberCallExpr
>(CE
)) {
344 auto *IOA
= CMCE
->getImplicitObjectArgument();
345 bool IsImplicitObjectCXXThis
= IsWrappedCXXThis(IOA
);
346 if (IsImplicitObjectCXXThis
)
347 SkippedChecks
.set(SanitizerKind::Alignment
, true);
348 if (IsImplicitObjectCXXThis
|| isa
<DeclRefExpr
>(IOA
))
349 SkippedChecks
.set(SanitizerKind::Null
, true);
351 EmitTypeCheck(CodeGenFunction::TCK_MemberCall
, CallLoc
,
352 This
.getPointer(*this),
353 C
.getRecordType(CalleeDecl
->getParent()),
354 /*Alignment=*/CharUnits::Zero(), SkippedChecks
);
356 // C++ [class.virtual]p12:
357 // Explicit qualification with the scope operator (5.1) suppresses the
358 // virtual call mechanism.
360 // We also don't emit a virtual call if the base expression has a record type
361 // because then we know what the type is.
362 bool UseVirtualCall
= CanUseVirtualCall
&& !DevirtualizedMethod
;
364 if (const CXXDestructorDecl
*Dtor
= dyn_cast
<CXXDestructorDecl
>(CalleeDecl
)) {
365 assert(CE
->arg_begin() == CE
->arg_end() &&
366 "Destructor shouldn't have explicit parameters");
367 assert(ReturnValue
.isNull() && "Destructor shouldn't have return value");
368 if (UseVirtualCall
) {
369 CGM
.getCXXABI().EmitVirtualDestructorCall(*this, Dtor
, Dtor_Complete
,
370 This
.getAddress(*this),
371 cast
<CXXMemberCallExpr
>(CE
));
373 GlobalDecl
GD(Dtor
, Dtor_Complete
);
375 if (getLangOpts().AppleKext
&& Dtor
->isVirtual() && HasQualifier
)
376 Callee
= BuildAppleKextVirtualCall(Dtor
, Qualifier
, Ty
);
377 else if (!DevirtualizedMethod
)
379 CGCallee::forDirect(CGM
.getAddrOfCXXStructor(GD
, FInfo
, Ty
), GD
);
381 Callee
= CGCallee::forDirect(CGM
.GetAddrOfFunction(GD
, Ty
), GD
);
385 IsArrow
? Base
->getType()->getPointeeType() : Base
->getType();
386 EmitCXXDestructorCall(GD
, Callee
, This
.getPointer(*this), ThisTy
,
387 /*ImplicitParam=*/nullptr,
388 /*ImplicitParamTy=*/QualType(), CE
);
390 return RValue::get(nullptr);
393 // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
394 // 'CalleeDecl' instead.
397 if (UseVirtualCall
) {
398 Callee
= CGCallee::forVirtual(CE
, MD
, This
.getAddress(*this), Ty
);
400 if (SanOpts
.has(SanitizerKind::CFINVCall
) &&
401 MD
->getParent()->isDynamicClass()) {
403 const CXXRecordDecl
*RD
;
404 std::tie(VTable
, RD
) = CGM
.getCXXABI().LoadVTablePtr(
405 *this, This
.getAddress(*this), CalleeDecl
->getParent());
406 EmitVTablePtrCheckForCall(RD
, VTable
, CFITCK_NVCall
, CE
->getBeginLoc());
409 if (getLangOpts().AppleKext
&& MD
->isVirtual() && HasQualifier
)
410 Callee
= BuildAppleKextVirtualCall(MD
, Qualifier
, Ty
);
411 else if (!DevirtualizedMethod
)
413 CGCallee::forDirect(CGM
.GetAddrOfFunction(MD
, Ty
), GlobalDecl(MD
));
416 CGCallee::forDirect(CGM
.GetAddrOfFunction(DevirtualizedMethod
, Ty
),
417 GlobalDecl(DevirtualizedMethod
));
421 if (MD
->isVirtual()) {
422 Address NewThisAddr
=
423 CGM
.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
424 *this, CalleeDecl
, This
.getAddress(*this), UseVirtualCall
);
425 This
.setAddress(NewThisAddr
);
428 return EmitCXXMemberOrOperatorCall(
429 CalleeDecl
, Callee
, ReturnValue
, This
.getPointer(*this),
430 /*ImplicitParam=*/nullptr, QualType(), CE
, RtlArgs
);
434 CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr
*E
,
435 ReturnValueSlot ReturnValue
) {
436 const BinaryOperator
*BO
=
437 cast
<BinaryOperator
>(E
->getCallee()->IgnoreParens());
438 const Expr
*BaseExpr
= BO
->getLHS();
439 const Expr
*MemFnExpr
= BO
->getRHS();
441 const auto *MPT
= MemFnExpr
->getType()->castAs
<MemberPointerType
>();
442 const auto *FPT
= MPT
->getPointeeType()->castAs
<FunctionProtoType
>();
444 cast
<CXXRecordDecl
>(MPT
->getClass()->castAs
<RecordType
>()->getDecl());
446 // Emit the 'this' pointer.
447 Address This
= Address::invalid();
448 if (BO
->getOpcode() == BO_PtrMemI
)
449 This
= EmitPointerWithAlignment(BaseExpr
, nullptr, nullptr, KnownNonNull
);
451 This
= EmitLValue(BaseExpr
, KnownNonNull
).getAddress(*this);
453 EmitTypeCheck(TCK_MemberCall
, E
->getExprLoc(), This
.getPointer(),
454 QualType(MPT
->getClass(), 0));
456 // Get the member function pointer.
457 llvm::Value
*MemFnPtr
= EmitScalarExpr(MemFnExpr
);
459 // Ask the ABI to load the callee. Note that This is modified.
460 llvm::Value
*ThisPtrForCall
= nullptr;
462 CGM
.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO
, This
,
463 ThisPtrForCall
, MemFnPtr
, MPT
);
468 getContext().getPointerType(getContext().getTagDeclType(RD
));
470 // Push the this ptr.
471 Args
.add(RValue::get(ThisPtrForCall
), ThisType
);
473 RequiredArgs required
= RequiredArgs::forPrototypePlus(FPT
, 1);
475 // And the rest of the call args
476 EmitCallArgs(Args
, FPT
, E
->arguments());
477 return EmitCall(CGM
.getTypes().arrangeCXXMethodCall(Args
, FPT
, required
,
479 Callee
, ReturnValue
, Args
, nullptr, E
== MustTailCall
,
484 CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr
*E
,
485 const CXXMethodDecl
*MD
,
486 ReturnValueSlot ReturnValue
) {
487 assert(MD
->isInstance() &&
488 "Trying to emit a member call expr on a static method!");
489 return EmitCXXMemberOrOperatorMemberCallExpr(
490 E
, MD
, ReturnValue
, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
491 /*IsArrow=*/false, E
->getArg(0));
494 RValue
CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr
*E
,
495 ReturnValueSlot ReturnValue
) {
496 return CGM
.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E
, ReturnValue
);
499 static void EmitNullBaseClassInitialization(CodeGenFunction
&CGF
,
501 const CXXRecordDecl
*Base
) {
505 DestPtr
= CGF
.Builder
.CreateElementBitCast(DestPtr
, CGF
.Int8Ty
);
507 const ASTRecordLayout
&Layout
= CGF
.getContext().getASTRecordLayout(Base
);
508 CharUnits NVSize
= Layout
.getNonVirtualSize();
510 // We cannot simply zero-initialize the entire base sub-object if vbptrs are
511 // present, they are initialized by the most derived class before calling the
513 SmallVector
<std::pair
<CharUnits
, CharUnits
>, 1> Stores
;
514 Stores
.emplace_back(CharUnits::Zero(), NVSize
);
516 // Each store is split by the existence of a vbptr.
517 CharUnits VBPtrWidth
= CGF
.getPointerSize();
518 std::vector
<CharUnits
> VBPtrOffsets
=
519 CGF
.CGM
.getCXXABI().getVBPtrOffsets(Base
);
520 for (CharUnits VBPtrOffset
: VBPtrOffsets
) {
521 // Stop before we hit any virtual base pointers located in virtual bases.
522 if (VBPtrOffset
>= NVSize
)
524 std::pair
<CharUnits
, CharUnits
> LastStore
= Stores
.pop_back_val();
525 CharUnits LastStoreOffset
= LastStore
.first
;
526 CharUnits LastStoreSize
= LastStore
.second
;
528 CharUnits SplitBeforeOffset
= LastStoreOffset
;
529 CharUnits SplitBeforeSize
= VBPtrOffset
- SplitBeforeOffset
;
530 assert(!SplitBeforeSize
.isNegative() && "negative store size!");
531 if (!SplitBeforeSize
.isZero())
532 Stores
.emplace_back(SplitBeforeOffset
, SplitBeforeSize
);
534 CharUnits SplitAfterOffset
= VBPtrOffset
+ VBPtrWidth
;
535 CharUnits SplitAfterSize
= LastStoreSize
- SplitAfterOffset
;
536 assert(!SplitAfterSize
.isNegative() && "negative store size!");
537 if (!SplitAfterSize
.isZero())
538 Stores
.emplace_back(SplitAfterOffset
, SplitAfterSize
);
541 // If the type contains a pointer to data member we can't memset it to zero.
542 // Instead, create a null constant and copy it to the destination.
543 // TODO: there are other patterns besides zero that we can usefully memset,
544 // like -1, which happens to be the pattern used by member-pointers.
545 // TODO: isZeroInitializable can be over-conservative in the case where a
546 // virtual base contains a member pointer.
547 llvm::Constant
*NullConstantForBase
= CGF
.CGM
.EmitNullConstantForBase(Base
);
548 if (!NullConstantForBase
->isNullValue()) {
549 llvm::GlobalVariable
*NullVariable
= new llvm::GlobalVariable(
550 CGF
.CGM
.getModule(), NullConstantForBase
->getType(),
551 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage
,
552 NullConstantForBase
, Twine());
555 std::max(Layout
.getNonVirtualAlignment(), DestPtr
.getAlignment());
556 NullVariable
->setAlignment(Align
.getAsAlign());
559 Address(CGF
.EmitCastToVoidPtr(NullVariable
), CGF
.Int8Ty
, Align
);
561 // Get and call the appropriate llvm.memcpy overload.
562 for (std::pair
<CharUnits
, CharUnits
> Store
: Stores
) {
563 CharUnits StoreOffset
= Store
.first
;
564 CharUnits StoreSize
= Store
.second
;
565 llvm::Value
*StoreSizeVal
= CGF
.CGM
.getSize(StoreSize
);
566 CGF
.Builder
.CreateMemCpy(
567 CGF
.Builder
.CreateConstInBoundsByteGEP(DestPtr
, StoreOffset
),
568 CGF
.Builder
.CreateConstInBoundsByteGEP(SrcPtr
, StoreOffset
),
572 // Otherwise, just memset the whole thing to zero. This is legal
573 // because in LLVM, all default initializers (other than the ones we just
574 // handled above) are guaranteed to have a bit pattern of all zeros.
576 for (std::pair
<CharUnits
, CharUnits
> Store
: Stores
) {
577 CharUnits StoreOffset
= Store
.first
;
578 CharUnits StoreSize
= Store
.second
;
579 llvm::Value
*StoreSizeVal
= CGF
.CGM
.getSize(StoreSize
);
580 CGF
.Builder
.CreateMemSet(
581 CGF
.Builder
.CreateConstInBoundsByteGEP(DestPtr
, StoreOffset
),
582 CGF
.Builder
.getInt8(0), StoreSizeVal
);
588 CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr
*E
,
590 assert(!Dest
.isIgnored() && "Must have a destination!");
591 const CXXConstructorDecl
*CD
= E
->getConstructor();
593 // If we require zero initialization before (or instead of) calling the
594 // constructor, as can be the case with a non-user-provided default
595 // constructor, emit the zero initialization now, unless destination is
597 if (E
->requiresZeroInitialization() && !Dest
.isZeroed()) {
598 switch (E
->getConstructionKind()) {
599 case CXXConstructExpr::CK_Delegating
:
600 case CXXConstructExpr::CK_Complete
:
601 EmitNullInitialization(Dest
.getAddress(), E
->getType());
603 case CXXConstructExpr::CK_VirtualBase
:
604 case CXXConstructExpr::CK_NonVirtualBase
:
605 EmitNullBaseClassInitialization(*this, Dest
.getAddress(),
611 // If this is a call to a trivial default constructor, do nothing.
612 if (CD
->isTrivial() && CD
->isDefaultConstructor())
615 // Elide the constructor if we're constructing from a temporary.
616 if (getLangOpts().ElideConstructors
&& E
->isElidable()) {
617 // FIXME: This only handles the simplest case, where the source object
618 // is passed directly as the first argument to the constructor.
619 // This should also handle stepping though implicit casts and
620 // conversion sequences which involve two steps, with a
621 // conversion operator followed by a converting constructor.
622 const Expr
*SrcObj
= E
->getArg(0);
623 assert(SrcObj
->isTemporaryObject(getContext(), CD
->getParent()));
625 getContext().hasSameUnqualifiedType(E
->getType(), SrcObj
->getType()));
626 EmitAggExpr(SrcObj
, Dest
);
630 if (const ArrayType
*arrayType
631 = getContext().getAsArrayType(E
->getType())) {
632 EmitCXXAggrConstructorCall(CD
, arrayType
, Dest
.getAddress(), E
,
633 Dest
.isSanitizerChecked());
635 CXXCtorType Type
= Ctor_Complete
;
636 bool ForVirtualBase
= false;
637 bool Delegating
= false;
639 switch (E
->getConstructionKind()) {
640 case CXXConstructExpr::CK_Delegating
:
641 // We should be emitting a constructor; GlobalDecl will assert this
642 Type
= CurGD
.getCtorType();
646 case CXXConstructExpr::CK_Complete
:
647 Type
= Ctor_Complete
;
650 case CXXConstructExpr::CK_VirtualBase
:
651 ForVirtualBase
= true;
654 case CXXConstructExpr::CK_NonVirtualBase
:
658 // Call the constructor.
659 EmitCXXConstructorCall(CD
, Type
, ForVirtualBase
, Delegating
, Dest
, E
);
663 void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest
, Address Src
,
665 if (const ExprWithCleanups
*E
= dyn_cast
<ExprWithCleanups
>(Exp
))
666 Exp
= E
->getSubExpr();
667 assert(isa
<CXXConstructExpr
>(Exp
) &&
668 "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
669 const CXXConstructExpr
* E
= cast
<CXXConstructExpr
>(Exp
);
670 const CXXConstructorDecl
*CD
= E
->getConstructor();
671 RunCleanupsScope
Scope(*this);
673 // If we require zero initialization before (or instead of) calling the
674 // constructor, as can be the case with a non-user-provided default
675 // constructor, emit the zero initialization now.
676 // FIXME. Do I still need this for a copy ctor synthesis?
677 if (E
->requiresZeroInitialization())
678 EmitNullInitialization(Dest
, E
->getType());
680 assert(!getContext().getAsConstantArrayType(E
->getType())
681 && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
682 EmitSynthesizedCXXCopyCtorCall(CD
, Dest
, Src
, E
);
685 static CharUnits
CalculateCookiePadding(CodeGenFunction
&CGF
,
686 const CXXNewExpr
*E
) {
688 return CharUnits::Zero();
690 // No cookie is required if the operator new[] being used is the
691 // reserved placement operator new[].
692 if (E
->getOperatorNew()->isReservedGlobalPlacementOperator())
693 return CharUnits::Zero();
695 return CGF
.CGM
.getCXXABI().GetArrayCookieSize(E
);
698 static llvm::Value
*EmitCXXNewAllocSize(CodeGenFunction
&CGF
,
700 unsigned minElements
,
701 llvm::Value
*&numElements
,
702 llvm::Value
*&sizeWithoutCookie
) {
703 QualType type
= e
->getAllocatedType();
706 CharUnits typeSize
= CGF
.getContext().getTypeSizeInChars(type
);
708 = llvm::ConstantInt::get(CGF
.SizeTy
, typeSize
.getQuantity());
709 return sizeWithoutCookie
;
712 // The width of size_t.
713 unsigned sizeWidth
= CGF
.SizeTy
->getBitWidth();
715 // Figure out the cookie size.
716 llvm::APInt
cookieSize(sizeWidth
,
717 CalculateCookiePadding(CGF
, e
).getQuantity());
719 // Emit the array size expression.
720 // We multiply the size of all dimensions for NumElements.
721 // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
723 ConstantEmitter(CGF
).tryEmitAbstract(*e
->getArraySize(), e
->getType());
725 numElements
= CGF
.EmitScalarExpr(*e
->getArraySize());
726 assert(isa
<llvm::IntegerType
>(numElements
->getType()));
728 // The number of elements can be have an arbitrary integer type;
729 // essentially, we need to multiply it by a constant factor, add a
730 // cookie size, and verify that the result is representable as a
731 // size_t. That's just a gloss, though, and it's wrong in one
732 // important way: if the count is negative, it's an error even if
733 // the cookie size would bring the total size >= 0.
735 = (*e
->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
736 llvm::IntegerType
*numElementsType
737 = cast
<llvm::IntegerType
>(numElements
->getType());
738 unsigned numElementsWidth
= numElementsType
->getBitWidth();
740 // Compute the constant factor.
741 llvm::APInt
arraySizeMultiplier(sizeWidth
, 1);
742 while (const ConstantArrayType
*CAT
743 = CGF
.getContext().getAsConstantArrayType(type
)) {
744 type
= CAT
->getElementType();
745 arraySizeMultiplier
*= CAT
->getSize();
748 CharUnits typeSize
= CGF
.getContext().getTypeSizeInChars(type
);
749 llvm::APInt
typeSizeMultiplier(sizeWidth
, typeSize
.getQuantity());
750 typeSizeMultiplier
*= arraySizeMultiplier
;
752 // This will be a size_t.
755 // If someone is doing 'new int[42]' there is no need to do a dynamic check.
756 // Don't bloat the -O0 code.
757 if (llvm::ConstantInt
*numElementsC
=
758 dyn_cast
<llvm::ConstantInt
>(numElements
)) {
759 const llvm::APInt
&count
= numElementsC
->getValue();
761 bool hasAnyOverflow
= false;
763 // If 'count' was a negative number, it's an overflow.
764 if (isSigned
&& count
.isNegative())
765 hasAnyOverflow
= true;
767 // We want to do all this arithmetic in size_t. If numElements is
768 // wider than that, check whether it's already too big, and if so,
770 else if (numElementsWidth
> sizeWidth
&&
771 numElementsWidth
- sizeWidth
> count
.countl_zero())
772 hasAnyOverflow
= true;
774 // Okay, compute a count at the right width.
775 llvm::APInt adjustedCount
= count
.zextOrTrunc(sizeWidth
);
777 // If there is a brace-initializer, we cannot allocate fewer elements than
778 // there are initializers. If we do, that's treated like an overflow.
779 if (adjustedCount
.ult(minElements
))
780 hasAnyOverflow
= true;
782 // Scale numElements by that. This might overflow, but we don't
783 // care because it only overflows if allocationSize does, too, and
784 // if that overflows then we shouldn't use this.
785 numElements
= llvm::ConstantInt::get(CGF
.SizeTy
,
786 adjustedCount
* arraySizeMultiplier
);
788 // Compute the size before cookie, and track whether it overflowed.
790 llvm::APInt allocationSize
791 = adjustedCount
.umul_ov(typeSizeMultiplier
, overflow
);
792 hasAnyOverflow
|= overflow
;
794 // Add in the cookie, and check whether it's overflowed.
795 if (cookieSize
!= 0) {
796 // Save the current size without a cookie. This shouldn't be
797 // used if there was overflow.
798 sizeWithoutCookie
= llvm::ConstantInt::get(CGF
.SizeTy
, allocationSize
);
800 allocationSize
= allocationSize
.uadd_ov(cookieSize
, overflow
);
801 hasAnyOverflow
|= overflow
;
804 // On overflow, produce a -1 so operator new will fail.
805 if (hasAnyOverflow
) {
806 size
= llvm::Constant::getAllOnesValue(CGF
.SizeTy
);
808 size
= llvm::ConstantInt::get(CGF
.SizeTy
, allocationSize
);
811 // Otherwise, we might need to use the overflow intrinsics.
813 // There are up to five conditions we need to test for:
814 // 1) if isSigned, we need to check whether numElements is negative;
815 // 2) if numElementsWidth > sizeWidth, we need to check whether
816 // numElements is larger than something representable in size_t;
817 // 3) if minElements > 0, we need to check whether numElements is smaller
819 // 4) we need to compute
820 // sizeWithoutCookie := numElements * typeSizeMultiplier
821 // and check whether it overflows; and
822 // 5) if we need a cookie, we need to compute
823 // size := sizeWithoutCookie + cookieSize
824 // and check whether it overflows.
826 llvm::Value
*hasOverflow
= nullptr;
828 // If numElementsWidth > sizeWidth, then one way or another, we're
829 // going to have to do a comparison for (2), and this happens to
830 // take care of (1), too.
831 if (numElementsWidth
> sizeWidth
) {
832 llvm::APInt threshold
=
833 llvm::APInt::getOneBitSet(numElementsWidth
, sizeWidth
);
835 llvm::Value
*thresholdV
836 = llvm::ConstantInt::get(numElementsType
, threshold
);
838 hasOverflow
= CGF
.Builder
.CreateICmpUGE(numElements
, thresholdV
);
839 numElements
= CGF
.Builder
.CreateTrunc(numElements
, CGF
.SizeTy
);
841 // Otherwise, if we're signed, we want to sext up to size_t.
842 } else if (isSigned
) {
843 if (numElementsWidth
< sizeWidth
)
844 numElements
= CGF
.Builder
.CreateSExt(numElements
, CGF
.SizeTy
);
846 // If there's a non-1 type size multiplier, then we can do the
847 // signedness check at the same time as we do the multiply
848 // because a negative number times anything will cause an
849 // unsigned overflow. Otherwise, we have to do it here. But at least
850 // in this case, we can subsume the >= minElements check.
851 if (typeSizeMultiplier
== 1)
852 hasOverflow
= CGF
.Builder
.CreateICmpSLT(numElements
,
853 llvm::ConstantInt::get(CGF
.SizeTy
, minElements
));
855 // Otherwise, zext up to size_t if necessary.
856 } else if (numElementsWidth
< sizeWidth
) {
857 numElements
= CGF
.Builder
.CreateZExt(numElements
, CGF
.SizeTy
);
860 assert(numElements
->getType() == CGF
.SizeTy
);
863 // Don't allow allocation of fewer elements than we have initializers.
865 hasOverflow
= CGF
.Builder
.CreateICmpULT(numElements
,
866 llvm::ConstantInt::get(CGF
.SizeTy
, minElements
));
867 } else if (numElementsWidth
> sizeWidth
) {
868 // The other existing overflow subsumes this check.
869 // We do an unsigned comparison, since any signed value < -1 is
870 // taken care of either above or below.
871 hasOverflow
= CGF
.Builder
.CreateOr(hasOverflow
,
872 CGF
.Builder
.CreateICmpULT(numElements
,
873 llvm::ConstantInt::get(CGF
.SizeTy
, minElements
)));
879 // Multiply by the type size if necessary. This multiplier
880 // includes all the factors for nested arrays.
882 // This step also causes numElements to be scaled up by the
883 // nested-array factor if necessary. Overflow on this computation
884 // can be ignored because the result shouldn't be used if
886 if (typeSizeMultiplier
!= 1) {
887 llvm::Function
*umul_with_overflow
888 = CGF
.CGM
.getIntrinsic(llvm::Intrinsic::umul_with_overflow
, CGF
.SizeTy
);
891 llvm::ConstantInt::get(CGF
.SizeTy
, typeSizeMultiplier
);
892 llvm::Value
*result
=
893 CGF
.Builder
.CreateCall(umul_with_overflow
, {size
, tsmV
});
895 llvm::Value
*overflowed
= CGF
.Builder
.CreateExtractValue(result
, 1);
897 hasOverflow
= CGF
.Builder
.CreateOr(hasOverflow
, overflowed
);
899 hasOverflow
= overflowed
;
901 size
= CGF
.Builder
.CreateExtractValue(result
, 0);
903 // Also scale up numElements by the array size multiplier.
904 if (arraySizeMultiplier
!= 1) {
905 // If the base element type size is 1, then we can re-use the
906 // multiply we just did.
907 if (typeSize
.isOne()) {
908 assert(arraySizeMultiplier
== typeSizeMultiplier
);
911 // Otherwise we need a separate multiply.
914 llvm::ConstantInt::get(CGF
.SizeTy
, arraySizeMultiplier
);
915 numElements
= CGF
.Builder
.CreateMul(numElements
, asmV
);
919 // numElements doesn't need to be scaled.
920 assert(arraySizeMultiplier
== 1);
923 // Add in the cookie size if necessary.
924 if (cookieSize
!= 0) {
925 sizeWithoutCookie
= size
;
927 llvm::Function
*uadd_with_overflow
928 = CGF
.CGM
.getIntrinsic(llvm::Intrinsic::uadd_with_overflow
, CGF
.SizeTy
);
930 llvm::Value
*cookieSizeV
= llvm::ConstantInt::get(CGF
.SizeTy
, cookieSize
);
931 llvm::Value
*result
=
932 CGF
.Builder
.CreateCall(uadd_with_overflow
, {size
, cookieSizeV
});
934 llvm::Value
*overflowed
= CGF
.Builder
.CreateExtractValue(result
, 1);
936 hasOverflow
= CGF
.Builder
.CreateOr(hasOverflow
, overflowed
);
938 hasOverflow
= overflowed
;
940 size
= CGF
.Builder
.CreateExtractValue(result
, 0);
943 // If we had any possibility of dynamic overflow, make a select to
944 // overwrite 'size' with an all-ones value, which should cause
945 // operator new to throw.
947 size
= CGF
.Builder
.CreateSelect(hasOverflow
,
948 llvm::Constant::getAllOnesValue(CGF
.SizeTy
),
953 sizeWithoutCookie
= size
;
955 assert(sizeWithoutCookie
&& "didn't set sizeWithoutCookie?");
960 static void StoreAnyExprIntoOneUnit(CodeGenFunction
&CGF
, const Expr
*Init
,
961 QualType AllocType
, Address NewPtr
,
962 AggValueSlot::Overlap_t MayOverlap
) {
963 // FIXME: Refactor with EmitExprAsInit.
964 switch (CGF
.getEvaluationKind(AllocType
)) {
966 CGF
.EmitScalarInit(Init
, nullptr,
967 CGF
.MakeAddrLValue(NewPtr
, AllocType
), false);
970 CGF
.EmitComplexExprIntoLValue(Init
, CGF
.MakeAddrLValue(NewPtr
, AllocType
),
973 case TEK_Aggregate
: {
975 = AggValueSlot::forAddr(NewPtr
, AllocType
.getQualifiers(),
976 AggValueSlot::IsDestructed
,
977 AggValueSlot::DoesNotNeedGCBarriers
,
978 AggValueSlot::IsNotAliased
,
979 MayOverlap
, AggValueSlot::IsNotZeroed
,
980 AggValueSlot::IsSanitizerChecked
);
981 CGF
.EmitAggExpr(Init
, Slot
);
985 llvm_unreachable("bad evaluation kind");
988 void CodeGenFunction::EmitNewArrayInitializer(
989 const CXXNewExpr
*E
, QualType ElementType
, llvm::Type
*ElementTy
,
990 Address BeginPtr
, llvm::Value
*NumElements
,
991 llvm::Value
*AllocSizeWithoutCookie
) {
992 // If we have a type with trivial initialization and no initializer,
993 // there's nothing to do.
994 if (!E
->hasInitializer())
997 Address CurPtr
= BeginPtr
;
999 unsigned InitListElements
= 0;
1001 const Expr
*Init
= E
->getInitializer();
1002 Address EndOfInit
= Address::invalid();
1003 QualType::DestructionKind DtorKind
= ElementType
.isDestructedType();
1004 EHScopeStack::stable_iterator Cleanup
;
1005 llvm::Instruction
*CleanupDominator
= nullptr;
1007 CharUnits ElementSize
= getContext().getTypeSizeInChars(ElementType
);
1008 CharUnits ElementAlign
=
1009 BeginPtr
.getAlignment().alignmentOfArrayElement(ElementSize
);
1011 // Attempt to perform zero-initialization using memset.
1012 auto TryMemsetInitialization
= [&]() -> bool {
1013 // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
1014 // we can initialize with a memset to -1.
1015 if (!CGM
.getTypes().isZeroInitializable(ElementType
))
1018 // Optimization: since zero initialization will just set the memory
1019 // to all zeroes, generate a single memset to do it in one shot.
1021 // Subtract out the size of any elements we've already initialized.
1022 auto *RemainingSize
= AllocSizeWithoutCookie
;
1023 if (InitListElements
) {
1024 // We know this can't overflow; we check this when doing the allocation.
1025 auto *InitializedSize
= llvm::ConstantInt::get(
1026 RemainingSize
->getType(),
1027 getContext().getTypeSizeInChars(ElementType
).getQuantity() *
1029 RemainingSize
= Builder
.CreateSub(RemainingSize
, InitializedSize
);
1032 // Create the memset.
1033 Builder
.CreateMemSet(CurPtr
, Builder
.getInt8(0), RemainingSize
, false);
1037 // If the initializer is an initializer list, first do the explicit elements.
1038 if (const InitListExpr
*ILE
= dyn_cast
<InitListExpr
>(Init
)) {
1039 // Initializing from a (braced) string literal is a special case; the init
1040 // list element does not initialize a (single) array element.
1041 if (ILE
->isStringLiteralInit()) {
1042 // Initialize the initial portion of length equal to that of the string
1043 // literal. The allocation must be for at least this much; we emitted a
1044 // check for that earlier.
1046 AggValueSlot::forAddr(CurPtr
, ElementType
.getQualifiers(),
1047 AggValueSlot::IsDestructed
,
1048 AggValueSlot::DoesNotNeedGCBarriers
,
1049 AggValueSlot::IsNotAliased
,
1050 AggValueSlot::DoesNotOverlap
,
1051 AggValueSlot::IsNotZeroed
,
1052 AggValueSlot::IsSanitizerChecked
);
1053 EmitAggExpr(ILE
->getInit(0), Slot
);
1055 // Move past these elements.
1057 cast
<ConstantArrayType
>(ILE
->getType()->getAsArrayTypeUnsafe())
1058 ->getSize().getZExtValue();
1059 CurPtr
= Builder
.CreateConstInBoundsGEP(
1060 CurPtr
, InitListElements
, "string.init.end");
1062 // Zero out the rest, if any remain.
1063 llvm::ConstantInt
*ConstNum
= dyn_cast
<llvm::ConstantInt
>(NumElements
);
1064 if (!ConstNum
|| !ConstNum
->equalsInt(InitListElements
)) {
1065 bool OK
= TryMemsetInitialization();
1067 assert(OK
&& "couldn't memset character type?");
1072 InitListElements
= ILE
->getNumInits();
1074 // If this is a multi-dimensional array new, we will initialize multiple
1075 // elements with each init list element.
1076 QualType AllocType
= E
->getAllocatedType();
1077 if (const ConstantArrayType
*CAT
= dyn_cast_or_null
<ConstantArrayType
>(
1078 AllocType
->getAsArrayTypeUnsafe())) {
1079 ElementTy
= ConvertTypeForMem(AllocType
);
1080 CurPtr
= Builder
.CreateElementBitCast(CurPtr
, ElementTy
);
1081 InitListElements
*= getContext().getConstantArrayElementCount(CAT
);
1084 // Enter a partial-destruction Cleanup if necessary.
1085 if (needsEHCleanup(DtorKind
)) {
1086 // In principle we could tell the Cleanup where we are more
1087 // directly, but the control flow can get so varied here that it
1088 // would actually be quite complex. Therefore we go through an
1090 EndOfInit
= CreateTempAlloca(BeginPtr
.getType(), getPointerAlign(),
1092 CleanupDominator
= Builder
.CreateStore(BeginPtr
.getPointer(), EndOfInit
);
1093 pushIrregularPartialArrayCleanup(BeginPtr
.getPointer(), EndOfInit
,
1094 ElementType
, ElementAlign
,
1095 getDestroyer(DtorKind
));
1096 Cleanup
= EHStack
.stable_begin();
1099 CharUnits StartAlign
= CurPtr
.getAlignment();
1100 for (unsigned i
= 0, e
= ILE
->getNumInits(); i
!= e
; ++i
) {
1101 // Tell the cleanup that it needs to destroy up to this
1102 // element. TODO: some of these stores can be trivially
1103 // observed to be unnecessary.
1104 if (EndOfInit
.isValid()) {
1106 Builder
.CreateBitCast(CurPtr
.getPointer(), BeginPtr
.getType());
1107 Builder
.CreateStore(FinishedPtr
, EndOfInit
);
1109 // FIXME: If the last initializer is an incomplete initializer list for
1110 // an array, and we have an array filler, we can fold together the two
1111 // initialization loops.
1112 StoreAnyExprIntoOneUnit(*this, ILE
->getInit(i
),
1113 ILE
->getInit(i
)->getType(), CurPtr
,
1114 AggValueSlot::DoesNotOverlap
);
1115 CurPtr
= Address(Builder
.CreateInBoundsGEP(
1116 CurPtr
.getElementType(), CurPtr
.getPointer(),
1117 Builder
.getSize(1), "array.exp.next"),
1118 CurPtr
.getElementType(),
1119 StartAlign
.alignmentAtOffset((i
+ 1) * ElementSize
));
1122 // The remaining elements are filled with the array filler expression.
1123 Init
= ILE
->getArrayFiller();
1125 // Extract the initializer for the individual array elements by pulling
1126 // out the array filler from all the nested initializer lists. This avoids
1127 // generating a nested loop for the initialization.
1128 while (Init
&& Init
->getType()->isConstantArrayType()) {
1129 auto *SubILE
= dyn_cast
<InitListExpr
>(Init
);
1132 assert(SubILE
->getNumInits() == 0 && "explicit inits in array filler?");
1133 Init
= SubILE
->getArrayFiller();
1136 // Switch back to initializing one base element at a time.
1137 CurPtr
= Builder
.CreateElementBitCast(CurPtr
, BeginPtr
.getElementType());
1140 // If all elements have already been initialized, skip any further
1142 llvm::ConstantInt
*ConstNum
= dyn_cast
<llvm::ConstantInt
>(NumElements
);
1143 if (ConstNum
&& ConstNum
->getZExtValue() <= InitListElements
) {
1144 // If there was a Cleanup, deactivate it.
1145 if (CleanupDominator
)
1146 DeactivateCleanupBlock(Cleanup
, CleanupDominator
);
1150 assert(Init
&& "have trailing elements to initialize but no initializer");
1152 // If this is a constructor call, try to optimize it out, and failing that
1153 // emit a single loop to initialize all remaining elements.
1154 if (const CXXConstructExpr
*CCE
= dyn_cast
<CXXConstructExpr
>(Init
)) {
1155 CXXConstructorDecl
*Ctor
= CCE
->getConstructor();
1156 if (Ctor
->isTrivial()) {
1157 // If new expression did not specify value-initialization, then there
1158 // is no initialization.
1159 if (!CCE
->requiresZeroInitialization() || Ctor
->getParent()->isEmpty())
1162 if (TryMemsetInitialization())
1166 // Store the new Cleanup position for irregular Cleanups.
1168 // FIXME: Share this cleanup with the constructor call emission rather than
1169 // having it create a cleanup of its own.
1170 if (EndOfInit
.isValid())
1171 Builder
.CreateStore(CurPtr
.getPointer(), EndOfInit
);
1173 // Emit a constructor call loop to initialize the remaining elements.
1174 if (InitListElements
)
1175 NumElements
= Builder
.CreateSub(
1177 llvm::ConstantInt::get(NumElements
->getType(), InitListElements
));
1178 EmitCXXAggrConstructorCall(Ctor
, NumElements
, CurPtr
, CCE
,
1179 /*NewPointerIsChecked*/true,
1180 CCE
->requiresZeroInitialization());
1184 // If this is value-initialization, we can usually use memset.
1185 ImplicitValueInitExpr
IVIE(ElementType
);
1186 if (isa
<ImplicitValueInitExpr
>(Init
)) {
1187 if (TryMemsetInitialization())
1190 // Switch to an ImplicitValueInitExpr for the element type. This handles
1191 // only one case: multidimensional array new of pointers to members. In
1192 // all other cases, we already have an initializer for the array element.
1196 // At this point we should have found an initializer for the individual
1197 // elements of the array.
1198 assert(getContext().hasSameUnqualifiedType(ElementType
, Init
->getType()) &&
1199 "got wrong type of element to initialize");
1201 // If we have an empty initializer list, we can usually use memset.
1202 if (auto *ILE
= dyn_cast
<InitListExpr
>(Init
))
1203 if (ILE
->getNumInits() == 0 && TryMemsetInitialization())
1206 // If we have a struct whose every field is value-initialized, we can
1207 // usually use memset.
1208 if (auto *ILE
= dyn_cast
<InitListExpr
>(Init
)) {
1209 if (const RecordType
*RType
= ILE
->getType()->getAs
<RecordType
>()) {
1210 if (RType
->getDecl()->isStruct()) {
1211 unsigned NumElements
= 0;
1212 if (auto *CXXRD
= dyn_cast
<CXXRecordDecl
>(RType
->getDecl()))
1213 NumElements
= CXXRD
->getNumBases();
1214 for (auto *Field
: RType
->getDecl()->fields())
1215 if (!Field
->isUnnamedBitfield())
1217 // FIXME: Recurse into nested InitListExprs.
1218 if (ILE
->getNumInits() == NumElements
)
1219 for (unsigned i
= 0, e
= ILE
->getNumInits(); i
!= e
; ++i
)
1220 if (!isa
<ImplicitValueInitExpr
>(ILE
->getInit(i
)))
1222 if (ILE
->getNumInits() == NumElements
&& TryMemsetInitialization())
1228 // Create the loop blocks.
1229 llvm::BasicBlock
*EntryBB
= Builder
.GetInsertBlock();
1230 llvm::BasicBlock
*LoopBB
= createBasicBlock("new.loop");
1231 llvm::BasicBlock
*ContBB
= createBasicBlock("new.loop.end");
1233 // Find the end of the array, hoisted out of the loop.
1234 llvm::Value
*EndPtr
=
1235 Builder
.CreateInBoundsGEP(BeginPtr
.getElementType(), BeginPtr
.getPointer(),
1236 NumElements
, "array.end");
1238 // If the number of elements isn't constant, we have to now check if there is
1239 // anything left to initialize.
1241 llvm::Value
*IsEmpty
=
1242 Builder
.CreateICmpEQ(CurPtr
.getPointer(), EndPtr
, "array.isempty");
1243 Builder
.CreateCondBr(IsEmpty
, ContBB
, LoopBB
);
1249 // Set up the current-element phi.
1250 llvm::PHINode
*CurPtrPhi
=
1251 Builder
.CreatePHI(CurPtr
.getType(), 2, "array.cur");
1252 CurPtrPhi
->addIncoming(CurPtr
.getPointer(), EntryBB
);
1254 CurPtr
= Address(CurPtrPhi
, CurPtr
.getElementType(), ElementAlign
);
1256 // Store the new Cleanup position for irregular Cleanups.
1257 if (EndOfInit
.isValid())
1258 Builder
.CreateStore(CurPtr
.getPointer(), EndOfInit
);
1260 // Enter a partial-destruction Cleanup if necessary.
1261 if (!CleanupDominator
&& needsEHCleanup(DtorKind
)) {
1262 pushRegularPartialArrayCleanup(BeginPtr
.getPointer(), CurPtr
.getPointer(),
1263 ElementType
, ElementAlign
,
1264 getDestroyer(DtorKind
));
1265 Cleanup
= EHStack
.stable_begin();
1266 CleanupDominator
= Builder
.CreateUnreachable();
1269 // Emit the initializer into this element.
1270 StoreAnyExprIntoOneUnit(*this, Init
, Init
->getType(), CurPtr
,
1271 AggValueSlot::DoesNotOverlap
);
1273 // Leave the Cleanup if we entered one.
1274 if (CleanupDominator
) {
1275 DeactivateCleanupBlock(Cleanup
, CleanupDominator
);
1276 CleanupDominator
->eraseFromParent();
1279 // Advance to the next element by adjusting the pointer type as necessary.
1280 llvm::Value
*NextPtr
=
1281 Builder
.CreateConstInBoundsGEP1_32(ElementTy
, CurPtr
.getPointer(), 1,
1284 // Check whether we've gotten to the end of the array and, if so,
1286 llvm::Value
*IsEnd
= Builder
.CreateICmpEQ(NextPtr
, EndPtr
, "array.atend");
1287 Builder
.CreateCondBr(IsEnd
, ContBB
, LoopBB
);
1288 CurPtrPhi
->addIncoming(NextPtr
, Builder
.GetInsertBlock());
1293 static void EmitNewInitializer(CodeGenFunction
&CGF
, const CXXNewExpr
*E
,
1294 QualType ElementType
, llvm::Type
*ElementTy
,
1295 Address NewPtr
, llvm::Value
*NumElements
,
1296 llvm::Value
*AllocSizeWithoutCookie
) {
1297 ApplyDebugLocation
DL(CGF
, E
);
1299 CGF
.EmitNewArrayInitializer(E
, ElementType
, ElementTy
, NewPtr
, NumElements
,
1300 AllocSizeWithoutCookie
);
1301 else if (const Expr
*Init
= E
->getInitializer())
1302 StoreAnyExprIntoOneUnit(CGF
, Init
, E
->getAllocatedType(), NewPtr
,
1303 AggValueSlot::DoesNotOverlap
);
1306 /// Emit a call to an operator new or operator delete function, as implicitly
1307 /// created by new-expressions and delete-expressions.
1308 static RValue
EmitNewDeleteCall(CodeGenFunction
&CGF
,
1309 const FunctionDecl
*CalleeDecl
,
1310 const FunctionProtoType
*CalleeType
,
1311 const CallArgList
&Args
) {
1312 llvm::CallBase
*CallOrInvoke
;
1313 llvm::Constant
*CalleePtr
= CGF
.CGM
.GetAddrOfFunction(CalleeDecl
);
1314 CGCallee Callee
= CGCallee::forDirect(CalleePtr
, GlobalDecl(CalleeDecl
));
1316 CGF
.EmitCall(CGF
.CGM
.getTypes().arrangeFreeFunctionCall(
1317 Args
, CalleeType
, /*ChainCall=*/false),
1318 Callee
, ReturnValueSlot(), Args
, &CallOrInvoke
);
1320 /// C++1y [expr.new]p10:
1321 /// [In a new-expression,] an implementation is allowed to omit a call
1322 /// to a replaceable global allocation function.
1324 /// We model such elidable calls with the 'builtin' attribute.
1325 llvm::Function
*Fn
= dyn_cast
<llvm::Function
>(CalleePtr
);
1326 if (CalleeDecl
->isReplaceableGlobalAllocationFunction() &&
1327 Fn
&& Fn
->hasFnAttribute(llvm::Attribute::NoBuiltin
)) {
1328 CallOrInvoke
->addFnAttr(llvm::Attribute::Builtin
);
1334 RValue
CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType
*Type
,
1335 const CallExpr
*TheCall
,
1338 EmitCallArgs(Args
, Type
, TheCall
->arguments());
1339 // Find the allocation or deallocation function that we're calling.
1340 ASTContext
&Ctx
= getContext();
1341 DeclarationName Name
= Ctx
.DeclarationNames
1342 .getCXXOperatorName(IsDelete
? OO_Delete
: OO_New
);
1344 for (auto *Decl
: Ctx
.getTranslationUnitDecl()->lookup(Name
))
1345 if (auto *FD
= dyn_cast
<FunctionDecl
>(Decl
))
1346 if (Ctx
.hasSameType(FD
->getType(), QualType(Type
, 0)))
1347 return EmitNewDeleteCall(*this, FD
, Type
, Args
);
1348 llvm_unreachable("predeclared global operator new/delete is missing");
1352 /// The parameters to pass to a usual operator delete.
1353 struct UsualDeleteParams
{
1354 bool DestroyingDelete
= false;
1356 bool Alignment
= false;
1360 static UsualDeleteParams
getUsualDeleteParams(const FunctionDecl
*FD
) {
1361 UsualDeleteParams Params
;
1363 const FunctionProtoType
*FPT
= FD
->getType()->castAs
<FunctionProtoType
>();
1364 auto AI
= FPT
->param_type_begin(), AE
= FPT
->param_type_end();
1366 // The first argument is always a void*.
1369 // The next parameter may be a std::destroying_delete_t.
1370 if (FD
->isDestroyingOperatorDelete()) {
1371 Params
.DestroyingDelete
= true;
1376 // Figure out what other parameters we should be implicitly passing.
1377 if (AI
!= AE
&& (*AI
)->isIntegerType()) {
1382 if (AI
!= AE
&& (*AI
)->isAlignValT()) {
1383 Params
.Alignment
= true;
1387 assert(AI
== AE
&& "unexpected usual deallocation function parameter");
1392 /// A cleanup to call the given 'operator delete' function upon abnormal
1393 /// exit from a new expression. Templated on a traits type that deals with
1394 /// ensuring that the arguments dominate the cleanup if necessary.
1395 template<typename Traits
>
1396 class CallDeleteDuringNew final
: public EHScopeStack::Cleanup
{
1397 /// Type used to hold llvm::Value*s.
1398 typedef typename
Traits::ValueTy ValueTy
;
1399 /// Type used to hold RValues.
1400 typedef typename
Traits::RValueTy RValueTy
;
1401 struct PlacementArg
{
1406 unsigned NumPlacementArgs
: 31;
1407 unsigned PassAlignmentToPlacementDelete
: 1;
1408 const FunctionDecl
*OperatorDelete
;
1411 CharUnits AllocAlign
;
1413 PlacementArg
*getPlacementArgs() {
1414 return reinterpret_cast<PlacementArg
*>(this + 1);
1418 static size_t getExtraSize(size_t NumPlacementArgs
) {
1419 return NumPlacementArgs
* sizeof(PlacementArg
);
1422 CallDeleteDuringNew(size_t NumPlacementArgs
,
1423 const FunctionDecl
*OperatorDelete
, ValueTy Ptr
,
1424 ValueTy AllocSize
, bool PassAlignmentToPlacementDelete
,
1425 CharUnits AllocAlign
)
1426 : NumPlacementArgs(NumPlacementArgs
),
1427 PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete
),
1428 OperatorDelete(OperatorDelete
), Ptr(Ptr
), AllocSize(AllocSize
),
1429 AllocAlign(AllocAlign
) {}
1431 void setPlacementArg(unsigned I
, RValueTy Arg
, QualType Type
) {
1432 assert(I
< NumPlacementArgs
&& "index out of range");
1433 getPlacementArgs()[I
] = {Arg
, Type
};
1436 void Emit(CodeGenFunction
&CGF
, Flags flags
) override
{
1437 const auto *FPT
= OperatorDelete
->getType()->castAs
<FunctionProtoType
>();
1438 CallArgList DeleteArgs
;
1440 // The first argument is always a void* (or C* for a destroying operator
1441 // delete for class type C).
1442 DeleteArgs
.add(Traits::get(CGF
, Ptr
), FPT
->getParamType(0));
1444 // Figure out what other parameters we should be implicitly passing.
1445 UsualDeleteParams Params
;
1446 if (NumPlacementArgs
) {
1447 // A placement deallocation function is implicitly passed an alignment
1448 // if the placement allocation function was, but is never passed a size.
1449 Params
.Alignment
= PassAlignmentToPlacementDelete
;
1451 // For a non-placement new-expression, 'operator delete' can take a
1452 // size and/or an alignment if it has the right parameters.
1453 Params
= getUsualDeleteParams(OperatorDelete
);
1456 assert(!Params
.DestroyingDelete
&&
1457 "should not call destroying delete in a new-expression");
1459 // The second argument can be a std::size_t (for non-placement delete).
1461 DeleteArgs
.add(Traits::get(CGF
, AllocSize
),
1462 CGF
.getContext().getSizeType());
1464 // The next (second or third) argument can be a std::align_val_t, which
1465 // is an enum whose underlying type is std::size_t.
1466 // FIXME: Use the right type as the parameter type. Note that in a call
1467 // to operator delete(size_t, ...), we may not have it available.
1468 if (Params
.Alignment
)
1469 DeleteArgs
.add(RValue::get(llvm::ConstantInt::get(
1470 CGF
.SizeTy
, AllocAlign
.getQuantity())),
1471 CGF
.getContext().getSizeType());
1473 // Pass the rest of the arguments, which must match exactly.
1474 for (unsigned I
= 0; I
!= NumPlacementArgs
; ++I
) {
1475 auto Arg
= getPlacementArgs()[I
];
1476 DeleteArgs
.add(Traits::get(CGF
, Arg
.ArgValue
), Arg
.ArgType
);
1479 // Call 'operator delete'.
1480 EmitNewDeleteCall(CGF
, OperatorDelete
, FPT
, DeleteArgs
);
1485 /// Enter a cleanup to call 'operator delete' if the initializer in a
1486 /// new-expression throws.
1487 static void EnterNewDeleteCleanup(CodeGenFunction
&CGF
,
1488 const CXXNewExpr
*E
,
1490 llvm::Value
*AllocSize
,
1491 CharUnits AllocAlign
,
1492 const CallArgList
&NewArgs
) {
1493 unsigned NumNonPlacementArgs
= E
->passAlignment() ? 2 : 1;
1495 // If we're not inside a conditional branch, then the cleanup will
1496 // dominate and we can do the easier (and more efficient) thing.
1497 if (!CGF
.isInConditionalBranch()) {
1498 struct DirectCleanupTraits
{
1499 typedef llvm::Value
*ValueTy
;
1500 typedef RValue RValueTy
;
1501 static RValue
get(CodeGenFunction
&, ValueTy V
) { return RValue::get(V
); }
1502 static RValue
get(CodeGenFunction
&, RValueTy V
) { return V
; }
1505 typedef CallDeleteDuringNew
<DirectCleanupTraits
> DirectCleanup
;
1507 DirectCleanup
*Cleanup
= CGF
.EHStack
1508 .pushCleanupWithExtra
<DirectCleanup
>(EHCleanup
,
1509 E
->getNumPlacementArgs(),
1510 E
->getOperatorDelete(),
1511 NewPtr
.getPointer(),
1515 for (unsigned I
= 0, N
= E
->getNumPlacementArgs(); I
!= N
; ++I
) {
1516 auto &Arg
= NewArgs
[I
+ NumNonPlacementArgs
];
1517 Cleanup
->setPlacementArg(I
, Arg
.getRValue(CGF
), Arg
.Ty
);
1523 // Otherwise, we need to save all this stuff.
1524 DominatingValue
<RValue
>::saved_type SavedNewPtr
=
1525 DominatingValue
<RValue
>::save(CGF
, RValue::get(NewPtr
.getPointer()));
1526 DominatingValue
<RValue
>::saved_type SavedAllocSize
=
1527 DominatingValue
<RValue
>::save(CGF
, RValue::get(AllocSize
));
1529 struct ConditionalCleanupTraits
{
1530 typedef DominatingValue
<RValue
>::saved_type ValueTy
;
1531 typedef DominatingValue
<RValue
>::saved_type RValueTy
;
1532 static RValue
get(CodeGenFunction
&CGF
, ValueTy V
) {
1533 return V
.restore(CGF
);
1536 typedef CallDeleteDuringNew
<ConditionalCleanupTraits
> ConditionalCleanup
;
1538 ConditionalCleanup
*Cleanup
= CGF
.EHStack
1539 .pushCleanupWithExtra
<ConditionalCleanup
>(EHCleanup
,
1540 E
->getNumPlacementArgs(),
1541 E
->getOperatorDelete(),
1546 for (unsigned I
= 0, N
= E
->getNumPlacementArgs(); I
!= N
; ++I
) {
1547 auto &Arg
= NewArgs
[I
+ NumNonPlacementArgs
];
1548 Cleanup
->setPlacementArg(
1549 I
, DominatingValue
<RValue
>::save(CGF
, Arg
.getRValue(CGF
)), Arg
.Ty
);
1552 CGF
.initFullExprCleanup();
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();

  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
    if (ILE && ILE->isStringLiteralInit())
      minElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
    else if (ILE)
      minElements = ILE->getNumInits();
  }
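
  // For illustration (not part of the original source):
  //
  //   new int[n]{1, 2, 3};  // minElements = 3; n must be >= 3
  //   new char[n]{"hi"};    // string-literal init: minElements = 3
  //                         // ('h', 'i', '\0')
  //
  // minElements feeds the size/overflow computation in EmitCXXNewAllocSize.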
  llvm::Value *numElements = nullptr;
  llvm::Value *allocSizeWithoutCookie = nullptr;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);
  CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);

  // Emit the allocation call. If the allocator is a global placement
  // operator, just "inline" it directly.
  Address allocation = Address::invalid();
  CallArgList allocatorArgs;
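
  // For illustration (not part of the original source): the reserved global
  // placement form
  //
  //   void *buf = ...;
  //   S *p = new (buf) S();
  //
  // emits no allocation call at all; 'buf' is used directly below.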
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(E->getNumPlacementArgs() == 1);
    const Expr *arg = *E->placement_arguments().begin();

    LValueBaseInfo BaseInfo;
    allocation = EmitPointerWithAlignment(arg, &BaseInfo);

    // The pointer expression will, in many cases, be an opaque void*.
    // In these cases, discard the computed alignment and use the
    // formal alignment of the allocated type.
    if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
      allocation = allocation.withAlignment(allocAlign);

    // Set up allocatorArgs for the call to operator delete if it's not
    // the reserved global operator.
    if (E->getOperatorDelete() &&
        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
      allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
      allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
    }
  } else {
    const FunctionProtoType *allocatorType =
      allocator->getType()->castAs<FunctionProtoType>();
    unsigned ParamsToSkip = 0;

    // The allocation size is the first argument.
    QualType sizeType = getContext().getSizeType();
    allocatorArgs.add(RValue::get(allocSize), sizeType);
    ++ParamsToSkip;

    if (allocSize != allocSizeWithoutCookie) {
      CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
      allocAlign = std::max(allocAlign, cookieAlign);
    }

    // The allocation alignment may be passed as the second argument.
    if (E->passAlignment()) {
      QualType AlignValT = sizeType;
      if (allocatorType->getNumParams() > 1) {
        AlignValT = allocatorType->getParamType(1);
        assert(getContext().hasSameUnqualifiedType(
                   AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
                   sizeType) &&
               "wrong type for alignment parameter");
        ++ParamsToSkip;
      } else {
        // Corner case, passing alignment to 'operator new(size_t, ...)'.
        assert(allocator->isVariadic() && "can't pass alignment to allocator");
      }
      allocatorArgs.add(
          RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
          AlignValT);
    }

    // FIXME: Why do we not pass a CalleeDecl here?
    EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
                 /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);
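
    // For illustration (not part of the original source): for an
    // over-aligned type such as
    //
    //   struct alignas(32) V { float v[8]; };
    //   V *p = new V;
    //
    // passAlignment() is true and the call becomes roughly
    //   operator new(sizeof(V), std::align_val_t(32))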
    RValue RV =
        EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);

    // Set !heapallocsite metadata on the call to operator new.
    if (getDebugInfo())
      if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal()))
        getDebugInfo()->addHeapAllocSiteMetadata(newCall, allocType,
                                                 E->getExprLoc());

    // If this was a call to a global replaceable allocation function that does
    // not take an alignment argument, the allocator is known to produce
    // storage that's suitably aligned for any object that fits, up to a known
    // threshold. Otherwise assume it's suitably aligned for the allocated type.
    CharUnits allocationAlign = allocAlign;
    if (!E->passAlignment() &&
        allocator->isReplaceableGlobalAllocationFunction()) {
      unsigned AllocatorAlign = llvm::bit_floor(std::min<uint64_t>(
          Target.getNewAlign(), getContext().getTypeSize(allocType)));
      allocationAlign = std::max(
          allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
    }

    allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign);
  }
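
  // For illustration (not part of the original source): if the target's
  // operator new guarantees 16-byte alignment, 'new double' through the
  // replaceable global allocator is assumed to return storage aligned to
  // min(16, sizeof(double)) = 8 bytes.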
  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec or is the reserved placement new) and we have an
  // interesting initializer or will be running sanitizers on the
  // initialization.
  bool nullCheck = E->shouldNullCheckAllocation() &&
                   (!allocType.isPODType(getContext()) || E->hasInitializer() ||
                    sanitizePerformTypeCheck());

  llvm::BasicBlock *nullCheckBB = nullptr;
  llvm::BasicBlock *contBB = nullptr;

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull =
        Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }
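
  // For illustration (not part of the original source):
  //
  //   S *p = new (std::nothrow) S();
  //
  // may yield null, so construction is guarded roughly as:
  //   %new.isnull = icmp eq ptr %call, null
  //   br i1 %new.isnull, label %new.cont, label %new.notnull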
  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
                          allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementTy = ConvertTypeForMem(allocType);
  Address result = Builder.CreateElementBitCast(allocation, elementTy);

  // Pass the pointer through launder.invariant.group so that vptr information
  // associated with the storage's previous type does not propagate.
  // To avoid breaking LTO across different optimization levels, we do this
  // regardless of optimization level.
  if (CGM.getCodeGenOpts().StrictVTablePointers &&
      allocator->isReservedGlobalPlacementOperator())
    result = Builder.CreateLaunderInvariantGroup(result);
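
  // For illustration (not part of the original source): under
  // -fstrict-vtable-pointers, reusing storage can change the dynamic type:
  //
  //   new (p) Derived1();
  //   // ... storage at p is reused ...
  //   new (p) Derived2();  // laundered so Derived1's vptr isn't propagated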
  // Emit sanitizer checks for the pointer value now, so that in the case of
  // an array it is checked only once and not at each constructor call. We may
  // have already checked that the pointer is non-null.
  // FIXME: If we have an array cookie and a potentially-throwing allocator,
  // we'll null check the wrong pointer here.
  SanitizerSet SkippedChecks;
  SkippedChecks.set(SanitizerKind::Null, nullCheck);
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
                E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
                result.getPointer(), allocType, result.getAlignment(),
                SkippedChecks, numElements);

  EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
                     allocSizeWithoutCookie);
  llvm::Value *resultPtr = result.getPointer();
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type. If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (resultPtr->getType() != resultType)
      resultPtr = Builder.CreateBitCast(resultPtr, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
    PHI->addIncoming(resultPtr, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
                     nullCheckBB);

    resultPtr = PHI;
  }

  return resultPtr;
}
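
// For illustration (not part of the original source): with the null check,
// the result merge above is roughly
//
//   new.cont:
//     %p = phi ptr [ %result, %new.notnull ], [ null, <null-check block> ]
//
// so a null allocation bypasses initialization and yields a null result.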
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr, QualType DeleteTy,
                                     llvm::Value *NumElements,
                                     CharUnits CookieSize) {
  assert((!NumElements && CookieSize.isZero()) ||
         DeleteFD->getOverloadedOperator() == OO_Array_Delete);

  const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>();
  CallArgList DeleteArgs;

  auto Params = getUsualDeleteParams(DeleteFD);
  auto ParamTypeIt = DeleteFTy->param_type_begin();

  // Pass the pointer itself.
  QualType ArgTy = *ParamTypeIt++;
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  // Pass the std::destroying_delete tag if present.
  llvm::AllocaInst *DestroyingDeleteTag = nullptr;
  if (Params.DestroyingDelete) {
    QualType DDTag = *ParamTypeIt++;
    llvm::Type *Ty = getTypes().ConvertType(DDTag);
    CharUnits Align = CGM.getNaturalTypeAlignment(DDTag);
    DestroyingDeleteTag = CreateTempAlloca(Ty, "destroying.delete.tag");
    DestroyingDeleteTag->setAlignment(Align.getAsAlign());
    DeleteArgs.add(
        RValue::getAggregate(Address(DestroyingDeleteTag, Ty, Align)), DDTag);
  }
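
  // For illustration (not part of the original source): a maximal usual
  // deallocation function declares its parameters in exactly this order:
  //
  //   struct S {
  //     void operator delete(S *, std::destroying_delete_t,
  //                          std::size_t, std::align_val_t);
  //   };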
  // Pass the size if the delete function has a size_t parameter.
  if (Params.Size) {
    QualType SizeType = *ParamTypeIt++;
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
                                               DeleteTypeSize.getQuantity());

    // For array new, multiply by the number of elements.
    if (NumElements)
      Size = Builder.CreateMul(Size, NumElements);

    // If there is a cookie, add the cookie size.
    if (!CookieSize.isZero())
      Size = Builder.CreateAdd(
          Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));

    DeleteArgs.add(RValue::get(Size), SizeType);
  }
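
  // For illustration (not part of the original source): for 'delete[] p'
  // with a sized operator delete[], n elements of type T, and an array
  // cookie of c bytes, the size argument is computed as
  //
  //   sizeof(T) * n + c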
  // Pass the alignment if the delete function has an align_val_t parameter.
  if (Params.Alignment) {
    QualType AlignValType = *ParamTypeIt++;
    CharUnits DeleteTypeAlign =
        getContext().toCharUnitsFromBits(getContext().getTypeAlignIfKnown(
            DeleteTy, true /* NeedsPreferredAlignment */));
    llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
                                                DeleteTypeAlign.getQuantity());
    DeleteArgs.add(RValue::get(Align), AlignValType);
  }

  assert(ParamTypeIt == DeleteFTy->param_type_end() &&
         "unknown parameter to usual delete function");

  // Emit the call to delete.
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);

  // If call argument lowering didn't use the destroying_delete_t alloca,
  // remove it again.
  if (DestroyingDeleteTag && DestroyingDeleteTag->use_empty())
    DestroyingDeleteTag->eraseFromParent();
}
namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete final : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

void
CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
                                             llvm::Value *CompletePtr,
                                             QualType ElementType) {
  EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
                                        OperatorDelete, ElementType);
}
/// Emit the code for deleting a single object with a destroying operator
/// delete. If the element type has a non-virtual destructor, Ptr has already
/// been converted to the type of the parameter of 'operator delete'. Otherwise
/// Ptr points to an object of the static type.
static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
                                       const CXXDeleteExpr *DE, Address Ptr,
                                       QualType ElementType) {
  auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
  if (Dtor && Dtor->isVirtual())
    CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                Dtor);
  else
    CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.getPointer(), ElementType);
}
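
// For illustration (not part of the original source): with a C++20
// destroying operator delete, e.g.
//
//   struct S {
//     ~S();
//     void operator delete(S *, std::destroying_delete_t);
//   };
//   delete p;  // no destructor call emitted here
//
// the delete-expression forwards straight to the operator, which is itself
// responsible for destroying *p before freeing the storage.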
/// Emit the code for deleting a single object.
/// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
/// if not.
static bool EmitObjectDelete(CodeGenFunction &CGF,
                             const CXXDeleteExpr *DE,
                             Address Ptr,
                             QualType ElementType,
                             llvm::BasicBlock *UnconditionalDeleteBlock) {
  // C++11 [expr.delete]p3:
  //   If the static type of the object to be deleted is different from its
  //   dynamic type, the static type shall be a base class of the dynamic type
  //   of the object to be deleted and the static type shall have a virtual
  //   destructor or the behavior is undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
                    DE->getExprLoc(), Ptr.getPointer(),
                    ElementType);

  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
  assert(!OperatorDelete->isDestroyingOperatorDelete());

  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = nullptr;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        bool UseVirtualCall = true;
        const Expr *Base = DE->getArgument();
        if (auto *DevirtualizedDtor =
                dyn_cast_or_null<const CXXDestructorDecl>(
                    Dtor->getDevirtualizedMethod(
                        Base, CGF.CGM.getLangOpts().AppleKext))) {
          UseVirtualCall = false;
          const CXXRecordDecl *DevirtualizedClass =
              DevirtualizedDtor->getParent();
          if (declaresSameEntity(getCXXRecord(Base), DevirtualizedClass)) {
            // Devirtualized to the class of the base type (the type of the
            // whole expression).
            Dtor = DevirtualizedDtor;
          } else {
            // Devirtualized to some other type. Would need to cast the this
            // pointer to that type but we don't have support for that yet, so
            // do a virtual call. FIXME: handle the case where it is
            // devirtualized to the derived type (the type of the inner
            // expression) as in EmitCXXMemberOrOperatorMemberCallExpr.
            UseVirtualCall = true;
          }
        }
        if (UseVirtualCall) {
          CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                      Dtor);
          return false;
        }
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr.getPointer(),
                                            OperatorDelete, ElementType);
  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr, ElementType);
  else if (auto Lifetime = ElementType.getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  // When optimizing for size, call 'operator delete' unconditionally.
  if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) {
    CGF.EmitBlock(UnconditionalDeleteBlock);
    CGF.PopCleanupBlock();
    return true;
  }

  CGF.PopCleanupBlock();
  return false;
}
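
// For illustration (not part of the original source): for
//
//   struct B { virtual ~B(); };
//   void f(B *p) { delete p; }
//
// the destructor call goes through the vtable (the ABI's virtual-delete path
// also frees the storage), whereas if p's dynamic type is provable, e.g. the
// class is final, the destructor is devirtualized and 'operator delete' is
// called directly.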
namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete final : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
                         CookieSize);
    }
  };
}
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            Address deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = nullptr;
  llvm::Value *allocatedPtr = nullptr;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
    CharUnits elementAlign =
      deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);

    llvm::Value *arrayBegin = deletedPtr.getPointer();
    llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
      deletedPtr.getElementType(), arrayBegin, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}
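
// For illustration (not part of the original source): for
//
//   struct T { ~T(); };
//   T *p = new T[n];  // array new stores n in a cookie before the array
//   delete [] p;      // ReadArrayCookie recovers n and the allocation start
//
// the element loop then runs ~T() over the n elements (in reverse
// construction order) before the cleanup calls 'operator delete[]'.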
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  Address Ptr = EmitPointerWithAlignment(Arg);

  // Null check the pointer.
  //
  // We could avoid this null check if we can determine that the object
  // destruction is trivial and doesn't require an array cookie; we can
  // unconditionally perform the operator delete call in that case. For now, we
  // assume that deleted pointers are null rarely enough that it's better to
  // keep the branch. This might be worth revisiting for a -O0 code size win.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);
  Ptr.setKnownNonNull();

  QualType DeleteTy = E->getDestroyedType();

  // A destroying operator delete overrides the entire operation of the
  // delete expression.
  if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
    EmitDestroyingObjectDelete(*this, E, Ptr, DeleteTy);
    EmitBlock(DeleteEnd);
    return;
  }
  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getElementType(),
                                            Ptr.getPointer(), GEP, "del.first"),
                  ConvertTypeForMem(DeleteTy), Ptr.getAlignment(),
                  Ptr.isKnownNonNull());
  }

  assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
    EmitBlock(DeleteEnd);
  } else {
    if (!EmitObjectDelete(*this, E, Ptr, DeleteTy, DeleteEnd))
      EmitBlock(DeleteEnd);
  }
}
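
// For illustration (not part of the original source): deleting a pointer to
// a multidimensional array, e.g.
//
//   int (*p)[3][7] = new int[n][3][7];
//   delete [] p;  // "del.first" GEPs down to the first int
//
// unpeels int[3][7] -> int[7] -> int, so both element destruction and the
// deallocation operate on the base element type.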
static bool isGLValueFromPointerDeref(const Expr *E) {
  E = E->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(E)) {
    if (!CE->getSubExpr()->isGLValue())
      return false;
    return isGLValueFromPointerDeref(CE->getSubExpr());
  }

  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return isGLValueFromPointerDeref(OVE->getSourceExpr());

  if (const auto *BO = dyn_cast<BinaryOperator>(E))
    if (BO->getOpcode() == BO_Comma)
      return isGLValueFromPointerDeref(BO->getRHS());

  if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
    return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
           isGLValueFromPointerDeref(ACO->getFalseExpr());

  // C++11 [expr.sub]p1:
  //   The expression E1[E2] is identical (by definition) to *((E1)+(E2))
  if (isa<ArraySubscriptExpr>(E))
    return true;

  if (const auto *UO = dyn_cast<UnaryOperator>(E))
    if (UO->getOpcode() == UO_Deref)
      return true;

  return false;
}
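
// For illustration (not part of the original source), operands classified by
// this predicate:
//
//   *p           // true: unary deref
//   p[i]         // true: [expr.sub] rewrite of *((p)+(i))
//   b ? *p : *q  // true: both arms derive from a deref
//   obj          // false: names an object directly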
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  Address ThisPtr = CGF.EmitLValue(E).getAddress(CGF);

  QualType SrcRecordTy = E->getType();

  // C++ [class.cdtor]p4:
  //   If the operand of typeid refers to the object under construction or
  //   destruction and the static type of the operand is neither the
  //   constructor or destructor's class nor one of its bases, the behavior is
  //   undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
                    ThisPtr.getPointer(), SrcRecordTy);

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  //
  // However, this paragraph's intent is not clear.  We choose a very generous
  // interpretation which implores us to consider comma operators, conditional
  // operators, parentheses and other such constructs.
  if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
          isGLValueFromPointerDeref(E), SrcRecordTy)) {
    llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
    llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
    CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

    CGF.EmitBlock(BadTypeidBlock);
    CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
    CGF.EmitBlock(EndBlock);
  }

  return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
                                        StdTypeInfoPtrTy);
}
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
        CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  // If the operand is already the most derived object, there is no need to
  // look up the vtable.
  if (E->isPotentiallyEvaluated() && !E->isMostDerived(getContext()))
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                StdTypeInfoPtrTy);

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}
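
// For illustration (not part of the original source):
//
//   typeid(int)      // constant RTTI descriptor, no code emitted
//   typeid(*base)    // polymorphic glvalue: looked up via *base's vtable
//   typeid(derived)  // already the most derived object: constant descriptor
//
// where 'base' points to a polymorphic class and 'derived' is a local object.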
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
    return nullptr;

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}
llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
                                              const CXXDynamicCastExpr *DCE) {
  CGM.EmitExplicitCastExprType(DCE, this);
  QualType DestTy = DCE->getTypeAsWritten();

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p7:
  //   If T is "pointer to cv void," then the result is a pointer to the most
  //   derived object pointed to by v.
  const PointerType *DestPTy = DestTy->getAs<PointerType>();

  bool isDynamicCastToVoid;
  QualType SrcRecordTy;
  QualType DestRecordTy;
  if (DestPTy) {
    isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    isDynamicCastToVoid = false;
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  // C++ [class.cdtor]p5:
  //   If the operand of the dynamic_cast refers to the object under
  //   construction or destruction and the static type of the operand is not a
  //   pointer to or object of the constructor or destructor's own class or one
  //   of its bases, the dynamic_cast results in undefined behavior.
  EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
                SrcRecordTy);

  if (DCE->isAlwaysNull())
    if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
      return T;

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue =
      CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
                                                         SrcRecordTy);

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  llvm::Value *Value;
  if (isDynamicCastToVoid) {
    Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
                                                  DestTy);
  } else {
    assert(DestRecordTy->isRecordType() &&
           "destination type must be a record type!");
    Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
                                                DestTy, DestRecordTy, CastEnd);
    CastNotNull = Builder.GetInsertBlock();
  }

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}
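
// For illustration (not part of the original source):
//
//   Derived *d = dynamic_cast<Derived *>(b);   // null source or failed cast
//                                              // yields null via the PHI above
//   Derived &r = dynamic_cast<Derived &>(*b);  // failure throws std::bad_cast
//   void *v = dynamic_cast<void *>(b);         // pointer to the most derived
//                                              // object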