//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
      cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}
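
// For example (a sketch of the emitted IR): a value of type
// 'i32 addrspace(5)*' becomes 'i8 addrspace(5)*'; the source address space
// is preserved rather than being forced to the default 'i8*'.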

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
                                                     CharUnits Align,
                                                     const Twine &Name,
                                                     llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getAsAlign());
  return Address(Alloca, Ty, Align);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is cast to the default address space if necessary.
Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                          const Twine &Name,
                                          llvm::Value *ArraySize,
                                          Address *AllocaAddr) {
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
  if (getASTAllocaAddressSpace() != LangAS::Default) {
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(getPostAllocaInsertPoint());
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
  }

  return Address(V, Ty, Align);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  if (ArraySize)
    return Builder.CreateAlloca(Ty, ArraySize, Name);
  return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                              ArraySize, Name, AllocaInsertPt);
}
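
// For example, CreateTempAlloca(Int32Ty, "tmp") with no ArraySize emits
//   %tmp = alloca i32
// in the entry block, so the slot exists for the whole function; with an
// ArraySize, the dynamically sized alloca is emitted at the builder's
// current position instead.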

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                      const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlignment(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                       Address *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                       const Twine &Name, Address *Alloca) {
  Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                                    /*ArraySize=*/nullptr, Alloca);

  if (Ty->isConstantMatrixType()) {
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    Result = Address(
        Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()),
        VectorTy, Result.getAlignment());
  }
  return Result;
}

Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
                                                  const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                  const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  CGFPOptionsRAII FPOptsRAII(*this, E);
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),
                                       BoolTy, Loc);
}
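
// E.g. for 'if (p)' where 'p' is an 'int *', the scalar conversion boils
// down to roughly '%tobool = icmp ne ptr %p, null' (modulo the pointer
// type spelling of the LLVM version in use).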

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isPRValue())
    return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // If this is a bitfield-resulting conditional operator, we can special case
  // emit this. The normal 'EmitLValue' version of this is particularly
  // difficult to codegen for, since creating a single "LValue" for two
  // different sized arguments here is not particularly doable.
  if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
          E->IgnoreParenNoopCasts(getContext()))) {
    if (CondOp->getObjectKind() == OK_BitField)
      return EmitIgnoredConditionalOperator(CondOp);
  }

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise =
              VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::FunctionCallee CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          ReferenceTemporary, E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
          GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}
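
// For instance, 'const Foo &r = makeFoo();' at block scope (SD_Automatic)
// registers the temporary's destructor as a lifetime-extended cleanup that
// runs when 'r' leaves scope, not at the end of the full-expression.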

static Address createReferenceTemporary(CodeGenFunction &CGF,
                                        const MaterializeTemporaryExpr *M,
                                        const Expr *Inner,
                                        Address *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record try to promote it into a
    // constant global under the same rules a normal constant would've been
    // promoted. This is easier on the optimizer and generally emits fewer
    // instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        CGF.CGM.isTypeConstant(Ty, true))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
        auto *GV = new llvm::GlobalVariable(
            CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
            llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
            llvm::GlobalValue::NotThreadLocal,
            CGF.getContext().getTargetAddressSpace(AS));
        CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
        GV->setAlignment(alignment.getAsAlign());
        llvm::Constant *C = GV;
        if (AS != LangAS::Default)
          C = TCG.performAddrSpaceCast(
              CGF.CGM, GV, AS, LangAS::Default,
              GV->getValueType()->getPointerTo(
                  CGF.getContext().getTargetAddressSpace(LangAS::Default)));
        // FIXME: Should we put the new global into a COMDAT?
        return Address(C, GV->getValueType(), alignment);
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

/// Helper method to check if the underlying ABI is AAPCS
static bool isAAPCS(const TargetInfo &TargetInfo) {
  return TargetInfo.getABI().startswith("aapcs");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->getSubExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    Address Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      llvm::Type *Ty = ConvertTypeForMem(E->getType());
      Object = Address(llvm::ConstantExpr::getBitCast(
                           Var, Ty->getPointerTo(Object.getAddressSpace())),
                       Ty, Object.getAlignment());
      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can. It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations. Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  Address Alloca = Address::invalid();
  Address Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
    Object = Address(llvm::ConstantExpr::getBitCast(
                         cast<llvm::Constant>(Object.getPointer()),
                         TemporaryType->getPointerTo()),
                     TemporaryType,
                     Object.getAlignment());
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this with sanitizers which need
      // more precise lifetime marks.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          !SanOpts.has(SanitizerKind::HWAddress) &&
          !SanOpts.has(SanitizerKind::Memory) &&
          !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) {
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/ false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress(*this);
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
                                               Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer(*this);

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}

/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}
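
// The multiplier 0x9ddfea08eb382d69 and the shift by 47 mirror the
// 128-to-64-bit mixing step that llvm::hashing inherited from CityHash, so
// the IR sequence above reproduces exactly what hash_16_bytes() computes.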

bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}

bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}

bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) ||
         SanOpts.has(SanitizerKind::Alignment) ||
         SanOpts.has(SanitizerKind::ObjectSize) ||
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment,
                                    SanitizerSet SkippedChecks,
                                    llvm::Value *ArraySize) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty.isVolatileQualified())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  // Quickly determine whether we have a pointer to an alloca. It's possible
  // to skip null checks, and some alignment checks, for these pointers. This
  // can reduce compile-time significantly.
  auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());

  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
  llvm::Value *IsNonNull = nullptr;
  bool IsGuaranteedNonNull =
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
  bool AllowNullPointers = isNullPointerAllowed(TCK);
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !IsGuaranteedNonNull) {
    // The glvalue must not be an empty glvalue.
    IsNonNull = Builder.CreateIsNotNull(Ptr);

    // The IR builder can constant-fold the null check if the pointer points to
    // a constant.
    IsGuaranteedNonNull = IsNonNull == True;

    // Skip the null check if the pointer is known to be non-null.
    if (!IsGuaranteedNonNull) {
      if (AllowNullPointers) {
        // When performing pointer casts, it's OK if the value is null.
        // Skip the remaining checks in that case.
        Done = createBasicBlock("null");
        llvm::BasicBlock *Rest = createBasicBlock("not.null");
        Builder.CreateCondBr(IsNonNull, Rest, Done);
        EmitBlock(Rest);
      } else {
        Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
      }
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) &&
      !SkippedChecks.has(SanitizerKind::ObjectSize) &&
      !Ty->isIncompleteType()) {
    uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
    llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
    if (ArraySize)
      Size = Builder.CreateMul(Size, ArraySize);

    // Degenerate case: new X[0] does not need an objectsize check.
    llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
    if (!ConstantSize || !ConstantSize->isNullValue()) {
      // The glvalue must refer to a large enough storage region.
      // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
      //        to check this.
      // FIXME: Get object address space
      llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
      llvm::Value *Min = Builder.getFalse();
      llvm::Value *NullIsUnknown = Builder.getFalse();
      llvm::Value *Dynamic = Builder.getFalse();
      llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
      llvm::Value *LargeEnough = Builder.CreateICmpUGE(
          Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown, Dynamic}), Size);
      Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
    }
  }

  llvm::MaybeAlign AlignVal;
  llvm::Value *PtrAsInt = nullptr;

  if (SanOpts.has(SanitizerKind::Alignment) &&
      !SkippedChecks.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getAsMaybeAlign();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
                                             /*ForPointeeType=*/true)
                     .getAsMaybeAlign();

    // The glvalue must be suitably aligned.
    if (AlignVal && *AlignVal > llvm::Align(1) &&
        (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
      PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
      llvm::Value *Align = Builder.CreateAnd(
          PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      if (Aligned != True)
        Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
        llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
        llvm::ConstantInt::get(Int8Ty, TCK)};
    EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
              PtrAsInt ? PtrAsInt : Ptr);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  if (SanOpts.has(SanitizerKind::Vptr) &&
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
    // Ensure that the pointer is non-null before loading it. If there is no
    // compile-time guarantee, reuse the run-time null check or emit a new one.
    if (!IsGuaranteedNonNull) {
      if (!IsNonNull)
        IsNonNull = Builder.CreateIsNotNull(Ptr);
      if (!Done)
        Done = createBasicBlock("vptr.null");
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
      EmitBlock(VptrNotNull);
    }

    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic! Move to a
    //        fingerprinting mechanism once LLVM provides one. For the time
    //        being the implementation happens to be deterministic.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Contained in NoSanitizeList based on the mangled type.
    if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
                                                           Out.str())) {
      llvm::hash_code TypeHash = hash_value(Out.str());

      // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
      llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
      Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), IntPtrTy,
                       getPointerAlign());
      llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
      llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

      llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal = Builder.CreateAlignedLoad(
          IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
          getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty),
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
        llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}
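
// Note on the vptr check above: __ubsan_vptr_type_cache is a 128-slot,
// direct-mapped cache indexed by the low bits of hash_16_bytes(TypeHash,
// vptr). A hit means this (vptr, type) pair was already validated; only
// misses pay for the DynamicTypeCacheMiss runtime handler.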

/// Determine whether this expression refers to a flexible array member in a
/// struct. We disable array bounds checks for such members.
static bool isFlexibleArrayMemberExpr(const Expr *E,
                                      unsigned StrictFlexArraysLevel) {
  // For compatibility with existing code, we treat arrays of length 0 or
  // 1 as flexible array members.
  // FIXME: This is inconsistent with the warning code in SemaChecking. Unify
  // the two mechanisms.
  const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
    // FIXME: Sema doesn't treat [1] as a flexible array member if the bound
    // was produced by macro expansion.
    if (StrictFlexArraysLevel >= 2 && CAT->getSize().ugt(0))
      return false;
    // FIXME: While the default -fstrict-flex-arrays=0 permits Size>1 trailing
    // arrays to be treated as flexible-array-members, we still emit ubsan
    // checks as if they are not.
    if (CAT->getSize().ugt(1))
      return false;
  } else if (!isa<IncompleteArrayType>(AT))
    return false;

  E = E->IgnoreParens();

  // A flexible array member must be the last member in the class.
  if (const auto *ME = dyn_cast<MemberExpr>(E)) {
    // FIXME: If the base type of the member expr is not FD->getParent(),
    // this should not be treated as a flexible array member access.
    if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
      // FIXME: Sema doesn't treat a T[1] union member as a flexible array
      // member, only a T[0] or T[] member gets that treatment.
      if (FD->getParent()->isUnion())
        return true;
      RecordDecl::field_iterator FI(
          DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
      return ++FI == FD->getParent()->field_end();
    }
  } else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) {
    return IRE->getDecl()->getNextIvar() == nullptr;
  }

  return false;
}
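
// For example, at the default -fstrict-flex-arrays=0 the trailing member in
//   struct S { int n; char data[1]; };
// counts as a flexible array member (so indexing 's->data' escapes bounds
// checks), while at level 2 only zero-length ('char data[0];') and
// incomplete ('char data[];') trailing arrays qualify.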

llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}
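
// Sketch of the case this serves: given
//   void f(int *p __attribute__((pass_object_size(0))));
// the callee receives an implicit size-in-bytes argument alongside 'p';
// this helper loads it and divides by sizeof(int) to recover an element
// count usable as an indexing bound.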

/// If Base is known to point to the start of an array, return the length of
/// that array. Return 0 if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
                                          const Expr *Base,
                                          QualType &IndexedType,
                                          unsigned StrictFlexArraysLevel) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !isFlexibleArrayMemberExpr(CE->getSubExpr(), StrictFlexArraysLevel)) {
      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());
      else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}

void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  SanitizerScope SanScope(this);

  const unsigned StrictFlexArraysLevel = getLangOpts().StrictFlexArrays;

  QualType IndexedType;
  llvm::Value *Bound =
      getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
  if (!Bound)
    return;

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}
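
// E.g. for 'int a[10];', an access 'a[i] = 0' is checked with 'i < 10',
// while merely forming the address '&a[i]' allows 'i == 10' ('i <= 10'),
// since a one-past-the-end pointer is valid to form but not to access.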

CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
  if (getLangOpts().OpenMP)
    CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                              E->getSubExpr());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}
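
// E.g. '++z' on a '_Complex double' adds 1 (i.e. 1+0i): only the real part
// changes, roughly '%inc = fadd double %z.real, 1.0', and the imaginary
// part is stored back unchanged.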

void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
                                                  LValueBaseInfo *BaseInfo,
                                                  TBAAAccessInfo *TBAAInfo) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGM.EmitExplicitCastExprType(ECE, this);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        Address Addr = EmitPointerWithAlignment(CE->getSubExpr(),
                                                &InnerBaseInfo,
                                                &InnerTBAAInfo);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          CharUnits Align = CGM.getNaturalPointeeTypeAlignment(
              E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
                                                 TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr = Address(Addr.getPointer(), Addr.getElementType(), Align);
          }
        }

        if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
                                      /*MayBeNull=*/true,
                                      CodeGenFunction::CFITCK_UnrelatedCast,
                                      CE->getBeginLoc());
        }

        llvm::Type *ElemTy = ConvertTypeForMem(E->getType()->getPointeeType());
        Addr = Builder.CreateElementBitCast(Addr, ElemTy);
        if (CE->getCastKind() == CK_AddressSpaceConversion)
          Addr = Builder.CreateAddrSpaceCast(Addr, ConvertType(E->getType()));
        return Addr;
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGM.getTBAAAccessInfo(E->getType());
      Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo);
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return GetAddressOfBaseClass(Addr, Derived,
                                   CE->path_begin(), CE->path_end(),
                                   ShouldNullCheckClassCastValue(CE),
                                   CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = EmitLValue(UO->getSubExpr());
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress(*this);
    }
  }

  // std::addressof and variants.
  if (auto *Call = dyn_cast<CallExpr>(E)) {
    switch (Call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      LValue LV = EmitLValue(Call->getArg(0));
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress(*this);
    }
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  CharUnits Align =
      CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
  llvm::Type *ElemTy = ConvertTypeForMem(E->getType()->getPointeeType());
  return Address(EmitScalarExpr(E), ElemTy, Align);
}
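
// Example of the payoff: given
//   struct __attribute__((packed)) P { char c; int i; } *p;
// the unary '&' path lets '&p->i' carry the packed field's actual
// alignment of 1, where the fallback path would assume the natural 4-byte
// alignment of 'int *'.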

llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
  llvm::Value *V = RV.getScalarVal();
  if (auto MPT = T->getAs<MemberPointerType>())
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
  return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
}

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address. Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *ElTy = ConvertType(E->getType());
  llvm::Type *Ty = llvm::PointerType::getUnqual(ElTy);
  return MakeAddrLValue(
      Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}

bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
  const Expr *Base = Obj;
  while (!isa<CXXThisExpr>(Base)) {
    // The result of a dynamic_cast can be null.
    if (isa<CXXDynamicCastExpr>(Base))
      return false;

    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
      Base = CE->getSubExpr();
    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
      Base = PE->getSubExpr();
    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
      if (UO->getOpcode() == UO_Extension)
        Base = UO->getSubExpr();
      else
        return false;
    } else {
      return false;
    }
  }
  return true;
}
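
// E.g. '((this))', '(Base *)this', and '__extension__ this' all count as
// wrapped 'this'; 'dynamic_cast<Derived *>(this)' does not, because its
// result can be null.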

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV;
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
  else
    LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
    SanitizerSet SkippedChecks;
    if (const auto *ME = dyn_cast<MemberExpr>(E)) {
      bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
      if (IsBaseCXXThis)
        SkippedChecks.set(SanitizerKind::Alignment, true);
      if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
        SkippedChecks.set(SanitizerKind::Null, true);
    }
    EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(),
                  LV.getAlignment(), SkippedChecks);
  }
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::CXXRewrittenBinaryOperatorClass:
    return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm());
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass: {
    const ConstantExpr *CE = cast<ConstantExpr>(E);
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
      QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
                             ->getCallReturnType(getContext())
                             ->getPointeeType();
      return MakeNaturalAlignAddrLValue(Result, RetType);
    }
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
  }
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitAggExprToLValue(E);

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr());
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
      Address Addr = LV.getAddress(*this);
      llvm::Value *V = Addr.getPointer();
      Scope.ForceCleanup({&V});
      return LValue::MakeAddr(Addr.withPointer(V), LV.getType(), getContext(),
                              LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass: {
    auto *DAE = cast<CXXDefaultArgExpr>(E);
    CXXDefaultArgExprScope Scope(*this, DAE);
    return EmitLValue(DAE->getExpr());
  }
  case Expr::CXXDefaultInitExprClass: {
    auto *DIE = cast<CXXDefaultInitExpr>(E);
    CXXDefaultInitExprScope Scope(*this, DIE);
    return EmitLValue(DIE->getExpr());
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::MatrixSubscriptExprClass:
    return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
  case Expr::OMPArraySectionExprClass:
    return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXAddrspaceCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}
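
// E.g. a 'const int' object qualifies; 'const volatile int', or a class
// with a mutable member or a non-trivial copy/destructor, does not.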

/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}
/// Try to emit a reference to the given value without producing it as
/// an l-value. This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Otherwise, fail.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // In CUDA/HIP device compilation, a lambda may capture a reference variable
  // referencing a global host variable by copy. In this case the lambda should
  // make a copy of the value of the global host variable. The DRE of the
  // captured reference variable cannot be emitted as load from the host
  // global variable as compile time constant, since the host variable is not
  // accessible on device. The DRE of the captured reference variable has to be
  // loaded from captures.
  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
      refExpr->refersToEnclosingVariableOrCapture()) {
    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
    if (MD && MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      const APValue::LValueBase &base = result.Val.getLValueBase();
      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
        if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
          if (!VD->hasAttr<CUDADeviceAttr>()) {
            return ConstantEmission();
          }
        }
      }
    }
  }

  // Emit as a constant.
  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
                                               result.Val, resultType);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, result.Val);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, result.Val);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}
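// Sketch of the case this optimizes (illustrative source, not from this
// file):
//
//   void f() {
//     constexpr int n = 4;
//     auto g = [] { return n; };  // names 'n' without odr-using it
//   }
//
// Here the lambda body can fold the evaluated constant 4 in directly rather
// than requiring a capture of 'n' or a global copy of its value.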
static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
                                                        const MemberExpr *ME) {
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
    return DeclRefExpr::Create(
        CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
  }
  return nullptr;
}

CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
    return tryEmitAsConstant(DRE);
  return ConstantEmission();
}
llvm::Value *CodeGenFunction::emitScalarConstant(
    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
  assert(Constant && "not a constant");
  if (Constant.isReference())
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
                            E->getExprLoc())
        .getScalarVal();
  return Constant.getValue();
}
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
  return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
}
static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums, bool IsBool) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    ED->getValueRange(End, Min);
  }

  return true;
}
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
                       hasBooleanRepresentation(Ty)))
    return nullptr;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}
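// For example, loading a C++ 'bool' stored as i8 yields the half-open range
// [0, 2), i.e. '!range !{i8 0, i8 2}' metadata on the load; for an unfixed
// C++ enum the range comes from EnumDecl::getValueRange over the
// enumerators.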
bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
                                           SourceLocation Loc) {
  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
  if (!HasBoolCheck && !HasEnumCheck)
    return false;

  bool IsBool = hasBooleanRepresentation(Ty) ||
                NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
  bool NeedsBoolCheck = HasBoolCheck && IsBool;
  bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
  if (!NeedsBoolCheck && !NeedsEnumCheck)
    return false;

  // Single-bit booleans don't need to be checked. Special-case this to avoid
  // a bit width mismatch when handling bitfield values. This is handled by
  // EmitFromMemory for the non-bitfield case.
  if (IsBool &&
      cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
    return false;

  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
    return true;

  auto &Ctx = getLLVMContext();
  SanitizerScope SanScope(this);
  llvm::Value *Check;
  --End;
  if (!Min) {
    Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
  } else {
    llvm::Value *Upper =
        Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
    llvm::Value *Lower =
        Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
    Check = Builder.CreateAnd(Upper, Lower);
  }
  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
                                  EmitCheckTypeDescriptor(Ty)};
  SanitizerMask Kind =
      NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
  EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
            StaticArgs, EmitCheckValue(Value));
  return true;
}
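// This is the dynamic check behind -fsanitize=bool and -fsanitize=enum: for
// example, loading the byte 0x02 through a 'bool' lvalue fails the unsigned
// 'Value <= 1' comparison (Min = 0, End = 2 before the decrement above) and
// diagnoses through the LoadInvalidValue handler.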
llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty,
                                               SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               TBAAAccessInfo TBAAInfo,
                                               bool isNontemporal) {
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV));

  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    // Boolean vectors use `iN` as storage type.
    if (ClangVecTy->isExtVectorBoolType()) {
      llvm::Type *ValTy = ConvertType(Ty);
      unsigned ValNumElems =
          cast<llvm::FixedVectorType>(ValTy)->getNumElements();
      // Load the `iP` storage object (P is the padded vector size).
      auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
      const auto *RawIntTy = RawIntV->getType();
      assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
      // Bitcast iP --> <P x i1>.
      auto *PaddedVecTy = llvm::FixedVectorType::get(
          Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
      llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
      // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
      V = emitBoolVecConversion(V, ValNumElems, "extractvec");

      return EmitFromMemory(V, Ty);
    }

    // Handle vectors of size 3 like size 4 for better performance.
    const llvm::Type *EltTy = Addr.getElementType();
    const auto *VTy = cast<llvm::FixedVectorType>(EltTy);

    if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {

      // Bitcast to vec4 type.
      llvm::VectorType *vec4Ty =
          llvm::FixedVectorType::get(VTy->getElementType(), 4);
      Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
      // Now load value.
      llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
      V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  // Atomic operations have to be done on integral types.
  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
    return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
  }

  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node = llvm::MDNode::get(
        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
  }

  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);

  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
    // In order to prevent the optimizer from throwing away the check, don't
    // attach range metadata to the load.
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);

  return EmitFromMemory(Load, Ty);
}
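// Note on the vec3 path above: without -fpreserve-vec3-type, loading a
// 'float3' emits a <4 x float> "loadVec4" followed by a shufflevector down
// to <3 x float>, which targets generally handle better than a native
// 3-element load.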
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
  }

  return Value;
}
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }
  if (Ty->isExtVectorBoolType()) {
    const auto *RawIntTy = Value->getType();
    // Bitcast iP --> <P x i1>.
    auto *PaddedVecTy = llvm::FixedVectorType::get(
        Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
    auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
    // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
    llvm::Type *ValTy = ConvertType(Ty);
    unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
    return emitBoolVecConversion(V, ValNumElems, "extractvec");
  }

  return Value;
}
// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType), if it points to an array (the memory type of MatrixType).
static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
                                         bool IsVector = true) {
  auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
  if (ArrayTy && IsVector) {
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy));
  }
  auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
  if (VectorTy && !IsVector) {
    auto *ArrayTy = llvm::ArrayType::get(
        VectorTy->getElementType(),
        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());

    return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
  }

  return Addr;
}
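// For example, a 4x4 matrix declared as
//   typedef double m4x4 __attribute__((matrix_type(4, 4)));
// is [16 x double] in memory but <16 x double> as a value; this helper
// rewrites the address's element type between those two forms as needed.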
// Emit a store of a matrix LValue. This may require casting the original
// pointer to memory address (ArrayType) to a pointer to the value type
// (VectorType).
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
                                    bool isInit, CodeGenFunction &CGF) {
  Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
                                           value->getType()->isVectorTy());
  CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
                        lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
                        lvalue.isNontemporal());
}
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
                                        bool Volatile, QualType Ty,
                                        LValueBaseInfo BaseInfo,
                                        TBAAAccessInfo TBAAInfo,
                                        bool isInit, bool isNontemporal) {
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV));

  llvm::Type *SrcTy = Value->getType();
  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
    if (VecTy && ClangVecTy->isExtVectorBoolType()) {
      auto *MemIntTy = cast<llvm::IntegerType>(Addr.getElementType());
      // Expand to the memory bit width.
      unsigned MemNumElems = MemIntTy->getPrimitiveSizeInBits();
      // <N x i1> --> <P x i1>.
      Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
      // <P x i1> --> iP.
      Value = Builder.CreateBitCast(Value, MemIntTy);
    } else if (!CGM.getCodeGenOpts().PreserveVec3Type) {
      // Handle vec3 special.
      if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
        // Our source is a vec3, do a shuffle vector to make it a vec4.
        Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
                                            "extractVec");
        SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
      }
      if (Addr.getElementType() != SrcTy) {
        Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
      }
    }
  }

  Value = EmitToMemory(Value, Ty);

  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() ||
      (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
    EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
    return;
  }

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node =
        llvm::MDNode::get(Store->getContext(),
                          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
  }

  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
}
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  if (lvalue.getType()->isConstantMatrixType()) {
    EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
    return;
  }

  EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
                    lvalue.getType(), lvalue.getBaseInfo(),
                    lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}
// Emit a load of an LValue of matrix type. This may require casting the
// pointer to memory address (ArrayType) to a pointer to the value type
// (VectorType).
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
                                     CodeGenFunction &CGF) {
  assert(LV.getType()->isConstantMatrixType());
  Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
  LV.setAddress(Addr);
  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
}
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    Address AddrWeakObj = LV.getAddress(*this);
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    // In MRC mode, we do a load+autorelease.
    if (!getLangOpts().ObjCAutoRefCount) {
      return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
    }

    // In ARC mode, we load retained and then consume the value.
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    if (LV.getType()->isConstantMatrixType())
      return EmitLoadOfMatrixLValue(LV, Loc, *this);

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV, Loc));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
                                              LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt()) {
    return EmitLoadOfExtVectorElementLValue(LV);
  }

  // Global Register variables always invoke intrinsics
  if (LV.isGlobalReg())
    return EmitLoadOfGlobalRegLValue(LV);

  if (LV.isMatrixElt()) {
    llvm::Value *Idx = LV.getMatrixIdx();
    if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
      const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
      llvm::MatrixBuilder MB(Builder);
      MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
    }
    llvm::LoadInst *Load =
        Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
  }

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV, Loc);
}
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 SourceLocation Loc) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  Address Ptr = LV.getBitFieldAddress();
  llvm::Value *Val =
      Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");

  bool UseVolatile = LV.isVolatileQualified() &&
                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
    unsigned HighBits = StorageSize - Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Offset + HighBits)
      Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
  } else {
    if (Offset)
      Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
      Val = Builder.CreateAnd(
          Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
  EmitScalarRangeCheck(Val, LV.getType(), Loc);
  return RValue::get(Val);
}
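// Worked example (illustrative): a signed bit-field with Size = 3 at
// Offset = 2 in 8-bit storage gives HighBits = 8 - 2 - 3 = 3, so the value
// is extracted as 'ashr (shl %val, 3), 5', which also sign-extends the
// field's top bit into the result.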
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
                                        LV.isVolatileQualified());

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<int, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(getAccessedFieldNo(i, Elts));

  Vec = Builder.CreateShuffleVector(Vec, Mask);
  return RValue::get(Vec);
}
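// For example, with OpenCL/ext_vector_type syntax: given 'float4 v', the
// access 'v.yx' loads the whole vector and shuffles with mask <1, 0>, while
// the scalar access 'v.y' is emitted as a single extractelement.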
/// Generates lvalue for partial ext_vector access.
Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
  Address VectorAddress = LV.getExtVectorAddress();
  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);

  Address CastToPointerElement =
      Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
                                   "conv.ptr.element");

  const llvm::Constant *Elts = LV.getExtVectorElts();
  unsigned ix = getAccessedFieldNo(0, Elts);

  Address VectorBasePtrPlusIx =
      Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
                                     "vector.elt");

  return VectorBasePtrPlusIx;
}
/// Load of global named registers are always calls to intrinsics.
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
  llvm::Value *Call = Builder.CreateCall(
      F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
  if (OrigTy->isPointerTy())
    Call = Builder.CreateIntToPtr(Call, OrigTy);
  return RValue::get(Call);
}
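// Usage sketch (target-dependent, illustrative): reading a global register
// variable such as
//   register unsigned long current_sp asm ("sp");
// emits roughly
//   %0 = call i64 @llvm.read_register.i64(metadata !0)
// where !0 is the named-register metadata built in EmitGlobalNamedRegister
// below.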
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
                                            Dst.isVolatileQualified());
      auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
      if (IRStoreTy) {
        auto *IRVecTy = llvm::FixedVectorType::get(
            Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
        Vec = Builder.CreateBitCast(Vec, IRVecTy);
        // iN --> <N x i1>.
      }
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      if (IRStoreTy) {
        // <N x i1> --> <iN>.
        Vec = Builder.CreateBitCast(Vec, IRStoreTy);
      }
      Builder.CreateStore(Vec, Dst.getVectorAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    if (Dst.isGlobalReg())
      return EmitStoreThroughGlobalRegLValue(Src, Dst);

    if (Dst.isMatrixElt()) {
      llvm::Value *Idx = Dst.getMatrixIdx();
      if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
        const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
        llvm::MatrixBuilder MB(Builder);
        MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
      }
      llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
      llvm::Value *Vec =
          Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
      Builder.CreateStore(Vec, Dst.getMatrixAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      if (isInit) {
        Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
        break;
      }
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      if (isInit)
        // Initialize and then skip the primitive store.
        EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal());
      else
        EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(),
                         /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    Address LvalueDst = Dst.getAddress(*this);
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
    Address LvalueDst = Dst.getAddress(*this);
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = IntPtrTy;
      Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
      llvm::Value *RHS = dst.getPointer();
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
          Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
                                 "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    } else {
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    }
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  Address Ptr = Dst.getBitFieldAddress();

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  // Cast the source to the storage type and shift it into place.
  SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
                                 /*isSigned=*/false);
  llvm::Value *MaskedVal = SrcVal;

  const bool UseVolatile =
      CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  // See if there are other bits in the bitfield's storage we'll need to load
  // and mask together with source before storing.
  if (StorageSize != Info.Size) {
    assert(StorageSize > Info.Size && "Invalid bitfield size.");
    llvm::Value *Val =
        Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");

    // Mask the source value as needed.
    if (!hasBooleanRepresentation(Dst.getType()))
      SrcVal = Builder.CreateAnd(
          SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
          "bf.value");
    MaskedVal = SrcVal;
    if (Offset)
      SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");

    // Mask out the original value.
    Val = Builder.CreateAnd(
        Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
        "bf.clear");

    // Or together the unchanged values and the source value.
    SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
  } else {
    assert(Offset == 0);
    // According to the AACPS:
    // When a volatile bit-field is written, and its container does not overlap
    // with any non-bit-field member, its container must be read exactly once
    // and written exactly once using the access width appropriate to the type
    // of the container. The two accesses are not atomic.
    if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
        CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
      Builder.CreateLoad(Ptr, true, "bf.load");
  }

  // Write the new value back out.
  Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());

  // Return the new value of the bit-field, if requested.
  if (Result) {
    llvm::Value *ResultVal = MaskedVal;

    // Sign extend the value if needed.
    if (Info.IsSigned) {
      assert(Info.Size <= StorageSize);
      unsigned HighBits = StorageSize - Info.Size;
      if (HighBits) {
        ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
        ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
      }
    }

    ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
                                      "bf.result.cast");
    *Result = EmitFromMemory(ResultVal, Dst.getType());
  }
}
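// Worked example (illustrative): storing to an unsigned bit-field with
// Size = 3 at Offset = 2 in 8-bit storage masks the source with 0b00000111,
// shifts it left by 2, clears bits 2..4 of the loaded container with
// 'and %old, 0b11100011', ors the two halves together, and stores the
// merged byte back.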
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector. Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
                                        Dst.isVolatileQualified());
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
        cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shuffle vector if the src and destination are the same number of
      // elements and restore the vector mask since it is on the side it will
      // be stored.
      SmallVector<int, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i;

      Vec = Builder.CreateShuffleVector(SrcVal, Mask);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      //        into that? This could be simpler.
      SmallVector<int, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(i);
      ExtMask.resize(NumDstElts, -1);
      llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
      SmallVector<int, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(i);

      // When the vector size is odd and .odd or .hi is used, the last element
      // of the Elts constant array will be one past the size of the vector.
      // Ignore the last element here, if it is greater than the mask size.
      if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
        NumSrcElts--;

      // modify when what gets shuffled in
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
    } else {
      // We should never shorten the vector
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
                      Dst.isVolatileQualified());
}
/// Store of global named registers are always calls to intrinsics.
void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
  assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
  assert(RegName && "Register LValue is not metadata");

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
  llvm::Value *Value = Src.getScalarVal();
  if (OrigTy->isPointerTy())
    Value = Builder.CreatePtrToInt(Value, Ty);
  Builder.CreateCall(
      F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
}
// setObjCGCLValueClass - sets class of the lvalue for the purpose of
// generating write-barriers API. It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If ivar is a structure pointer, assigning to field of
      // this struct follows gcc's behavior and makes it a non-ivar
      // write-barrier conservatively.
      ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}
static llvm::Value *
EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
                                llvm::Value *V, llvm::Type *IRType,
                                StringRef Name = StringRef()) {
  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}
static LValue EmitThreadPrivateVarDeclLValue(
    CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
    llvm::Type *RealVarTy, SourceLocation Loc) {
  if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
    Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
        CGF, VD, Addr, Loc);
  else
    Addr =
        CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);

  Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
  return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
                                           const VarDecl *VD, QualType T) {
  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  // Return an invalid address if variable is MT_To and unified
  // memory is not enabled. For all other cases: MT_Link and
  // MT_To with unified memory, return a valid address.
  if (!Res || (*Res == OMPDeclareTargetDeclAttr::MT_To &&
               !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
    return Address::invalid();
  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
          (*Res == OMPDeclareTargetDeclAttr::MT_To &&
           CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
         "Expected link clause OR to clause with unified memory enabled.");
  QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
  Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
  return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
}
Address
CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
                                     LValueBaseInfo *PointeeBaseInfo,
                                     TBAAAccessInfo *PointeeTBAAInfo) {
  llvm::LoadInst *Load =
      Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
  CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());

  QualType PointeeType = RefLVal.getType()->getPointeeType();
  CharUnits Align = CGM.getNaturalTypeAlignment(
      PointeeType, PointeeBaseInfo, PointeeTBAAInfo,
      /* forPointeeType= */ true);
  return Address(Load, ConvertTypeForMem(PointeeType), Align);
}
LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
  LValueBaseInfo PointeeBaseInfo;
  TBAAAccessInfo PointeeTBAAInfo;
  Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
                                            &PointeeTBAAInfo);
  return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
                        PointeeBaseInfo, PointeeTBAAInfo);
}
Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
                                           const PointerType *PtrTy,
                                           LValueBaseInfo *BaseInfo,
                                           TBAAAccessInfo *TBAAInfo) {
  llvm::Value *Addr = Builder.CreateLoad(Ptr);
  return Address(Addr, ConvertTypeForMem(PtrTy->getPointeeType()),
                 CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), BaseInfo,
                                             TBAAInfo,
                                             /*forPointeeType=*/true));
}
LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
                                                const PointerType *PtrTy) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
  return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
}
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  QualType T = E->getType();

  // If it's thread_local, emit a call to its wrapper function instead.
  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
      CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
    return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
  // Check if the variable is marked as declare target with link clause in
  // device codegen.
  if (CGF.getLangOpts().OpenMPIsDevice) {
    Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
    if (Addr.isValid())
      return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  }

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);

  if (VD->getTLSKind() != VarDecl::TLS_None)
    V = CGF.Builder.CreateThreadLocalAddress(V);

  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
  V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
  Address Addr(V, RealVarTy, Alignment);
  // Emit reference to the private copy of the variable if it is an OpenMP
  // threadprivate variable.
  if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
      VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
    return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
                                          E->getExprLoc());
  }
  LValue LV = VD->getType()->isReferenceType() ?
      CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
                                    AlignmentSource::Decl) :
      CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}
static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
                                               GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  if (FD->hasAttr<WeakRefAttr>()) {
    ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
    return aliasee.getPointer();
  }

  llvm::Constant *V = CGM.GetAddrOfFunction(GD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use. Correct for this with a
      // bitcast.
      QualType NoProtoType =
          CGM.getContext().getFunctionNoProtoType(Proto->getReturnType());
      NoProtoType = CGM.getContext().getPointerType(NoProtoType);
      V = llvm::ConstantExpr::getBitCast(V,
                                      CGM.getTypes().ConvertType(NoProtoType));
    }
  }
  return V;
}
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
                                     GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD);
  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
  return CGF.MakeAddrLValue(V, E->getType(), Alignment,
                            AlignmentSource::Decl);
}
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
                                      llvm::Value *ThisValue) {
  QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
  LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
  return CGF.EmitLValueForField(LV, FD);
}
/// Named Registers are named metadata pointing to the register name
/// which will be read from/written to as an argument to the intrinsic
/// @llvm.read/write_register.
/// So far, only the name is being passed down, but other options such as
/// register type, allocation type or even optimization options could be
/// passed down via the metadata node.
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
  SmallString<64> Name("llvm.named.register.");
  AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
  assert(Asm->getLabel().size() < 64-Name.size() &&
         "Register name too big");
  Name.append(Asm->getLabel());
  llvm::NamedMDNode *M =
      CGM.getModule().getOrInsertNamedMetadata(Name);
  if (M->getNumOperands() == 0) {
    llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
                                              Asm->getLabel());
    llvm::Metadata *Ops[] = {Str};
    M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
  }

  CharUnits Alignment = CGM.getContext().getDeclAlign(VD);

  llvm::Value *Ptr =
      llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
  return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
}
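// For example (illustrative), a file-scope
//   register int *sp asm ("sp");
// produces the named metadata
//   !llvm.named.register.sp = !{!0}
//   !0 = !{!"sp"}
// whose operand feeds the @llvm.read_register / @llvm.write_register calls
// emitted above.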
/// Determine whether we can emit a reference to \p VD from the current
/// context, despite not necessarily having seen an odr-use of the variable in
/// this context.
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
                                               const DeclRefExpr *E,
                                               const VarDecl *VD,
                                               bool IsConstant) {
  // For a variable declared in an enclosing scope, do not emit a spurious
  // reference even if we have a capture, as that will emit an unwarranted
  // reference to our capture state, and will likely generate worse code than
  // emitting a local copy.
  if (E->refersToEnclosingVariableOrCapture())
    return false;

  // For a local declaration declared in this function, we can always reference
  // it even if we don't have an odr-use.
  if (VD->hasLocalStorage()) {
    return VD->getDeclContext() ==
           dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
  }

  // For a global declaration, we can emit a reference to it if we know
  // for sure that we are able to emit a definition of it.
  VD = VD->getDefinition(CGF.getContext());
  if (!VD)
    return false;

  // Don't emit a spurious reference if it might be to a variable that only
  // exists on a different device / target.
  // FIXME: This is unnecessarily broad. Check whether this would actually be a
  // cross-target reference.
  if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
      CGF.getLangOpts().OpenCL) {
    return false;
  }

  // We can emit a spurious reference only if the linkage implies that we'll
  // be emitting a non-interposable symbol that will be retained until link
  // time.
  switch (CGF.CGM.getLLVMLinkageVarDefinition(VD, IsConstant)) {
  case llvm::GlobalValue::ExternalLinkage:
  case llvm::GlobalValue::LinkOnceODRLinkage:
  case llvm::GlobalValue::WeakODRLinkage:
  case llvm::GlobalValue::InternalLinkage:
  case llvm::GlobalValue::PrivateLinkage:
    return true;
  default:
    // Other linkages are not safe to reference speculatively.
    return false;
  }
}
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  QualType T = E->getType();

  assert(E->isNonOdrUse() != NOUR_Unevaluated &&
         "should not emit an unevaluated operand");

  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
    // Global Named registers access via intrinsics only
    if (VD->getStorageClass() == SC_Register &&
        VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
      return EmitGlobalNamedRegister(VD, CGM);

    // If this DeclRefExpr does not constitute an odr-use of the variable,
    // we're not permitted to emit a reference to it in general, and it might
    // not be captured if capture would be necessary for a use. Emit the
    // constant value directly instead.
    if (E->isNonOdrUse() == NOUR_Constant &&
        (VD->getType()->isReferenceType() ||
         !canEmitSpuriousReferenceToVariable(*this, E, VD, true))) {
      VD->getAnyInitializer(VD);
      llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
          E->getLocation(), *VD->evaluateValue(), VD->getType());
      assert(Val && "failed to emit constant expression");

      Address Addr = Address::invalid();
      if (!VD->getType()->isReferenceType()) {
        // Spill the constant value to a global.
        Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
                                           getContext().getDeclAlign(VD));
        llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
        auto *PTy = llvm::PointerType::get(
            VarTy, getContext().getTargetAddressSpace(VD->getType()));
        Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
      } else {
        // Should we be using the alignment of the constant pointer we emitted?
        CharUnits Alignment =
            CGM.getNaturalTypeAlignment(E->getType(),
                                        /* BaseInfo= */ nullptr,
                                        /* TBAAInfo= */ nullptr,
                                        /* forPointeeType= */ true);
        Addr = Address(Val, ConvertTypeForMem(E->getType()), Alignment);
      }
      return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
    }

    // FIXME: Handle other kinds of non-odr-use DeclRefExprs.

    // Check for captured variables.
    if (E->refersToEnclosingVariableOrCapture()) {
      VD = VD->getCanonicalDecl();
      if (auto *FD = LambdaCaptureFields.lookup(VD))
        return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
      if (CapturedStmtInfo) {
        auto I = LocalDeclMap.find(VD);
        if (I != LocalDeclMap.end()) {
          LValue CapLVal;
          if (VD->getType()->isReferenceType())
            CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
                                                AlignmentSource::Decl);
          else
            CapLVal = MakeAddrLValue(I->second, T);
          // Mark lvalue as nontemporal if the variable is marked as nontemporal
          // in loop.
          if (getLangOpts().OpenMP &&
              CGM.getOpenMPRuntime().isNontemporalDecl(VD))
            CapLVal.setNontemporal(/*Value=*/true);
          return CapLVal;
        }
        LValue CapLVal =
            EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
                                    CapturedStmtInfo->getContextValue());
        Address LValueAddress = CapLVal.getAddress(*this);
        CapLVal = MakeAddrLValue(
            Address(LValueAddress.getPointer(), LValueAddress.getElementType(),
                    getContext().getDeclAlign(VD)),
            CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl),
            CapLVal.getTBAAInfo());
        // Mark lvalue as nontemporal if the variable is marked as nontemporal
        // in loop.
        if (getLangOpts().OpenMP &&
            CGM.getOpenMPRuntime().isNontemporalDecl(VD))
          CapLVal.setNontemporal(/*Value=*/true);
        return CapLVal;
      }

      assert(isa<BlockDecl>(CurCodeDecl));
      Address addr = GetAddrOfBlockDecl(VD);
      return MakeAddrLValue(addr, T, AlignmentSource::Decl);
    }
  }

  // FIXME: We should be able to assert this for FunctionDecls as well!
  // FIXME: We should be able to assert this for all DeclRefExprs, not just
  // those with a valid source location.
  assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
          !E->getLocation().isValid()) &&
         "Should not use decl without marking it used!");

  if (ND->hasAttr<WeakRefAttr>()) {
    const auto *VD = cast<ValueDecl>(ND);
    ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
  }

  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
    // Check if this is a global variable.
    if (VD->hasLinkage() || VD->isStaticDataMember())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    Address addr = Address::invalid();

    // The variable should generally be present in the local decl map.
    auto iter = LocalDeclMap.find(VD);
    if (iter != LocalDeclMap.end()) {
      addr = iter->second;

    // Otherwise, it might be static local we haven't emitted yet for
    // some reason; most likely, because it's in an outer function.
    } else if (VD->isStaticLocal()) {
      llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
          *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false));
      addr = Address(
          var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));

    // No other cases for now.
    } else {
      llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
    }

    // Handle threadlocal function locals.
    if (VD->getTLSKind() != VarDecl::TLS_None)
      addr =
          addr.withPointer(Builder.CreateThreadLocalAddress(addr.getPointer()));

    // Check for OpenMP threadprivate variables.
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
      return EmitThreadPrivateVarDeclLValue(
          *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
          E->getExprLoc());
    }

    // Drill into block byref variables.
    bool isBlockByref = VD->isEscapingByref();
    if (isBlockByref) {
      addr = emitBlockByrefAddress(addr, VD);
    }

    // Drill into reference types.
    LValue LV = VD->getType()->isReferenceType() ?
        EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
        MakeAddrLValue(addr, T, AlignmentSource::Decl);

    bool isLocalStorage = VD->hasLocalStorage();

    bool NonGCable = isLocalStorage &&
                     !VD->getType()->isReferenceType() &&
                     !isBlockByref;
    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }

    bool isImpreciseLifetime =
      (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
    if (isImpreciseLifetime)
      LV.setARCPreciseLifetime(ARCImpreciseLifetime);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
    LValue LV = EmitFunctionDeclLValue(*this, E, FD);

    // Emit debuginfo for the function declaration if the target wants to.
    if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) {
      if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) {
        auto *Fn =
            cast<llvm::Function>(LV.getPointer(*this)->stripPointerCasts());
        if (!Fn->getSubprogram())
          DI->EmitFunctionDecl(FD, FD->getLocation(), T, Fn);
      }
    }

    return LV;
  }

  // FIXME: While we're emitting a binding from an enclosing scope, all other
  // DeclRefExprs we see should be implicitly treated as if they also refer to
  // an enclosing scope.
  if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
    if (E->refersToEnclosingVariableOrCapture()) {
      auto *FD = LambdaCaptureFields.lookup(BD);
      return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
    }
    return EmitLValue(BD->getBinding());
  }

  // We can form DeclRefExprs naming GUID declarations when reconstituting
  // non-type template parameters into expressions.
  if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
    return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
                          AlignmentSource::Decl);

  if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND))
    return MakeAddrLValue(CGM.GetAddrOfTemplateParamObject(TPO), T,
                          AlignmentSource::Decl);

  llvm_unreachable("Unhandled DeclRefExpr");
}
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
                                            &TBAAInfo);
    LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getLangOpts().ObjC &&
        getLangOpts().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");

    // __real is valid on scalars. This is a faster way of testing that.
    // __imag can only produce an rvalue on scalars.
    if (E->getOpcode() == UO_Real &&
        !LV.getAddress(*this).getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    QualType T = ExprTy->castAs<ComplexType>()->getElementType();

    Address Component =
        (E->getOpcode() == UO_Real
             ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType())
             : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType()));
    LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
                                   CGM.getTBAAInfoForSubobject(LV, T));
    ElemLV.getQuals().addQualifiers(LV.getQuals());
    return ElemLV;
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}
LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType(), AlignmentSource::Decl);
}
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  auto SL = E->getFunctionName();
  assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
  StringRef FnName = CurFn->getName();
  if (FnName.startswith("\01"))
    FnName = FnName.substr(1);
  StringRef NameItems[] = {
      PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
  std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
  if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
    std::string Name = std::string(SL->getString());
    if (!Name.empty()) {
      unsigned Discriminator =
          CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
      if (Discriminator)
        Name += "_" + Twine(Discriminator + 1).str();
      auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
    } else {
      auto C =
          CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
    }
  }

  auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}

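// Illustrative example (added for exposition; not part of the original
// source): for
//   void foo(void) { const char *s = __func__; }
// the NameItems join above yields a private global named "__func__.foo"
// holding the C string "foo"; inside a block literal, the block
// discriminator computed above is appended so nested blocks get distinct
// names.
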
/// Emit a type description suitable for use by a runtime sanitizer library. The
/// format of a type descriptor is
///
/// \code
///   { i16 TypeKind, i16 TypeInfo }
/// \endcode
///
/// followed by an array of i8 containing the type name. TypeKind is 0 for an
/// integer, 1 for a floating point value, and -1 for anything else.
llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
  // Only emit each type's descriptor once.
  if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
    return C;

  uint16_t TypeKind = -1;
  uint16_t TypeInfo = 0;

  if (T->isIntegerType()) {
    TypeKind = 0;
    TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
               (T->isSignedIntegerType() ? 1 : 0);
  } else if (T->isFloatingType()) {
    TypeKind = 1;
    TypeInfo = getContext().getTypeSize(T);
  }

  // Format the type name as if for a diagnostic, including quotes and
  // optionally an 'aka'.
  SmallString<32> Buffer;
  CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
                                    (intptr_t)T.getAsOpaquePtr(),
                                    StringRef(), StringRef(), None, Buffer,
                                    None);

  llvm::Constant *Components[] = {
    Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
    llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
  };
  llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);

  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), Descriptor->getType(),
      /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);

  // Remember the descriptor for this type.
  CGM.setTypeDescriptorInMap(T, GV);

  return GV;
}

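// Illustrative example (added for exposition; not part of the original
// source): for a 32-bit signed 'int', TypeKind is 0 and TypeInfo is
// (log2(32) << 1) | 1 == 11, so the emitted descriptor looks roughly like
//   @0 = private unnamed_addr constant { i16, i16, [6 x i8] }
//            { i16 0, i16 11, [6 x i8] c"'int'\00" }
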
llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
  llvm::Type *TargetTy = IntPtrTy;

  if (V->getType() == TargetTy)
    return V;

  // Floating-point types which fit into intptr_t are bitcast to integers
  // and then passed directly (after zero-extension, if necessary).
  if (V->getType()->isFloatingPointTy()) {
    unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedSize();
    if (Bits <= TargetTy->getIntegerBitWidth())
      V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
                                                         Bits));
  }

  // Integers which fit in intptr_t are zero-extended and passed directly.
  if (V->getType()->isIntegerTy() &&
      V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
    return Builder.CreateZExt(V, TargetTy);

  // Pointers are passed directly, everything else is passed by address.
  if (!V->getType()->isPointerTy()) {
    Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
    Builder.CreateStore(V, Ptr);
    V = Ptr.getPointer();
  }
  return Builder.CreatePtrToInt(V, TargetTy);
}

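// Illustrative example (added for exposition; not part of the original
// source): on a 64-bit target, an i32 operand is zero-extended to i64; a
// 'double' is bitcast to i64 and then takes the integer path; an x86
// 'long double' (80 bits, too wide for intptr_t) is spilled to a temporary
// alloca and its address is passed instead.
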
/// Emit a representation of a SourceLocation for passing to a handler
/// in a sanitizer runtime library. The format for this data is:
/// \code
///   struct SourceLocation {
///     const char *Filename;
///     int32_t Line, Column;
///   };
/// \endcode
/// For an invalid SourceLocation, the Filename pointer is null.
llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
  llvm::Constant *Filename;
  int Line, Column;

  PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
  if (PLoc.isValid()) {
    StringRef FilenameString = PLoc.getFilename();

    int PathComponentsToStrip =
        CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
    if (PathComponentsToStrip < 0) {
      assert(PathComponentsToStrip != INT_MIN);
      int PathComponentsToKeep = -PathComponentsToStrip;
      auto I = llvm::sys::path::rbegin(FilenameString);
      auto E = llvm::sys::path::rend(FilenameString);
      while (I != E && --PathComponentsToKeep)
        ++I;

      FilenameString = FilenameString.substr(I - E);
    } else if (PathComponentsToStrip > 0) {
      auto I = llvm::sys::path::begin(FilenameString);
      auto E = llvm::sys::path::end(FilenameString);
      while (I != E && PathComponentsToStrip--)
        ++I;

      if (I != E)
        FilenameString =
            FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
      else
        FilenameString = llvm::sys::path::filename(FilenameString);
    }

    auto FilenameGV =
        CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
        cast<llvm::GlobalVariable>(FilenameGV.getPointer()));
    Filename = FilenameGV.getPointer();
    Line = PLoc.getLine();
    Column = PLoc.getColumn();
  } else {
    Filename = llvm::Constant::getNullValue(Int8PtrTy);
    Line = Column = 0;
  }

  llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
                            Builder.getInt32(Column)};

  return llvm::ConstantStruct::getAnon(Data);
}

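// Illustrative example (added for exposition; not part of the original
// source): a check at foo.c:12:7, with no path components stripped, is
// described by roughly
//   @.src = private unnamed_addr constant [6 x i8] c"foo.c\00"
// packed into the anonymous struct { i8* @.src, i32 12, i32 7 }.
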
/// Specify under what conditions this check can be recovered
enum class CheckRecoverableKind {
  /// Always terminate program execution if this check fails.
  Unrecoverable,
  /// Check supports recovering, runtime has both fatal (noreturn) and
  /// non-fatal handlers for this check.
  Recoverable,
  /// Runtime conditionally aborts, always need to support recovery.
  AlwaysRecoverable
};

static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
  assert(Kind.countPopulation() == 1);
  if (Kind == SanitizerKind::Function || Kind == SanitizerKind::Vptr)
    return CheckRecoverableKind::AlwaysRecoverable;
  else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
    return CheckRecoverableKind::Unrecoverable;
  else
    return CheckRecoverableKind::Recoverable;
}

namespace {
struct SanitizerHandlerInfo {
  char const *const Name;
  unsigned Version;
};
}

const SanitizerHandlerInfo SanitizerHandlers[] = {
#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
    LIST_SANITIZER_CHECKS
#undef SANITIZER_CHECK
};

static void emitCheckHandlerCall(CodeGenFunction &CGF,
                                 llvm::FunctionType *FnType,
                                 ArrayRef<llvm::Value *> FnArgs,
                                 SanitizerHandler CheckHandler,
                                 CheckRecoverableKind RecoverKind, bool IsFatal,
                                 llvm::BasicBlock *ContBB) {
  assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
  Optional<ApplyDebugLocation> DL;
  if (!CGF.Builder.getCurrentDebugLocation()) {
    // Ensure that the call has at least an artificial debug location.
    DL.emplace(CGF, SourceLocation());
  }
  bool NeedsAbortSuffix =
      IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
  bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
  const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
  const StringRef CheckName = CheckInfo.Name;
  std::string FnName = "__ubsan_handle_" + CheckName.str();
  if (CheckInfo.Version && !MinimalRuntime)
    FnName += "_v" + llvm::utostr(CheckInfo.Version);
  if (MinimalRuntime)
    FnName += "_minimal";
  if (NeedsAbortSuffix)
    FnName += "_abort";
  bool MayReturn =
      !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;

  llvm::AttrBuilder B(CGF.getLLVMContext());
  if (!MayReturn) {
    B.addAttribute(llvm::Attribute::NoReturn)
        .addAttribute(llvm::Attribute::NoUnwind);
  }
  B.addUWTableAttr(llvm::UWTableKind::Default);

  llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
      FnType, FnName,
      llvm::AttributeList::get(CGF.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex, B),
      /*Local=*/true);
  llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
  if (!MayReturn) {
    HandlerCall->setDoesNotReturn();
    CGF.Builder.CreateUnreachable();
  } else {
    CGF.Builder.CreateBr(ContBB);
  }
}

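// Illustrative example (added for exposition; not part of the original
// source): for the add_overflow check, the suffix logic above selects among
// handler names such as
//   __ubsan_handle_add_overflow          (recoverable, default runtime)
//   __ubsan_handle_add_overflow_abort    (fatal, marked noreturn)
//   __ubsan_handle_add_overflow_minimal  (-fsanitize-minimal-runtime)
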
void CodeGenFunction::EmitCheck(
    ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
    SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
    ArrayRef<llvm::Value *> DynamicArgs) {
  assert(IsSanitizerScope);
  assert(Checked.size() > 0);
  assert(CheckHandler >= 0 &&
         size_t(CheckHandler) < std::size(SanitizerHandlers));
  const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;

  llvm::Value *FatalCond = nullptr;
  llvm::Value *RecoverableCond = nullptr;
  llvm::Value *TrapCond = nullptr;
  for (int i = 0, n = Checked.size(); i < n; ++i) {
    llvm::Value *Check = Checked[i].first;
    // -fsanitize-trap= overrides -fsanitize-recover=.
    llvm::Value *&Cond =
        CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
            ? TrapCond
            : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
                  ? RecoverableCond
                  : FatalCond;
    Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
  }

  if (TrapCond)
    EmitTrapCheck(TrapCond, CheckHandler);
  if (!FatalCond && !RecoverableCond)
    return;

  llvm::Value *JointCond;
  if (FatalCond && RecoverableCond)
    JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
  else
    JointCond = FatalCond ? FatalCond : RecoverableCond;
  assert(JointCond);

  CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
  assert(SanOpts.has(Checked[0].second));
#ifndef NDEBUG
  for (int i = 1, n = Checked.size(); i < n; ++i) {
    assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
           "All recoverable kinds in a single check must be same!");
    assert(SanOpts.has(Checked[i].second));
  }
#endif

  llvm::BasicBlock *Cont = createBasicBlock("cont");
  llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
  llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
  // Give hint that we very much don't expect to execute the handler
  // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
  Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
  EmitBlock(Handlers);

  // Handler functions take an i8* pointing to the (handler-specific) static
  // information block, followed by a sequence of intptr_t arguments
  // representing operand values.
  SmallVector<llvm::Value *, 4> Args;
  SmallVector<llvm::Type *, 4> ArgTypes;
  if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
    Args.reserve(DynamicArgs.size() + 1);
    ArgTypes.reserve(DynamicArgs.size() + 1);

    // Emit handler arguments and create handler function type.
    if (!StaticArgs.empty()) {
      llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
      auto *InfoPtr =
          new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
                                   llvm::GlobalVariable::PrivateLinkage, Info);
      InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
      CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
      Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
      ArgTypes.push_back(Int8PtrTy);
    }

    for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
      Args.push_back(EmitCheckValue(DynamicArgs[i]));
      ArgTypes.push_back(IntPtrTy);
    }
  }

  llvm::FunctionType *FnType =
      llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);

  if (!FatalCond || !RecoverableCond) {
    // Simple case: we need to generate a single handler call, either
    // fatal, or non-fatal.
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
                         (FatalCond != nullptr), Cont);
  } else {
    // Emit two handler calls: first one for set of unrecoverable checks,
    // another one for recoverable.
    llvm::BasicBlock *NonFatalHandlerBB =
        createBasicBlock("non_fatal." + CheckName);
    llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
    Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
    EmitBlock(FatalHandlerBB);
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
                         NonFatalHandlerBB);
    EmitBlock(NonFatalHandlerBB);
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
                         Cont);
  }

  EmitBlock(Cont);
}

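// Illustrative example (added for exposition; not part of the original
// source): for a recoverable signed-add overflow check, the emitted control
// flow looks roughly like
//   br i1 %ok, label %cont, label %handler.add_overflow, !prof !N
//   handler.add_overflow:
//     call void @__ubsan_handle_add_overflow(i8* @data, i64 %lhs, i64 %rhs)
//     br label %cont
// where !N carries the branch weights 1048575:1 chosen above.
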
void CodeGenFunction::EmitCfiSlowPathCheck(
    SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
    llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
  llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");

  llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
  llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
  BI->setMetadata(llvm::LLVMContext::MD_prof, Node);

  EmitBlock(CheckBB);

  bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);

  llvm::CallInst *CheckCall;
  llvm::FunctionCallee SlowPathFn;
  if (WithDiag) {
    llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
    auto *InfoPtr =
        new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, Info);
    InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);

    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath_diag",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
                                false));
    CheckCall = Builder.CreateCall(
        SlowPathFn, {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)});
  } else {
    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
    CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
  }

  CGM.setDSOLocal(
      cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
  CheckCall->setDoesNotThrow();

  EmitBlock(Cont);
}

// Emit a stub for __cfi_check function so that the linker knows about this
// symbol in LTO mode.
void CodeGenFunction::EmitCfiCheckStub() {
  llvm::Module *M = &CGM.getModule();
  auto &Ctx = M->getContext();
  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
      llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
  CGM.setDSOLocal(F);
  llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
  // FIXME: consider emitting an intrinsic call like
  // call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2)
  // which can be lowered in CrossDSOCFI pass to the actual contents of
  // __cfi_check. This would allow inlining of __cfi_check calls.
  llvm::CallInst::Create(
      llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::trap), "", BB);
  llvm::ReturnInst::Create(Ctx, nullptr, BB);
}

// This function is basically a switch over the CFI failure kind, which is
// extracted from CFICheckFailData (1st function argument). Each case is either
// llvm.trap or a call to one of the two runtime handlers, based on
// -fsanitize-trap and -fsanitize-recover settings.  Default case (invalid
// failure kind) traps, but this should really never happen.  CFICheckFailData
// can be nullptr if the calling module has -fsanitize-trap behavior for this
// check kind; in this case __cfi_check_fail traps as well.
void CodeGenFunction::EmitCfiCheckFail() {
  SanitizerScope SanScope(this);
  FunctionArgList Args;
  ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
  Args.push_back(&ArgData);
  Args.push_back(&ArgAddr);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);

  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
      llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
  F->setVisibility(llvm::GlobalValue::HiddenVisibility);

  StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
                SourceLocation());

  // This function is not affected by NoSanitizeList. This function does
  // not have a source location, but "src:*" would still apply. Revert any
  // changes to SanOpts made in StartFunction.
  SanOpts = CGM.getLangOpts().Sanitize;

  llvm::Value *Data =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgData.getLocation());
  llvm::Value *Addr =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgAddr.getLocation());

  // Data == nullptr means the calling module has trap behaviour for this check.
  llvm::Value *DataIsNotNullPtr =
      Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
  EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);

  llvm::StructType *SourceLocationTy =
      llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
  llvm::StructType *CfiCheckFailDataTy =
      llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);

  llvm::Value *V = Builder.CreateConstGEP2_32(
      CfiCheckFailDataTy,
      Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
      0);

  Address CheckKindAddr(V, Int8Ty, getIntAlign());
  llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);

  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
      CGM.getLLVMContext(),
      llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
  llvm::Value *ValidVtable = Builder.CreateZExt(
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                         {Addr, AllVtables}),
      IntPtrTy);

  const std::pair<int, SanitizerMask> CheckKinds[] = {
      {CFITCK_VCall, SanitizerKind::CFIVCall},
      {CFITCK_NVCall, SanitizerKind::CFINVCall},
      {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
      {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
      {CFITCK_ICall, SanitizerKind::CFIICall}};

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks;
  for (auto CheckKindMaskPair : CheckKinds) {
    int Kind = CheckKindMaskPair.first;
    SanitizerMask Mask = CheckKindMaskPair.second;
    llvm::Value *Cond =
        Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
    if (CGM.getLangOpts().Sanitize.has(Mask))
      EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
                {Data, Addr, ValidVtable});
    else
      EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
  }

  FinishFunction();
  // The only reference to this function will be created during LTO link.
  // Make sure it survives until then.
  CGM.addUsedGlobal(F);
}

void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
  if (SanOpts.has(SanitizerKind::Unreachable)) {
    SanitizerScope SanScope(this);
    EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
                             SanitizerKind::Unreachable),
              SanitizerHandler::BuiltinUnreachable,
              EmitCheckSourceLocation(Loc), None);
  }
  Builder.CreateUnreachable();
}

void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
                                    SanitizerHandler CheckHandlerID) {
  llvm::BasicBlock *Cont = createBasicBlock("cont");

  // If we're optimizing, collapse all calls to trap down to just one per
  // check-type per function to save on code size.
  if (TrapBBs.size() <= CheckHandlerID)
    TrapBBs.resize(CheckHandlerID + 1);
  llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];

  if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
    TrapBB = createBasicBlock("trap");
    Builder.CreateCondBr(Checked, Cont, TrapBB);
    EmitBlock(TrapBB);

    llvm::CallInst *TrapCall =
        Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
                           llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));

    if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
      auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                    CGM.getCodeGenOpts().TrapFuncName);
      TrapCall->addFnAttr(A);
    }
    TrapCall->setDoesNotReturn();
    TrapCall->setDoesNotThrow();
    Builder.CreateUnreachable();
  } else {
    auto Call = TrapBB->begin();
    assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");

    Call->applyMergedLocation(Call->getDebugLoc(),
                              Builder.getCurrentDebugLocation());
    Builder.CreateCondBr(Checked, Cont, TrapBB);
  }

  EmitBlock(Cont);
}

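// Illustrative example (added for exposition; not part of the original
// source): at -O1 and above, every failing check of the same kind in a
// function branches to one shared block of the form
//   trap:
//     call void @llvm.ubsantrap(i8 N)   ; immediate N identifies the check
//     unreachable
// rather than emitting a separate trap per check site.
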
llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
  llvm::CallInst *TrapCall =
      Builder.CreateCall(CGM.getIntrinsic(IntrID));

  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
    auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                  CGM.getCodeGenOpts().TrapFuncName);
    TrapCall->addFnAttr(A);
  }

  return TrapCall;
}

Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
                                                 LValueBaseInfo *BaseInfo,
                                                 TBAAAccessInfo *TBAAInfo) {
  assert(E->getType()->isArrayType() &&
         "Array to pointer decay must have array source type!");

  // Expressions of array type can't be bitfields or vector elements.
  LValue LV = EmitLValue(E);
  Address Addr = LV.getAddress(*this);

  // If the array type was an incomplete type, we need to make sure
  // the decay ends up being the right type.
  llvm::Type *NewTy = ConvertType(E->getType());
  Addr = Builder.CreateElementBitCast(Addr, NewTy);

  // Note that VLA pointers are always decayed, so we don't need to do
  // anything here.
  if (!E->getType()->isVariableArrayType()) {
    assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
           "Expected pointer to array");
    Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
  }

  // The result of this decay conversion points to an array element within the
  // base lvalue. However, since TBAA currently does not support representing
  // accesses to elements of member arrays, we conservatively represent accesses
  // to the pointee object as if it had no base lvalue specified.
  // TODO: Support TBAA for member arrays.
  QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
  if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);

  return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType));
}

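// Illustrative example (added for exposition; not part of the original
// source): for 'int a[10]; int *p = a;' the decay above emits roughly
//   %arraydecay = getelementptr inbounds [10 x i32], [10 x i32]* %a,
//                     i64 0, i64 0
// i.e. the address of the first element of the array.
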
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
  // If this isn't just an array->pointer decay, bail out.
  const auto *CE = dyn_cast<CastExpr>(E);
  if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
    return nullptr;

  // If this is a decay from variable width array, bail out.
  const Expr *SubExpr = CE->getSubExpr();
  if (SubExpr->getType()->isVariableArrayType())
    return nullptr;

  return SubExpr;
}

static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
                                          llvm::Type *elemType,
                                          llvm::Value *ptr,
                                          ArrayRef<llvm::Value *> indices,
                                          bool inbounds,
                                          bool signedIndices,
                                          SourceLocation loc,
                                          const llvm::Twine &name = "arrayidx") {
  if (inbounds) {
    return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
                                      CodeGenFunction::NotSubtraction, loc,
                                      name);
  } else {
    return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
  }
}

static CharUnits getArrayElementAlign(CharUnits arrayAlign,
                                      llvm::Value *idx,
                                      CharUnits eltSize) {
  // If we have a constant index, we can use the exact offset of the
  // element we're accessing.
  if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
    CharUnits offset = constantIdx->getZExtValue() * eltSize;
    return arrayAlign.alignmentAtOffset(offset);

  // Otherwise, use the worst-case alignment for any element.
  } else {
    return arrayAlign.alignmentOfArrayElement(eltSize);
  }
}

static QualType getFixedSizeElementType(const ASTContext &ctx,
                                        const VariableArrayType *vla) {
  QualType eltType;
  do {
    eltType = vla->getElementType();
  } while ((vla = ctx.getAsVariableArrayType(eltType)));
  return eltType;
}

/// Given an array base, check whether its member access belongs to a record
/// with preserve_access_index attribute or not.
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
  if (!ArrayBase || !CGF.getDebugInfo())
    return false;

  // Only support base as either a MemberExpr or DeclRefExpr.
  // DeclRefExpr to cover cases like:
  //    struct s { int a; int b[10]; };
  //    struct s *p;
  //    p[1].a
  // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
  // p->b[5] is a MemberExpr example.
  const Expr *E = ArrayBase->IgnoreImpCasts();
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();

  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
    const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
    if (!VarDef)
      return false;

    const auto *PtrT = VarDef->getType()->getAs<PointerType>();
    if (!PtrT)
      return false;

    const auto *PointeeT = PtrT->getPointeeType()
                             ->getUnqualifiedDesugaredType();
    if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
      return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
    return false;
  }

  return false;
}

static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
                                     ArrayRef<llvm::Value *> indices,
                                     QualType eltType, bool inbounds,
                                     bool signedIndices, SourceLocation loc,
                                     QualType *arrayType = nullptr,
                                     const Expr *Base = nullptr,
                                     const llvm::Twine &name = "arrayidx") {
  // All the indices except that last must be zero.
#ifndef NDEBUG
  for (auto *idx : indices.drop_back())
    assert(isa<llvm::ConstantInt>(idx) &&
           cast<llvm::ConstantInt>(idx)->isZero());
#endif

  // Determine the element size of the statically-sized base.  This is
  // the thing that the indices are expressed in terms of.
  if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
    eltType = getFixedSizeElementType(CGF.getContext(), vla);
  }

  // We can use that to compute the best alignment of the element.
  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
  CharUnits eltAlign =
      getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);

  llvm::Value *eltPtr;
  auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
  if (!LastIndex ||
      (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
    eltPtr = emitArraySubscriptGEP(
        CGF, addr.getElementType(), addr.getPointer(), indices, inbounds,
        signedIndices, loc, name);
  } else {
    // Remember the original array subscript for bpf target
    unsigned idx = LastIndex->getZExtValue();
    llvm::DIType *DbgInfo = nullptr;
    if (arrayType)
      DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
    eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(),
                                                        addr.getPointer(),
                                                        indices.size() - 1,
                                                        idx, DbgInfo);
  }

  return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
}

LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
                                               bool Accessed) {
  // The index must always be an integer, which is not an aggregate.  Emit it
  // in lexical order (this complexity is, sadly, required by C++17).
  llvm::Value *IdxPre =
      (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
  bool SignedIndices = false;
  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
    auto *Idx = IdxPre;
    if (E->getLHS() != E->getIdx()) {
      assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
      Idx = EmitScalarExpr(E->getIdx());
    }

    QualType IdxTy = E->getIdx()->getType();
    bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
    SignedIndices |= IdxSigned;

    if (SanOpts.has(SanitizerKind::ArrayBounds))
      EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);

    // Extend or truncate the index type to 32 or 64-bits.
    if (Promote && Idx->getType() != IntPtrTy)
      Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");

    return Idx;
  };
  IdxPre = nullptr;

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType() &&
      !isa<ExtVectorElementExpr>(E->getBase())) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/false);
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    return LValue::MakeVectorElt(LHS.getAddress(*this), Idx,
                                 E->getBase()->getType(), LHS.getBaseInfo(),
                                 TBAAAccessInfo());
  }

  // All the other cases basically behave like simple offsetting.

  // Handle the extvector case we ignored above.
  if (isa<ExtVectorElementExpr>(E->getBase())) {
    LValue LV = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    Address Addr = EmitExtVectorElementLValue(LV);

    QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
                                 SignedIndices, E->getExprLoc());
    return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, EltType));
  }

  LValueBaseInfo EltBaseInfo;
  TBAAAccessInfo EltTBAAInfo;
  Address Addr = Address::invalid();
  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(E->getType())) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined()) {
      Idx = Builder.CreateMul(Idx, numElements);
    } else {
      Idx = Builder.CreateNSWMul(Idx, numElements);
    }

    Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc());

  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
    // Indexing over an interface, as in "NSString *P; P[4];"

    // Emit the base pointer.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
    llvm::Value *InterfaceSizeVal =
        llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());

    llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);

    // We don't necessarily build correct LLVM struct types for ObjC
    // interfaces, so we can't rely on GEP to do this scaling
    // correctly, so we need to cast to i8*.  FIXME: is this actually
    // true?  A lot of other things in the fragile ABI would break...
    llvm::Type *OrigBaseElemTy = Addr.getElementType();
    Addr = Builder.CreateElementBitCast(Addr, Int8Ty);

    // Do the GEP.
    CharUnits EltAlign =
        getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
    llvm::Value *EltPtr =
        emitArraySubscriptGEP(*this, Addr.getElementType(), Addr.getPointer(),
                              ScaledIdx, false, SignedIndices, E->getExprLoc());
    Addr = Address(EltPtr, Addr.getElementType(), EltAlign);

    // Cast back.
    Addr = Builder.CreateElementBitCast(Addr, OrigBaseElemTy);
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    // Propagate the alignment from the array itself to the result.
    QualType arrayType = Array->getType();
    Addr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
        E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
        E->getExprLoc(), &arrayType, E->getBase());
    EltBaseInfo = ArrayLV.getBaseInfo();
    EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
  } else {
    // The base must be a pointer; emit it with an estimate of its alignment.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    QualType ptrType = E->getBase()->getType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc(), &ptrType,
                                 E->getBase());
  }

  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);

  if (getLangOpts().ObjC &&
      getLangOpts().getGC() != LangOptions::NonGC) {
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}

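// Illustrative example (added for exposition; not part of the original
// source): for 'int a[10]; ... a[i] ...' the decayed-array fast path above
// emits a single
//   %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %a,
//                   i64 0, i64 %idxprom
// instead of one GEP for the decay ("gep %a, 0, 0") followed by a second
// GEP for the subscript.
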
LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
  assert(
      !E->isIncomplete() &&
      "incomplete matrix subscript expressions should be rejected during Sema");
  LValue Base = EmitLValue(E->getBase());
  llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
  llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
  llvm::Value *NumRows = Builder.getIntN(
      RowIdx->getType()->getScalarSizeInBits(),
      E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
  llvm::Value *FinalIdx =
      Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
  return LValue::MakeMatrixElt(
      MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
      E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
}

static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
                                       LValueBaseInfo &BaseInfo,
                                       TBAAAccessInfo &TBAAInfo,
                                       QualType BaseTy, QualType ElTy,
                                       bool IsLowerBound) {
  LValue BaseLVal;
  if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) {
    BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound);
    if (BaseTy->isArrayType()) {
      Address Addr = BaseLVal.getAddress(CGF);
      BaseInfo = BaseLVal.getBaseInfo();

      // If the array type was an incomplete type, we need to make sure
      // the decay ends up being the right type.
      llvm::Type *NewTy = CGF.ConvertType(BaseTy);
      Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy);

      // Note that VLA pointers are always decayed, so we don't need to do
      // anything here.
      if (!BaseTy->isVariableArrayType()) {
        assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
               "Expected pointer to array");
        Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
      }

      return CGF.Builder.CreateElementBitCast(Addr,
                                              CGF.ConvertTypeForMem(ElTy));
    }
    LValueBaseInfo TypeBaseInfo;
    TBAAAccessInfo TypeTBAAInfo;
    CharUnits Align =
        CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
    BaseInfo.mergeForCast(TypeBaseInfo);
    TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
    return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
                   CGF.ConvertTypeForMem(ElTy), Align);
  }
  return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}

LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
                                                bool IsLowerBound) {
  QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase());
  QualType ResultExprTy;
  if (auto *AT = getContext().getAsArrayType(BaseTy))
    ResultExprTy = AT->getElementType();
  else
    ResultExprTy = BaseTy->getPointeeType();
  llvm::Value *Idx = nullptr;
  if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
    // Requesting lower bound or upper bound, but without provided length and
    // without ':' symbol for the default length -> length = 1.
    // Idx = LowerBound ?: 0;
    if (auto *LowerBound = E->getLowerBound()) {
      Idx = Builder.CreateIntCast(
          EmitScalarExpr(LowerBound), IntPtrTy,
          LowerBound->getType()->hasSignedIntegerRepresentation());
    } else
      Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
  } else {
    // Try to emit length or lower bound as constant. If this is possible, 1
    // is subtracted from constant length or lower bound. Otherwise, emit LLVM
    // IR (LB + Len) - 1.
    auto &C = CGM.getContext();
    auto *Length = E->getLength();
    llvm::APSInt ConstLength;
    if (Length) {
      // Idx = LowerBound + Length - 1;
      if (Optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
        ConstLength = CL->zextOrTrunc(PointerWidthInBits);
        Length = nullptr;
      }
      auto *LowerBound = E->getLowerBound();
      llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
      if (LowerBound) {
        if (Optional<llvm::APSInt> LB = LowerBound->getIntegerConstantExpr(C)) {
          ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
          LowerBound = nullptr;
        }
      }
      if (!Length)
        --ConstLength;
      else if (!LowerBound)
        --ConstLowerBound;

      if (Length || LowerBound) {
        auto *LowerBoundVal =
            LowerBound
                ? Builder.CreateIntCast(
                      EmitScalarExpr(LowerBound), IntPtrTy,
                      LowerBound->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
        auto *LengthVal =
            Length
                ? Builder.CreateIntCast(
                      EmitScalarExpr(Length), IntPtrTy,
                      Length->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLength);
        Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
                                /*HasNUW=*/false,
                                !getLangOpts().isSignedOverflowDefined());
        if (Length && LowerBound) {
          Idx = Builder.CreateSub(
              Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
              /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
        }
      } else
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
    } else {
      // Idx = ArraySize - 1;
      QualType ArrayTy = BaseTy->isPointerType()
                             ? E->getBase()->IgnoreParenImpCasts()->getType()
                             : BaseTy;
      if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
        Length = VAT->getSizeExpr();
        if (Optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
          ConstLength = *L;
          Length = nullptr;
        }
      } else {
        auto *CAT = C.getAsConstantArrayType(ArrayTy);
        ConstLength = CAT->getSize();
      }
      if (Length) {
        auto *LengthVal = Builder.CreateIntCast(
            EmitScalarExpr(Length), IntPtrTy,
            Length->getType()->hasSignedIntegerRepresentation());
        Idx = Builder.CreateSub(
            LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
            /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
      } else {
        ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
        --ConstLength;
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
      }
    }
  }
  assert(Idx);

  Address EltPtr = Address::invalid();
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
                                BaseTy, VLA->getElementType(), IsLowerBound);
    // The element count here is the total number of non-VLA elements.
    llvm::Value *NumElements = getVLASize(VLA).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined())
      Idx = Builder.CreateMul(Idx, NumElements);
    else
      Idx = Builder.CreateNSWMul(Idx, NumElements);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);

    // Propagate the alignment from the array itself to the result.
    EltPtr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
        ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
        /*signedIndices=*/false, E->getExprLoc());
    BaseInfo = ArrayLV.getBaseInfo();
    TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
  } else {
    Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
                                           TBAAInfo, BaseTy, ResultExprTy,
                                           IsLowerBound);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  }

  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
}

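// Illustrative example (added for exposition; not part of the original
// source): for an OpenMP array section 'a[3:7]', requesting the lower bound
// yields the address of a[3]; requesting the upper bound computes
// Idx = 3 + 7 - 1 = 9 (folded to a constant here, or emitted as
// (lb + len) - 1 when either operand is not a constant expression).
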
LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
    const auto *PT = E->getBase()->getType()->castAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    Address VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
                          AlignmentSource::Decl);
  }

  QualType type =
    E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
  SmallVector<uint32_t, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV =
        llvm::ConstantDataVector::get(getLLVMContext(), Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type,
                                    Base.getBaseInfo(), TBAAAccessInfo());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
                                  Base.getBaseInfo(), TBAAAccessInfo());
}

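// Illustrative example (added for exposition; not part of the original
// source): for an ext_vector_type(4) 'float4 v', the access 'v.zy' is
// encoded as the index list <2, 1>; a nested access like 'v.zy.x' takes the
// second branch above and composes the lists, selecting index 2 of the
// original vector.
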
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
    EmitIgnoredExpr(E->getBase());
    return EmitDeclRefLValue(DRE);
  }

  Expr *BaseExpr = E->getBase();
  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  LValue BaseLV;
  if (E->isArrow()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
    QualType PtrTy = BaseExpr->getType()->getPointeeType();
    SanitizerSet SkippedChecks;
    bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
    if (IsBaseCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
      SkippedChecks.set(SanitizerKind::Null, true);
    EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy,
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
    BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
  } else
    BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);

  NamedDecl *ND = E->getMemberDecl();
  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseLV, Field);
    setObjCGCLValueClass(getContext(), E, LV);
    if (getLangOpts().OpenMP) {
      // If the member was explicitly marked as nontemporal, mark it as
      // nontemporal. If the base lvalue is marked as nontemporal, mark access
      // to children as nontemporal too.
      if ((IsWrappedCXXThis(BaseExpr) &&
           CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
          BaseLV.isNontemporal())
        LV.setNontemporal(/*Value=*/true);
    }
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}

/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
  if (CurCodeDecl) {
    assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
    assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
  }
  QualType LambdaTagType =
    getContext().getTagDeclType(Field->getParent());
  LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
  return EmitLValueForField(LambdaLV, Field);
}

/// Get the field index in the debug info. The debug info structure/union
/// will ignore the unnamed bitfields.
unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
                                             unsigned FieldIndex) {
  unsigned I = 0, Skipped = 0;

  for (auto *F : Rec->getDefinition()->fields()) {
    if (I == FieldIndex)
      break;
    if (F->isUnnamedBitfield())
      Skipped++;
    I++;
  }

  return FieldIndex - Skipped;
}

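// Illustrative example (added for exposition; not part of the original
// source): given
//   struct S { int a; int : 4; int b; };
// the unnamed bit-field occupies AST field index 1 but is omitted from the
// debug info, so getDebugInfoFIndex(Rec, /*FieldIndex=*/2) returns 1 for 'b'.
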
/// Get the address of a zero-sized field within a record. The resulting
/// address doesn't necessarily have the right type.
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
                                       const FieldDecl *Field) {
  CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
      CGF.getContext().getFieldOffset(Field));
  if (Offset.isZero())
    return Base;
  Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty);
  return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
}

/// Drill down to the storage of a field without walking into
/// reference types.
///
/// The resulting address doesn't necessarily have the right type.
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
                                      const FieldDecl *field) {
  if (field->isZeroSize(CGF.getContext()))
    return emitAddrOfZeroSizeField(CGF, base, field);

  const RecordDecl *rec = field->getParent();

  unsigned idx =
    CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreateStructGEP(base, idx, field->getName());
}

static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
                                        Address addr, const FieldDecl *field) {
  const RecordDecl *rec = field->getParent();
  llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
      base.getType(), rec->getLocation());

  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreatePreserveStructAccessIndex(
      addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
}

static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
  const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  if (RD->isDynamicClass())
    return true;

  for (const auto &Base : RD->bases())
    if (hasAnyVptr(Base.getType(), Context))
      return true;

  for (const FieldDecl *Field : RD->fields())
    if (hasAnyVptr(Field->getType(), Context))
      return true;

  return false;
}

LValue CodeGenFunction::EmitLValueForField(LValue base,
                                           const FieldDecl *field) {
  LValueBaseInfo BaseInfo = base.getBaseInfo();

  if (field->isBitField()) {
    const CGRecordLayout &RL =
        CGM.getTypes().getCGRecordLayout(field->getParent());
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
    const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
                             CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
                             Info.VolatileStorageSize != 0 &&
                             field->getType()
                                 .withCVRQualifiers(base.getVRQualifiers())
                                 .isVolatileQualified();
    Address Addr = base.getAddress(*this);
    unsigned Idx = RL.getLLVMFieldNo(field);
    const RecordDecl *rec = field->getParent();
    if (!UseVolatile) {
      if (!IsInPreservedAIRegion &&
          (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
        if (Idx != 0)
          // For structs, we GEP to the field that the record layout suggests.
          Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
      } else {
        llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
            getContext().getRecordType(rec), rec->getLocation());
        Addr = Builder.CreatePreserveStructAccessIndex(
            Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
            DbgInfo);
      }
    }
    const unsigned SS =
        UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
    // Get the access type.
    llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
    if (Addr.getElementType() != FieldIntTy)
      Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
    if (UseVolatile) {
      const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
      if (VolatileOffset)
        Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
    }

    QualType fieldType =
        field->getType().withCVRQualifiers(base.getVRQualifiers());
    // TODO: Support TBAA for bit fields.
    LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
    return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
                                TBAAAccessInfo());
  }

  // Fields of may-alias structures are may-alias themselves.
  // FIXME: this should get propagated down through anonymous structs
  // and unions.
  QualType FieldType = field->getType();
  const RecordDecl *rec = field->getParent();
  AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
  TBAAAccessInfo FieldTBAAInfo;
  if (base.getTBAAInfo().isMayAlias() ||
      rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else if (rec->isUnion()) {
    // TODO: Support TBAA for unions.
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else {
    // If no base type been assigned for the base access, then try to generate
    // one for this base lvalue.
    FieldTBAAInfo = base.getTBAAInfo();
    if (!FieldTBAAInfo.BaseType) {
      FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
      assert(!FieldTBAAInfo.Offset &&
             "Nonzero offset for an access with no base type!");
    }

    // Adjust offset to be relative to the base type.
    const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(field->getParent());
    unsigned CharWidth = getContext().getCharWidth();
    if (FieldTBAAInfo.BaseType)
      FieldTBAAInfo.Offset +=
          Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;

    // Update the final access type and size.
    FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
    FieldTBAAInfo.Size =
        getContext().getTypeSizeInChars(FieldType).getQuantity();
  }

  Address addr = base.getAddress(*this);
  if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        ClassDef->isDynamicClass()) {
      // Getting to any field of dynamic object requires stripping dynamic
      // information provided by invariant.group.  This is because accessing
      // fields may leak the real address of dynamic object, which could result
      // in miscompilation when leaked pointer would be compared.
      auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
      addr = Address(stripped, addr.getElementType(), addr.getAlignment());
    }
  }

  unsigned RecordCVR = base.getVRQualifiers();
  if (rec->isUnion()) {
    // For unions, there is no pointer adjustment.
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        hasAnyVptr(FieldType, getContext()))
      // Because unions can easily skip invariant.barriers, we need to add
      // a barrier every time CXXRecord field with vptr is referenced.
      addr = Builder.CreateLaunderInvariantGroup(addr);

    if (IsInPreservedAIRegion ||
        (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
      // Remember the original union field index
      llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
          base.getType(), rec->getLocation());
      addr = Address(
          Builder.CreatePreserveUnionAccessIndex(
              addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
          addr.getElementType(), addr.getAlignment());
    }

    if (FieldType->isReferenceType())
      addr = Builder.CreateElementBitCast(
          addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
  } else {
    if (!IsInPreservedAIRegion &&
        (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
      // For structs, we GEP to the field that the record layout suggests.
      addr = emitAddrOfFieldStorage(*this, addr, field);
    else
      // Remember the original struct field index
      addr = emitPreserveStructAccess(*this, base, addr, field);
  }

  // If this is a reference field, load the reference right now.
  if (FieldType->isReferenceType()) {
    LValue RefLVal =
        MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
    if (RecordCVR & Qualifiers::Volatile)
      RefLVal.getQuals().addVolatile();
    addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);

    // Qualifiers on the struct don't apply to the referencee.
    RecordCVR = 0;
    FieldType = FieldType->getPointeeType();
  }

  // Make sure that the address is pointing to the right type.  This is critical
  // for both unions and structs.  A union needs a bitcast, a struct element
  // will need a bitcast if the LLVM type laid out doesn't match the desired
  // type.
  addr = Builder.CreateElementBitCast(
      addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());

  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
  LV.getQuals().addCVRQualifiers(RecordCVR);

  // __weak attribute on a field is ignored.
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
    LV.getQuals().removeObjCGCAttr();

  return LV;
}

LValue
CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
                                                  const FieldDecl *Field) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(Base, Field);

  Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field);

  // Make sure that the address is pointing to the right type.
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
  V = Builder.CreateElementBitCast(V, llvmType, Field->getName());

  // TODO: Generate TBAA information that describes this access as a structure
  // member access and not just an access to an object of the field's type. This
  // should be similar to what we do in EmitLValueForField().
  LValueBaseInfo BaseInfo = Base.getBaseInfo();
  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
  return MakeAddrLValue(V, FieldType, FieldBaseInfo,
                        CGM.getTBAAInfoForSubobject(Base, FieldType));
}

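// Illustrative note (not from the original source): this entry point matters
// for constructs like
//   struct T { int &r; T(int &x) : r(x) {} };
// where the *reference slot itself* must be the lvalue being initialized,
// rather than the referencee that EmitLValueForField would load.
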
LValue
CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E) {
  if (E->isFileScope()) {
    ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
    return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
  }
  if (E->getType()->isVariablyModifiedType())
    // Make sure to emit the VLA size.
    EmitVariablyModifiedType(E->getType());

  Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr *InitExpr = E->getInitializer();
  LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);

  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
                   /*Init*/ true);

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  if (!getLangOpts().CPlusPlus)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
                                  E->getType(), getDestroyer(DtorKind),
                                  DtorKind & EHCleanup);

  return Result;
}

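// Illustrative note (not from the original source): in C,
//   int *p = (int[]){1, 2, 3};
// at file scope refers to a constant global, while inside a function the
// literal lives in a ".compoundliteral" temporary with block-scope lifetime.
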
LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
  if (!E->isGLValue())
    // Initializing an aggregate temporary in C++11: T{...}.
    return EmitAggExprToLValue(E);

  // An lvalue initializer list must be initializing a reference.
  assert(E->isTransparent() && "non-transparent glvalue init list");
  return EmitLValue(E->getInit(0));
}

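// Illustrative note (not from the original source): a glvalue init list is a
// transparent wrapper around a single expression, as in
//   int x = 0; int &r{x};
// so the lvalue is simply that of the wrapped operand.
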
/// Emit the operand of a glvalue conditional operator. This is either a
/// glvalue or a (possibly-parenthesized) throw-expression. If this is a throw,
/// no LValue is returned and the current block has been terminated.
static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
                                                    const Expr *Operand) {
  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
    CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
    return llvm::None;
  }

  return CGF.EmitLValue(Operand);
}

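// Illustrative note (not from the original source): this handles operands like
//   cond ? x : throw std::runtime_error("bad");
// where the throw arm produces no value and terminates its basic block.
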
namespace {
// Handle the case where the condition is a constant evaluatable simple integer,
// which means we don't have to separately handle the true/false blocks.
llvm::Optional<LValue> HandleConditionalOperatorLValueSimpleCase(
    CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
  const Expr *condExpr = E->getCond();
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
    if (!CondExprBool)
      std::swap(Live, Dead);

    if (!CGF.ContainsLabel(Dead)) {
      // If the true case is live, we need to track its region.
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      // If it is a throw expression, we emit it and return an undefined lvalue
      // because it can't be used.
      if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
        CGF.EmitCXXThrowExpr(ThrowExpr);
        llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
        llvm::Type *Ty = llvm::PointerType::getUnqual(ElemTy);
        return CGF.MakeAddrLValue(
            Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
            Dead->getType());
      }
      return CGF.EmitLValue(Live);
    }
  }
  return llvm::None;
}

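// Illustrative note (not from the original source): with a foldable condition,
//   (sizeof(int) == 4 ? a : b) = 7;
// collapses to a direct store to 'a' with no branches emitted.
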
struct ConditionalInfo {
  llvm::BasicBlock *lhsBlock, *rhsBlock;
  Optional<LValue> LHS, RHS;
};

// Create and generate the 3 blocks for a conditional operator.
// Leaves the 'current block' in the continuation basic block.
template<typename FuncTy>
ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
                                      const AbstractConditionalOperator *E,
                                      const FuncTy &BranchGenFunc) {
  ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
                       CGF.createBasicBlock("cond.false"), llvm::None,
                       llvm::None};
  llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
                           CGF.getProfileCount(E));

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.lhsBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
  eval.end(CGF);
  Info.lhsBlock = CGF.Builder.GetInsertBlock();

  if (Info.LHS)
    CGF.Builder.CreateBr(endBlock);

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.rhsBlock);
  eval.begin(CGF);
  Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
  eval.end(CGF);
  Info.rhsBlock = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(endBlock);

  return Info;
}
} // namespace

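// Illustrative sketch (not from the original source) of the emitted CFG:
//
//            +-> cond.true  --+
//   %cond ---+                +--> cond.end
//            +-> cond.false --+
//
// Each arm runs BranchGenFunc inside a ConditionalEvaluation scope so that
// any temporaries or cleanups it creates are treated as conditional.
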
void CodeGenFunction::EmitIgnoredConditionalOperator(
    const AbstractConditionalOperator *E) {
  if (!E->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(E->getType()) &&
           "Unexpected conditional operator!");
    return (void)EmitAggExprToLValue(E);
  }

  OpaqueValueMapping binding(*this, E);
  if (HandleConditionalOperatorLValueSimpleCase(*this, E))
    return;

  EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
    CGF.EmitIgnoredExpr(E);
    return LValue();
  });
}

LValue CodeGenFunction::EmitConditionalOperatorLValue(
    const AbstractConditionalOperator *expr) {
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(expr->getType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  OpaqueValueMapping binding(*this, expr);
  if (llvm::Optional<LValue> Res =
          HandleConditionalOperatorLValueSimpleCase(*this, expr))
    return *Res;

  ConditionalInfo Info = EmitConditionalBlocks(
      *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
        return EmitLValueOrThrowExpression(CGF, E);
      });

  if ((Info.LHS && !Info.LHS->isSimple()) ||
      (Info.RHS && !Info.RHS->isSimple()))
    return EmitUnsupportedLValue(expr, "conditional operator");

  if (Info.LHS && Info.RHS) {
    Address lhsAddr = Info.LHS->getAddress(*this);
    Address rhsAddr = Info.RHS->getAddress(*this);
    llvm::PHINode *phi = Builder.CreatePHI(lhsAddr.getType(), 2, "cond-lvalue");
    phi->addIncoming(lhsAddr.getPointer(), Info.lhsBlock);
    phi->addIncoming(rhsAddr.getPointer(), Info.rhsBlock);
    Address result(phi, lhsAddr.getElementType(),
                   std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
    AlignmentSource alignSource =
        std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
                 Info.RHS->getBaseInfo().getAlignmentSource());
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
        Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
    return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
                          TBAAInfo);
  }

  assert((Info.LHS || Info.RHS) &&
         "both operands of glvalue conditional are throw-expressions?");
  return Info.LHS ? *Info.LHS : *Info.RHS;
}

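// Illustrative note (not from the original source): for
//   (flag ? x : y) = 42;
// the store target is the phi of &x and &y formed in cond.end above.
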
/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
/// type. If the cast is to a reference, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
/// access one of its members. This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_ToVoid:
  case CK_BitCast:
  case CK_LValueToRValueBitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  // These are never l-values; just use the aggregate emission code.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
    return EmitAggExprToLValue(E);

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress(*this);
    const auto *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
  }

  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_LValueToRValue:
    return EmitLValue(E->getSubExpr());

  case CK_NoOp: {
    // CK_NoOp can model a qualification conversion, which can remove an array
    // bound and change the IR type.
    // FIXME: Once pointee types are removed from IR, remove this.
    LValue LV = EmitLValue(E->getSubExpr());
    if (LV.isSimple()) {
      Address V = LV.getAddress(*this);
      if (V.isValid()) {
        llvm::Type *T = ConvertTypeForMem(E->getType());
        if (V.getElementType() != T)
          LV.setAddress(Builder.CreateElementBitCast(V, T));
      }
    }
    return LV;
  }

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const auto *DerivedClassTy =
        E->getSubExpr()->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    Address This = LV.getAddress(*this);

    // Perform the derived-to-base conversion
    Address Base = GetAddressOfBaseClass(
        This, DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false, E->getExprLoc());

    // TODO: Support accesses to members of base classes in TBAA. For now, we
    // conservatively pretend that the complete object is of the base class
    // type.
    return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
    Address Derived = GetAddressOfDerivedClass(
        LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false);

    // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (sanitizePerformTypeCheck())
      EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
                    Derived.getPointer(), E->getType());

    if (SanOpts.has(SanitizerKind::CFIDerivedCast))
      EmitVTablePtrCheckForCast(E->getType(), Derived,
                                /*MayBeNull=*/false, CFITCK_DerivedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or C-style equivalent).
    const auto *CE = cast<ExplicitCastExpr>(E);

    CGM.EmitExplicitCastExprType(CE, this);
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = Builder.CreateElementBitCast(
        LV.getAddress(*this),
        ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));

    if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
      EmitVTablePtrCheckForCast(E->getType(), V,
                                /*MayBeNull=*/false, CFITCK_UnrelatedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_AddressSpaceConversion: {
    LValue LV = EmitLValue(E->getSubExpr());
    QualType DestTy = getContext().getPointerType(E->getType());
    llvm::Value *V = getTargetHooks().performAddrSpaceCast(
        *this, LV.getPointer(*this),
        E->getSubExpr()->getType().getAddressSpace(),
        E->getType().getAddressSpace(), ConvertType(DestTy));
    return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
                                  LV.getAddress(*this).getAlignment()),
                          E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = Builder.CreateElementBitCast(LV.getAddress(*this),
                                             ConvertType(E->getType()));
    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ZeroToOCLOpaqueType:
    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}

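// Illustrative note (not from the original source): CK_LValueBitCast covers
// code such as
//   float f; reinterpret_cast<int &>(f) = 0;
// while CK_DerivedToBase/CK_BaseToDerived adjust the object address along the
// inheritance path, e.g. static_cast<Derived &>(baseRef).
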
LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOrCreateOpaqueLValueMapping(e);
}

LValue
CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
  assert(OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
      it = OpaqueLValues.find(e);
  if (it != OpaqueLValues.end())
    return it->second;

  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
  return EmitLValue(e->getSourceExpr());
}

RValue
CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
  assert(!OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
      it = OpaqueRValues.find(e);
  if (it != OpaqueRValues.end())
    return it->second;

  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
  return EmitAnyExpr(e->getSourceExpr());
}

RValue CodeGenFunction::EmitRValueForField(LValue LV,
                                           const FieldDecl *FD,
                                           SourceLocation Loc) {
  QualType FT = FD->getType();
  LValue FieldLV = EmitLValueForField(LV, FD);
  switch (getEvaluationKind(FT)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
  case TEK_Aggregate:
    return FieldLV.asAggregateRValue(*this);
  case TEK_Scalar:
    // This routine is used to load fields one-by-one to perform a copy, so
    // don't load reference fields.
    if (FD->getType()->isReferenceType())
      return RValue::get(FieldLV.getPointer(*this));
    // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
    // primitive load.
    if (FieldLV.isBitField())
      return EmitLoadOfLValue(FieldLV, Loc);
    return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
  }
  llvm_unreachable("bad evaluation kind");
}

//===--------------------------------------------------------------------===//
// Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD =
            dyn_cast_or_null<CXXMethodDecl>(CE->getCalleeDecl()))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  CGCallee callee = EmitCallee(E->getCallee());

  if (callee.isBuiltin()) {
    return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
                           E, ReturnValue);
  }

  if (callee.isPseudoDestructor()) {
    return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
  }

  return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
}

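// Illustrative note (not from the original source): a plain call like
//   size_t n = strlen(s);
// flows through EmitCallee -> EmitDirectCallee, which may classify it as a
// builtin, while p->f(x) is routed to EmitCXXMemberCallExpr above.
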
/// Emit a CallExpr without considering whether it might be a subclass.
RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
                                           ReturnValueSlot ReturnValue) {
  CGCallee Callee = EmitCallee(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
}

// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
  for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
    if (!PD->isInlineBuiltinDeclaration())
      return false;
  return true;
}

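// Illustrative note (not from the original source): the "inline builtin"
// pattern this guards against looks like
//   extern inline __attribute__((always_inline, gnu_inline))
//   void *memcpy(void *d, const void *s, size_t n) { /* custom body */ }
// where a later non-inline redeclaration must win everywhere.
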
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (auto builtinID = FD->getBuiltinID()) {
    std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
    std::string NoBuiltins = "no-builtins";
    std::string FDInlineName = (FD->getName() + ".inline").str();

    bool IsPredefinedLibFunction =
        CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
    bool HasAttributeNoBuiltin =
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);

    // When directly calling an inline builtin, call it through its mangled
    // name to make it clear it's not the actual builtin.
    if (CGF.CurFn->getName() != FDInlineName &&
        OnlyHasInlineBuiltinDeclaration(FD)) {
      llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
      llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
      llvm::Module *M = Fn->getParent();
      llvm::Function *Clone = M->getFunction(FDInlineName);
      if (!Clone) {
        Clone = llvm::Function::Create(Fn->getFunctionType(),
                                       llvm::GlobalValue::InternalLinkage,
                                       Fn->getAddressSpace(), FDInlineName, M);
        Clone->addFnAttr(llvm::Attribute::AlwaysInline);
      }
      return CGCallee::forDirect(Clone, GD);
    }

    // Replaceable builtins provide their own implementation of a builtin. If we
    // are in an inline builtin implementation, avoid trivial infinite
    // recursion. Honor __attribute__((no_builtin("foo"))) or
    // __attribute__((no_builtin)) on the current function unless foo is
    // not a predefined library function, which means we must generate the
    // builtin no matter what.
    else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
      return CGCallee::forBuiltin(builtinID, FD);
  }

  llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
  if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
      FD->hasAttr<CUDAGlobalAttr>())
    CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
        cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));

  return CGCallee::forDirect(CalleePtr, GD);
}

CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
  E = E->IgnoreParens();

  // Look through function-to-pointer decay.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
        ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
      return EmitCallee(ICE->getSubExpr());
    }

  // Resolve direct calls.
  } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
      return EmitDirectCallee(*this, FD);
    }
  } else if (auto ME = dyn_cast<MemberExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
      EmitIgnoredExpr(ME->getBase());
      return EmitDirectCallee(*this, FD);
    }

  // Look through template substitutions.
  } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
    return EmitCallee(NTTP->getReplacement());

  // Treat pseudo-destructor calls differently.
  } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
    return CGCallee::forPseudoDestructor(PDE);
  }

  // Otherwise, we have an indirect reference.
  llvm::Value *calleePtr;
  QualType functionType;
  if (auto ptrType = E->getType()->getAs<PointerType>()) {
    calleePtr = EmitScalarExpr(E);
    functionType = ptrType->getPointeeType();
  } else {
    functionType = E->getType();
    calleePtr = EmitLValue(E).getPointer(*this);
  }
  assert(functionType->isFunctionType());

  GlobalDecl GD;
  if (const auto *VD =
          dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
    GD = GlobalDecl(VD);

  CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
  CGCallee callee(calleeInfo, calleePtr);
  return callee;
}

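// Illustrative note (not from the original source): an indirect call such as
//   int (*fp)(int) = pick(); fp(1);
// takes the fall-through path: the callee pointer is just EmitScalarExpr(fp),
// with only the prototype (if any) recorded in CGCalleeInfo.
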
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar: {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    RValue RV = EmitAnyExpr(E->getRHS());
    LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
    if (RV.isScalar())
      EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());
    EmitStoreThroughLValue(RV, LV);
    if (getLangOpts().OpenMP)
      CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                                E->getLHS());
    return LV;
  }

  case TEK_Complex:
    return EmitComplexAssignmentLValue(E);

  case TEK_Aggregate:
    return EmitAggExprToLValue(E);
  }
  llvm_unreachable("bad evaluation kind");
}

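// Illustrative note (not from the original source): C++ assignment yields an
// lvalue, so
//   int x, y; (x = y) = 5;
// first stores y into x via EmitStoreThroughLValue, then returns the lvalue
// of x for the outer assignment.
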
LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() &&
         "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
  return Builder.CreateElementBitCast(CGM.GetAddrOfMSGuidDecl(E->getGuidDecl()),
                                      ConvertType(E->getType()));
}

LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
  return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
                        AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  Address V =
      CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
  return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = nullptr;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    BaseValue = BaseLV.getPointer(*this);
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
      EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                        BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                        AlignmentSource::Decl);
}

RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
                                 const CallExpr *E, ReturnValueSlot ReturnValue,
                                 llvm::Value *Chain) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  const Decl *TargetDecl =
      OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();

  CalleeType = getContext().getCanonicalType(CalleeType);

  auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();

  CGCallee Callee = OrigCallee;

  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    if (llvm::Constant *PrefixSig =
            CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
      SanitizerScope SanScope(this);
      // Remove any (C++17) exception specifications, to allow calling e.g. a
      // noexcept function through a non-noexcept pointer.
      auto ProtoTy =
          getContext().getFunctionTypeWithExceptionSpec(PointeeType, EST_None);
      llvm::Constant *FTRTTIConst =
          CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
      llvm::Type *PrefixSigType = PrefixSig->getType();
      llvm::StructType *PrefixStructTy = llvm::StructType::get(
          CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);

      llvm::Value *CalleePtr = Callee.getFunctionPointer();

      llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
          CalleePtr, llvm::PointerType::getUnqual(PrefixStructTy));
      llvm::Value *CalleeSigPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
      llvm::Value *CalleeSig =
          Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);

      llvm::BasicBlock *Cont = createBasicBlock("cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);

      EmitBlock(TypeCheck);
      llvm::Value *CalleeRTTIPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
      llvm::Value *CalleeRTTIEncoded =
          Builder.CreateAlignedLoad(Int32Ty, CalleeRTTIPtr, getPointerAlign());
      llvm::Value *CalleeRTTI =
          DecodeAddrUsedInPrologue(CalleePtr, CalleeRTTIEncoded);
      llvm::Value *CalleeRTTIMatch =
          Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
      llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
                                      EmitCheckTypeDescriptor(CalleeType)};
      EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
                SanitizerHandler::FunctionTypeMismatch, StaticData,
                {CalleePtr, CalleeRTTI, FTRTTIConst});

      Builder.CreateBr(Cont);
      EmitBlock(Cont);
    }
  }

  const auto *FnType = cast<FunctionType>(PointeeType);

  // If we are checking indirect calls and this call is indirect, check that
  // the function pointer is a member of the bit set for the function type.
  if (SanOpts.has(SanitizerKind::CFIICall) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    SanitizerScope SanScope(this);
    EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);

    llvm::Metadata *MD;
    if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
      MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
    else
      MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));

    llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    llvm::Value *CastedCallee = Builder.CreateBitCast(CalleePtr, Int8PtrTy);
    llvm::Value *TypeTest = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedCallee, TypeId});

    auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
    llvm::Constant *StaticData[] = {
        llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
        EmitCheckSourceLocation(E->getBeginLoc()),
        EmitCheckTypeDescriptor(QualType(FnType, 0)),
    };
    if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
      EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
                           CastedCallee, StaticData);
    } else {
      EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
                SanitizerHandler::CFICheckFail, StaticData,
                {CastedCallee, llvm::UndefValue::get(IntPtrTy)});
    }
  }

  CallArgList Args;
  if (Chain)
    Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
             CGM.getContext().VoidPtrTy);

  // C++17 requires that we evaluate arguments to a call using assignment
  // syntax right-to-left, and that we evaluate arguments to certain other
  // operators left-to-right. Note that we allow this to override the order
  // dictated by the calling convention on the MS ABI, which means that
  // parameter destruction order is not necessarily reverse construction order.
  // FIXME: Revisit this based on C++ committee response to unimplementability.
  EvaluationOrder Order = EvaluationOrder::Default;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
    if (OCE->isAssignmentOp())
      Order = EvaluationOrder::ForceRightToLeft;
    else {
      switch (OCE->getOperator()) {
      case OO_LessLess:
      case OO_GreaterGreater:
      case OO_AmpAmp:
      case OO_PipePipe:
      case OO_Comma:
      case OO_ArrowStar:
        Order = EvaluationOrder::ForceLeftToRight;
        break;
      default:
        break;
      }
    }
  }

  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
               E->getDirectCallee(), /*ParamsToSkip*/ 0, Order);

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
      Args, FnType, /*ChainCall=*/Chain);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call. The way we make this work is to cast to the exact type
  // of the promoted arguments.
  //
  // Chain calls use this same code path to add the invisible chain parameter
  // to the function type.
  if (isa<FunctionNoProtoType>(FnType) || Chain) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
    CalleeTy = CalleeTy->getPointerTo(AS);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
    Callee.setFunctionPointer(CalleePtr);
  }

  // HIP function pointer contains kernel handle when it is used in triple
  // chevron. The kernel stub needs to be loaded from kernel handle and used
  // as callee.
  if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
      isa<CUDAKernelCallExpr>(E) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    llvm::Value *Handle = Callee.getFunctionPointer();
    auto *Cast =
        Builder.CreateBitCast(Handle, Handle->getType()->getPointerTo());
    auto *Stub = Builder.CreateLoad(
        Address(Cast, Handle->getType(), CGM.getPointerAlign()));
    Callee.setFunctionPointer(Stub);
  }
  llvm::CallBase *CallOrInvoke = nullptr;
  RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
                         E == MustTailCall, E->getExprLoc());

  // Generate function declaration DISubprogram in order to be used
  // in debug info about call sites.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      FunctionArgList Args;
      QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
      DI->EmitFuncDeclForCallSite(CallOrInvoke,
                                  DI->getFunctionType(CalleeDecl, ResTy, Args),
                                  CalleeDecl);
    }
  }

  return Call;
}

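// Illustrative note (not from the original source): for an unprototyped K&R
// call in C, e.g.
//   void f(); ... f(1, 2.0);
// the callee is bitcast ("callee.knr.cast") to the exact promoted-argument
// type, so the call is emitted as if it were non-variadic.
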
LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  Address BaseAddr = Address::invalid();
  if (E->getOpcode() == BO_PtrMemI) {
    BaseAddr = EmitPointerWithAlignment(E->getLHS());
  } else {
    BaseAddr = EmitLValue(E->getLHS()).getAddress(*this);
  }

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
  const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();

  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address MemberAddr =
      EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
                                      &TBAAInfo);

  return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
}

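// Illustrative note (not from the original source): this handles
//   int S::*pm = &S::x;  s.*pm = 1;  p->*pm = 2;
// where the member offset is loaded at run time and applied to the base
// object's address.
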
/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(Address addr,
                                            QualType type,
                                            SourceLocation loc) {
  LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
  case TEK_Aggregate:
    return lvalue.asAggregateRValue(*this);
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

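// Illustrative note (not from the original source): OpenCL's 2.5-ulp divide
// accuracy would be attached to the instruction as
//   %d = fdiv float %a, %b, !fpmath !0
//   !0 = !{float 2.500000e+00}
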
namespace {
struct LValueOrRValue {
  LValue LV;
  RValue RV;
};
} // namespace

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      // Skip unique OVEs.
      if (ov->isUnique()) {
        assert(ov != resultExpr &&
               "A unique OVE cannot be used as the result expression");
        continue;
      }

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isPRValue() && !forLValue &&
          CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);
        LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
                                       AlignmentSource::Decl);
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

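// Illustrative note (not from the original source): pseudo-object emission
// covers e.g. an Objective-C property compound assignment
//   obj.count += 1;
// whose semantic form binds OVEs for 'obj' and the getter result, then emits
// the setter call as the result expression.
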
RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}