//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//
13 #include "CGCUDARuntime.h"
16 #include "CGCleanup.h"
17 #include "CGDebugInfo.h"
18 #include "CGObjCRuntime.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CGRecordLayout.h"
21 #include "CodeGenFunction.h"
22 #include "CodeGenModule.h"
23 #include "ConstantEmitter.h"
24 #include "TargetInfo.h"
25 #include "clang/AST/ASTContext.h"
26 #include "clang/AST/Attr.h"
27 #include "clang/AST/DeclObjC.h"
28 #include "clang/AST/NSAPI.h"
29 #include "clang/Basic/Builtins.h"
30 #include "clang/Basic/CodeGenOptions.h"
31 #include "clang/Basic/SourceManager.h"
32 #include "llvm/ADT/Hashing.h"
33 #include "llvm/ADT/StringExtras.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/LLVMContext.h"
37 #include "llvm/IR/MDBuilder.h"
38 #include "llvm/IR/MatrixBuilder.h"
39 #include "llvm/Support/ConvertUTF.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Support/Path.h"
42 #include "llvm/Support/SaveAndRestore.h"
43 #include "llvm/Transforms/Utils/SanitizerStats.h"
using namespace clang;
using namespace CodeGen;
//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//
llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
      cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
                                                     CharUnits Align,
                                                     const Twine &Name,
                                                     llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getAsAlign());
  return Address(Alloca, Ty, Align);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is casted to default address space if necessary.
Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                          const Twine &Name,
                                          llvm::Value *ArraySize,
                                          Address *AllocaAddr) {
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
  if (getASTAllocaAddressSpace() != LangAS::Default) {
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(getPostAllocaInsertPoint());
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
  }

  return Address(V, Ty, Align);
}
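// Illustrative note (a sketch for exposition, not part of the upstream file):
// on a target whose allocas live in a private address space, e.g. AMDGPU, the
// lowering above turns a local such as
//   int x;
// into IR of roughly this shape (allocas in addrspace(5), generic is 0):
//   %x = alloca i32, align 4, addrspace(5)
//   %x.ascast = addrspacecast ptr addrspace(5) %x to ptr
// so the rest of IRGen can keep working with default-address-space pointers.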
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  if (ArraySize)
    return Builder.CreateAlloca(Ty, ArraySize, Name);
  return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                              ArraySize, Name, AllocaInsertPt);
}
/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                      const Twine &Name) {
  CharUnits Align = CharUnits::fromQuantity(
      CGM.getDataLayout().getPrefTypeAlignment(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}
Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}
Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                       Address *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}
Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                       const Twine &Name, Address *Alloca) {
  Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                                    /*ArraySize=*/nullptr, Alloca);

  if (Ty->isConstantMatrixType()) {
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    Result = Address(
        Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()),
        VectorTy, Result.getAlignment());
  }
  return Result;
}
Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
                                                  const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}
Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                  const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  CGFPOptionsRAII FPOptsRAII(*this, E);
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),
                                       BoolTy, Loc);
}
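// Illustrative note (a sketch for exposition, not part of the upstream file):
// for a plain scalar condition such as
//   if (n) { ... }   // n is an int
// this emits n and compares it against zero to produce the i1 a branch needs;
// member pointers and _Complex values take the dedicated paths above because
// "compare against zero" means something different for them.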
/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isPRValue())
    return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // If this is a bitfield-resulting conditional operator, we can special case
  // emit this. The normal 'EmitLValue' version of this is particularly
  // difficult to codegen for, since creating a single "LValue" for two
  // different sized arguments here is not particularly doable.
  if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
          E->IgnoreParenNoopCasts(getContext()))) {
    if (CondOp->getObjectKind() == OK_BitField)
      return EmitIgnoredConditionalOperator(CondOp);
  }

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}
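// Illustrative note (a sketch for exposition, not part of the upstream file):
// the bitfield special case above covers expressions like
//   struct S { int a : 3; int b : 17; } s;
//   cond ? s.a : s.b;   // value ignored
// where the two arms are bitfields of different widths, so no single LValue
// could describe the result.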
/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}
/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}
/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}
static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise =
              VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::FunctionCallee CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          ReferenceTemporary, E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
          GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}
static Address createReferenceTemporary(CodeGenFunction &CGF,
                                        const MaterializeTemporaryExpr *M,
                                        const Expr *Inner,
                                        Address *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record try to promote it into a
    // constant global under the same rules a normal constant would've been
    // promoted. This is easier on the optimizer and generally emits fewer
    // instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        CGF.CGM.isTypeConstant(Ty, true))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
        auto *GV = new llvm::GlobalVariable(
            CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
            llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
            llvm::GlobalValue::NotThreadLocal,
            CGF.getContext().getTargetAddressSpace(AS));
        CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
        GV->setAlignment(alignment.getAsAlign());
        llvm::Constant *C = GV;
        if (AS != LangAS::Default)
          C = TCG.performAddrSpaceCast(
              CGF.CGM, GV, AS, LangAS::Default,
              GV->getValueType()->getPointerTo(
                  CGF.getContext().getTargetAddressSpace(LangAS::Default)));
        // FIXME: Should we put the new global into a COMDAT?
        return Address(C, GV->getValueType(), alignment);
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}
/// Helper method to check if the underlying ABI is AAPCS
static bool isAAPCS(const TargetInfo &TargetInfo) {
  return TargetInfo.getABI().startswith("aapcs");
}
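// Illustrative note (a sketch for exposition, not part of the upstream file):
// matching with startswith covers the whole AAPCS family of ABI names
// ("aapcs", "aapcs-vfp", and similar variants), not just the exact string
// "aapcs".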
LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->getSubExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    Address Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      llvm::Type *Ty = ConvertTypeForMem(E->getType());
      Object = Address(llvm::ConstantExpr::getBitCast(
                           Var, Ty->getPointerTo(Object.getAddressSpace())),
                       Ty, Object.getAlignment());

      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can. It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations. Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  Address Alloca = Address::invalid();
  Address Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
    Object = Address(llvm::ConstantExpr::getBitCast(
                         cast<llvm::Constant>(Object.getPointer()),
                         TemporaryType->getPointerTo()),
                     TemporaryType,
                     Object.getAlignment());
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this with sanitizers which need
      // more precise lifetime marks.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          !SanOpts.has(SanitizerKind::HWAddress) &&
          !SanOpts.has(SanitizerKind::Memory) &&
          !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) {
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/ false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress(*this);
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
                                               Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}
RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer(*this);

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}
/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}
/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}
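// Illustrative note (a sketch for exposition, not part of the upstream file):
// this mirrors hash_16_bytes from llvm/ADT/Hashing.h, which follows the
// CityHash 128-to-64 finalizer; 0x9ddfea08eb382d69 is that algorithm's
// multiplicative constant. In scalar form the IR built above computes roughly:
//   uint64_t a = (Low ^ High) * kMul;  a ^= (a >> 47);
//   uint64_t b = (High ^ a) * kMul;    b ^= (b >> 47);
//   return b * kMul;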
bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}
bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}
bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) ||
         SanOpts.has(SanitizerKind::Alignment) ||
         SanOpts.has(SanitizerKind::ObjectSize) ||
         SanOpts.has(SanitizerKind::Vptr);
}
void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment,
                                    SanitizerSet SkippedChecks,
                                    llvm::Value *ArraySize) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty.isVolatileQualified())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  // Quickly determine whether we have a pointer to an alloca. It's possible
  // to skip null checks, and some alignment checks, for these pointers. This
  // can reduce compile-time significantly.
  auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());

  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
  llvm::Value *IsNonNull = nullptr;
  bool IsGuaranteedNonNull =
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
  bool AllowNullPointers = isNullPointerAllowed(TCK);
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !IsGuaranteedNonNull) {
    // The glvalue must not be an empty glvalue.
    IsNonNull = Builder.CreateIsNotNull(Ptr);

    // The IR builder can constant-fold the null check if the pointer points to
    // a constant.
    IsGuaranteedNonNull = IsNonNull == True;

    // Skip the null check if the pointer is known to be non-null.
    if (!IsGuaranteedNonNull) {
      if (AllowNullPointers) {
        // When performing pointer casts, it's OK if the value is null.
        // Skip the remaining checks in that case.
        Done = createBasicBlock("null");
        llvm::BasicBlock *Rest = createBasicBlock("not.null");
        Builder.CreateCondBr(IsNonNull, Rest, Done);
        EmitBlock(Rest);
      } else {
        Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
      }
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) &&
      !SkippedChecks.has(SanitizerKind::ObjectSize) &&
      !Ty->isIncompleteType()) {
    uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
    llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
    if (ArraySize)
      Size = Builder.CreateMul(Size, ArraySize);

    // Degenerate case: new X[0] does not need an objectsize check.
    llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
    if (!ConstantSize || !ConstantSize->isNullValue()) {
      // The glvalue must refer to a large enough storage region.
      // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
      //        to check this.
      // FIXME: Get object address space
      llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
      llvm::Value *Min = Builder.getFalse();
      llvm::Value *NullIsUnknown = Builder.getFalse();
      llvm::Value *Dynamic = Builder.getFalse();
      llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
      llvm::Value *LargeEnough = Builder.CreateICmpUGE(
          Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown, Dynamic}), Size);
      Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
    }
  }

  llvm::MaybeAlign AlignVal;
  llvm::Value *PtrAsInt = nullptr;

  if (SanOpts.has(SanitizerKind::Alignment) &&
      !SkippedChecks.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getAsMaybeAlign();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
                                             /*ForPointeeType=*/true)
                     .getAsMaybeAlign();

    // The glvalue must be suitably aligned.
    if (AlignVal && *AlignVal > llvm::Align(1) &&
        (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
      PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
      llvm::Value *Align = Builder.CreateAnd(
          PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      if (Aligned != True)
        Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
        llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
        llvm::ConstantInt::get(Int8Ty, TCK)};
    EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
              PtrAsInt ? PtrAsInt : Ptr);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  if (SanOpts.has(SanitizerKind::Vptr) &&
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
    // Ensure that the pointer is non-null before loading it. If there is no
    // compile-time guarantee, reuse the run-time null check or emit a new one.
    if (!IsGuaranteedNonNull) {
      if (!IsNonNull)
        IsNonNull = Builder.CreateIsNotNull(Ptr);
      if (!Done)
        Done = createBasicBlock("vptr.null");
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
      EmitBlock(VptrNotNull);
    }

    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic! Move to a
    //        fingerprinting mechanism once LLVM provides one. For the time
    //        being the implementation happens to be deterministic.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Contained in NoSanitizeList based on the mangled type.
    if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
                                                           Out.str())) {
      llvm::hash_code TypeHash = hash_value(Out.str());

      // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
      llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
      Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), IntPtrTy,
                       getPointerAlign());
      llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
      llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

      llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal = Builder.CreateAlignedLoad(
          IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
          getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty),
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
        llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}
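// Illustrative note (a sketch for exposition, not part of the upstream file):
// the vptr sanitizer path above amounts to a direct-mapped, 128-entry cache
// lookup, roughly:
//   hash = hash_16_bytes(hash_of_mangled_RTTI_name, vptr);
//   if (__ubsan_vptr_type_cache[hash & 127] != hash)
//     __ubsan_handle_dynamic_type_cache_miss(...);  // validate or diagnose
// so a previously validated vptr/type pair costs one load and one compare.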
llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}
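// Illustrative note (a sketch for exposition, not part of the upstream file):
// this relies on the pass_object_size attribute, e.g.
//   void f(int *p __attribute__((pass_object_size(0))));
// for which callers pass a hidden size-in-bytes argument; dividing that by
// the element size recovers a bound in elements for the array bound checks.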
/// If Base is known to point to the start of an array, return the length of
/// that array. Return 0 if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
                                          const Expr *Base,
                                          QualType &IndexedType,
                                          LangOptions::StrictFlexArraysLevelKind
                                          StrictFlexArraysLevel) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
                                                     StrictFlexArraysLevel)) {
      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());
      else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}
*E
, const Expr
*Base
,
956 llvm::Value
*Index
, QualType IndexType
,
958 assert(SanOpts
.has(SanitizerKind::ArrayBounds
) &&
959 "should not be called unless adding bounds checks");
960 SanitizerScope
SanScope(this);
962 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel
=
963 getLangOpts().getStrictFlexArraysLevel();
965 QualType IndexedType
;
967 getArrayIndexingBound(*this, Base
, IndexedType
, StrictFlexArraysLevel
);
971 bool IndexSigned
= IndexType
->isSignedIntegerOrEnumerationType();
972 llvm::Value
*IndexVal
= Builder
.CreateIntCast(Index
, SizeTy
, IndexSigned
);
973 llvm::Value
*BoundVal
= Builder
.CreateIntCast(Bound
, SizeTy
, false);
975 llvm::Constant
*StaticData
[] = {
976 EmitCheckSourceLocation(E
->getExprLoc()),
977 EmitCheckTypeDescriptor(IndexedType
),
978 EmitCheckTypeDescriptor(IndexType
)
980 llvm::Value
*Check
= Accessed
? Builder
.CreateICmpULT(IndexVal
, BoundVal
)
981 : Builder
.CreateICmpULE(IndexVal
, BoundVal
);
982 EmitCheck(std::make_pair(Check
, SanitizerKind::ArrayBounds
),
983 SanitizerHandler::OutOfBounds
, StaticData
, Index
);
987 CodeGenFunction::ComplexPairTy
CodeGenFunction::
988 EmitComplexPrePostIncDec(const UnaryOperator
*E
, LValue LV
,
989 bool isInc
, bool isPre
) {
990 ComplexPairTy InVal
= EmitLoadOfComplex(LV
, E
->getExprLoc());
992 llvm::Value
*NextVal
;
993 if (isa
<llvm::IntegerType
>(InVal
.first
->getType())) {
994 uint64_t AmountVal
= isInc
? 1 : -1;
995 NextVal
= llvm::ConstantInt::get(InVal
.first
->getType(), AmountVal
, true);
997 // Add the inc/dec to the real part.
998 NextVal
= Builder
.CreateAdd(InVal
.first
, NextVal
, isInc
? "inc" : "dec");
1000 QualType ElemTy
= E
->getType()->castAs
<ComplexType
>()->getElementType();
1001 llvm::APFloat
FVal(getContext().getFloatTypeSemantics(ElemTy
), 1);
1004 NextVal
= llvm::ConstantFP::get(getLLVMContext(), FVal
);
1006 // Add the inc/dec to the real part.
1007 NextVal
= Builder
.CreateFAdd(InVal
.first
, NextVal
, isInc
? "inc" : "dec");
1010 ComplexPairTy
IncVal(NextVal
, InVal
.second
);
1012 // Store the updated result through the lvalue.
1013 EmitStoreOfComplex(IncVal
, LV
, /*init*/ false);
1014 if (getLangOpts().OpenMP
)
1015 CGM
.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
1018 // If this is a postinc, return the value read from memory, otherwise use the
1020 return isPre
? IncVal
: InVal
;
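// Illustrative note (a sketch for exposition, not part of the upstream file):
// ++/-- on a _Complex value adjusts only the real component, which is why
// only InVal.first is modified above:
//   _Complex double z = 1.0 + 2.0i;
//   ++z;   // z becomes 2.0 + 2.0i; the imaginary part is untouched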
void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}
//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//
/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
                                                  LValueBaseInfo *BaseInfo,
                                                  TBAAAccessInfo *TBAAInfo) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGM.EmitExplicitCastExprType(ECE, this);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        Address Addr = EmitPointerWithAlignment(CE->getSubExpr(),
                                                &InnerBaseInfo,
                                                &InnerTBAAInfo);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          CharUnits Align = CGM.getNaturalPointeeTypeAlignment(
              E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
                                                 TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr = Address(Addr.getPointer(), Addr.getElementType(), Align);
          }
        }

        if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
                                      /*MayBeNull=*/true,
                                      CodeGenFunction::CFITCK_UnrelatedCast,
                                      CE->getBeginLoc());
        }

        llvm::Type *ElemTy = ConvertTypeForMem(E->getType()->getPointeeType());
        Addr = Builder.CreateElementBitCast(Addr, ElemTy);
        if (CE->getCastKind() == CK_AddressSpaceConversion)
          Addr = Builder.CreateAddrSpaceCast(Addr, ConvertType(E->getType()));
        return Addr;
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGM.getTBAAAccessInfo(E->getType());
      Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo);
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return GetAddressOfBaseClass(Addr, Derived,
                                   CE->path_begin(), CE->path_end(),
                                   ShouldNullCheckClassCastValue(CE),
                                   CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = EmitLValue(UO->getSubExpr());
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress(*this);
    }
  }

  // std::addressof and variants.
  if (auto *Call = dyn_cast<CallExpr>(E)) {
    switch (Call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      LValue LV = EmitLValue(Call->getArg(0));
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress(*this);
    }
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  CharUnits Align =
      CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
  llvm::Type *ElemTy = ConvertTypeForMem(E->getType()->getPointeeType());
  return Address(EmitScalarExpr(E), ElemTy, Align);
}
llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
  llvm::Value *V = RV.getScalarVal();
  if (auto MPT = T->getAs<MemberPointerType>())
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
  return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
}
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address. Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}
RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *ElTy = ConvertType(E->getType());
  llvm::Type *Ty = llvm::PointerType::getUnqual(ElTy);
  return MakeAddrLValue(
      Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}
bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
  const Expr *Base = Obj;
  while (!isa<CXXThisExpr>(Base)) {
    // The result of a dynamic_cast can be null.
    if (isa<CXXDynamicCastExpr>(Base))
      return false;

    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
      Base = CE->getSubExpr();
    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
      Base = PE->getSubExpr();
    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
      if (UO->getOpcode() == UO_Extension)
        Base = UO->getSubExpr();
      else
        return false;
    } else {
      return false;
    }
  }
  return true;
}
CodeGenFunction::EmitCheckedLValue(const Expr
*E
, TypeCheckKind TCK
) {
1238 if (SanOpts
.has(SanitizerKind::ArrayBounds
) && isa
<ArraySubscriptExpr
>(E
))
1239 LV
= EmitArraySubscriptExpr(cast
<ArraySubscriptExpr
>(E
), /*Accessed*/true);
1242 if (!isa
<DeclRefExpr
>(E
) && !LV
.isBitField() && LV
.isSimple()) {
1243 SanitizerSet SkippedChecks
;
1244 if (const auto *ME
= dyn_cast
<MemberExpr
>(E
)) {
1245 bool IsBaseCXXThis
= IsWrappedCXXThis(ME
->getBase());
1247 SkippedChecks
.set(SanitizerKind::Alignment
, true);
1248 if (IsBaseCXXThis
|| isa
<DeclRefExpr
>(ME
->getBase()))
1249 SkippedChecks
.set(SanitizerKind::Null
, true);
1251 EmitTypeCheck(TCK
, E
->getExprLoc(), LV
.getPointer(*this), E
->getType(),
1252 LV
.getAlignment(), SkippedChecks
);
/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::CXXRewrittenBinaryOperatorClass:
    return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm());
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass: {
    const ConstantExpr *CE = cast<ConstantExpr>(E);
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
      QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
                             ->getCallReturnType(getContext())
                             ->getPointeeType();
      return MakeNaturalAlignAddrLValue(Result, RetType);
    }
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
  }
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitAggExprToLValue(E);

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr());
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
      Address Addr = LV.getAddress(*this);
      llvm::Value *V = Addr.getPointer();
      Scope.ForceCleanup({&V});
      return LValue::MakeAddr(Addr.withPointer(V), LV.getType(), getContext(),
                              LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass: {
    auto *DAE = cast<CXXDefaultArgExpr>(E);
    CXXDefaultArgExprScope Scope(*this, DAE);
    return EmitLValue(DAE->getExpr());
  }
  case Expr::CXXDefaultInitExprClass: {
    auto *DIE = cast<CXXDefaultInitExpr>(E);
    CXXDefaultInitExprScope Scope(*this, DIE);
    return EmitLValue(DIE->getExpr());
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::MatrixSubscriptExprClass:
    return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
  case Expr::OMPArraySectionExprClass:
    return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::CXXThisExprClass:
    return MakeAddrLValue(LoadCXXThisAddress(), E->getType());
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXAddrspaceCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  }
}
/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}
/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}
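// Illustrative note (a sketch for exposition, not part of the upstream file):
//   const int x = 4;    // const int: CEK_AsValueOnly
//   const int &cr = x;  // reference to const int: CEK_AsValueOrReference
//   int y; int &r = y;  // reference to plain int: CEK_AsReferenceOnly
// while volatile-qualified or mutable-containing types come out as CEK_None.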
/// Try to emit a reference to the given value without producing it as
/// an l-value. This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // In CUDA/HIP device compilation, a lambda may capture a reference variable
  // referencing a global host variable by copy. In this case the lambda should
  // make a copy of the value of the global host variable. The DRE of the
  // captured reference variable cannot be emitted as load from the host
  // global variable as compile time constant, since the host variable is not
  // accessible on device. The DRE of the captured reference variable has to be
  // loaded from captures.
  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
      refExpr->refersToEnclosingVariableOrCapture()) {
    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
    if (MD && MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      const APValue::LValueBase &base = result.Val.getLValueBase();
      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
        if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
          if (!VD->hasAttr<CUDADeviceAttr>()) {
            return ConstantEmission();
          }
        }
      }
    }
  }

  // Emit as a constant.
  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
                                               result.Val, resultType);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, result.Val);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, result.Val);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}
static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
                                                        const MemberExpr *ME) {
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
    return DeclRefExpr::Create(
        CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
  }
  return nullptr;
}
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
    return tryEmitAsConstant(DRE);
  return ConstantEmission();
}
llvm::Value *CodeGenFunction::emitScalarConstant(
    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
  assert(Constant && "not a constant");
  if (Constant.isReference())
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
                            E->getExprLoc())
        .getScalarVal();
  return Constant.getValue();
}
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
  return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
}
static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}
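// Illustrative note (editorial addition): "boolean representation" covers
// more than plain 'bool'. All three of the following have it, so their loads
// go through the i1 truncation path in EmitFromMemory:
//
// \code
//   bool b;                       // isBooleanType()
//   enum E : bool { No, Yes } e;  // enum whose underlying type is bool
//   _Atomic(bool) ab;             // atomic whose value type is bool
// \endcode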
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums, bool IsBool) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    ED->getValueRange(End, Min);
  }
  return true;
}
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
                       hasBooleanRepresentation(Ty)))
    return nullptr;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}
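// Illustrative note (editorial addition): for a plain 'bool' load this yields
// !range metadata covering [0, 2), roughly:
//
// \code
//   %v = load i8, ptr %b, align 1, !range !0
//   !0 = !{i8 0, i8 2}
// \endcode
//
// For an unfixed C++ enum under -fstrict-enums the bounds come from
// EnumDecl::getValueRange instead.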
bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
                                           SourceLocation Loc) {
  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
  if (!HasBoolCheck && !HasEnumCheck)
    return false;

  bool IsBool = hasBooleanRepresentation(Ty) ||
                NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
  bool NeedsBoolCheck = HasBoolCheck && IsBool;
  bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
  if (!NeedsBoolCheck && !NeedsEnumCheck)
    return false;

  // Single-bit booleans don't need to be checked. Special-case this to avoid
  // a bit width mismatch when handling bitfield values. This is handled by
  // EmitFromMemory for the non-bitfield case.
  if (IsBool &&
      cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
    return false;

  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
    return true;

  auto &Ctx = getLLVMContext();
  SanitizerScope SanScope(this);
  llvm::Value *Check;
  --End;
  if (!Min) {
    Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
  } else {
    llvm::Value *Upper =
        Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
    llvm::Value *Lower =
        Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
    Check = Builder.CreateAnd(Upper, Lower);
  }
  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
                                  EmitCheckTypeDescriptor(Ty)};
  SanitizerMask Kind =
      NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
  EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
            StaticArgs, EmitCheckValue(Value));
  return true;
}
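// Illustrative note (editorial addition): under -fsanitize=enum, a load like
//
// \code
//   enum Color { Red, Green, Blue };
//   int f(Color *p) { return *p; }
// \endcode
//
// is followed by the range test built above; out-of-range bit patterns branch
// to the LoadInvalidValue handler (__ubsan_handle_load_invalid_value in the
// UBSan runtime) with the static type descriptor and source location.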
llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty, SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               TBAAAccessInfo TBAAInfo,
                                               bool isNontemporal) {
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV));

  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    // Boolean vectors use `iN` as storage type.
    if (ClangVecTy->isExtVectorBoolType()) {
      llvm::Type *ValTy = ConvertType(Ty);
      unsigned ValNumElems =
          cast<llvm::FixedVectorType>(ValTy)->getNumElements();
      // Load the `iP` storage object (P is the padded vector size).
      auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
      const auto *RawIntTy = RawIntV->getType();
      assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
      // Bitcast iP --> <P x i1>.
      auto *PaddedVecTy = llvm::FixedVectorType::get(
          Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
      llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
      // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
      V = emitBoolVecConversion(V, ValNumElems, "extractvec");

      return EmitFromMemory(V, Ty);
    }

    // Handle vectors of size 3 like size 4 for better performance.
    const llvm::Type *EltTy = Addr.getElementType();
    const auto *VTy = cast<llvm::FixedVectorType>(EltTy);

    if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {

      // Bitcast to vec4 type.
      llvm::VectorType *vec4Ty =
          llvm::FixedVectorType::get(VTy->getElementType(), 4);
      Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
      // Now load value.
      llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
      V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  // Atomic operations have to be done on integral types.
  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
    return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
  }

  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node = llvm::MDNode::get(
        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
  }

  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);

  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
    // In order to prevent the optimizer from throwing away the check, don't
    // attach range metadata to the load.
    // TODO: Enable range metadata for AMDGCN after issue
    // https://github.com/llvm/llvm-project/issues/58176 is fixed.
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
             !CGM.getTriple().isAMDGCN())
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);

  return EmitFromMemory(Load, Ty);
}
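// Illustrative note (editorial addition): unless the cc1 flag
// -fpreserve-vec3-type is set, a vec3 load such as
//
// \code
//   typedef float float3 __attribute__((ext_vector_type(3)));
//   float3 get(float3 *p) { return *p; }
// \endcode
//
// is emitted as a <4 x float> load followed by a shuffle of lanes {0, 1, 2},
// which tends to produce better-aligned full-width memory operations.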
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }
  if (Ty->isExtVectorBoolType()) {
    const auto *RawIntTy = Value->getType();
    // Bitcast iP --> <P x i1>.
    auto *PaddedVecTy = llvm::FixedVectorType::get(
        Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
    auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
    // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
    llvm::Type *ValTy = ConvertType(Ty);
    unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
    return emitBoolVecConversion(V, ValNumElems, "extractvec");
  }

  return Value;
}
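// Illustrative note (editorial addition): EmitToMemory/EmitFromMemory are why
// 'bool' is i8 in memory but i1 in registers, e.g.
//
// \code
//   %frombool = zext i1 %v to i8    ; EmitToMemory, before a store
//   %tobool   = trunc i8 %ld to i1  ; EmitFromMemory, after a load
// \endcode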
// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType), if it points to an array (the memory type of MatrixType).
static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
                                         bool IsVector = true) {
  auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
  if (ArrayTy && IsVector) {
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy));
  }
  auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
  if (VectorTy && !IsVector) {
    auto *ArrayTy = llvm::ArrayType::get(
        VectorTy->getElementType(),
        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());

    return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
  }

  return Addr;
}
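// Illustrative note (editorial addition): a ConstantMatrixType such as
//
// \code
//   typedef float m4x4 __attribute__((matrix_type(4, 4)));
// \endcode
//
// is [16 x float] in memory but <16 x float> as an IR value, so loads and
// stores route through MaybeConvertMatrixAddress to bitcast the address
// between the two element types.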
// Emit a store of a matrix LValue. This may require casting the original
// pointer to memory address (ArrayType) to a pointer to the value type
// (VectorType).
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
                                    bool isInit, CodeGenFunction &CGF) {
  Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
                                           value->getType()->isVectorTy());
  CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
                        lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
                        lvalue.isNontemporal());
}
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
                                        bool Volatile, QualType Ty,
                                        LValueBaseInfo BaseInfo,
                                        TBAAAccessInfo TBAAInfo,
                                        bool isInit, bool isNontemporal) {
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV));

  llvm::Type *SrcTy = Value->getType();
  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
    if (VecTy && ClangVecTy->isExtVectorBoolType()) {
      auto *MemIntTy = cast<llvm::IntegerType>(Addr.getElementType());
      // Expand to the memory bit width.
      unsigned MemNumElems = MemIntTy->getPrimitiveSizeInBits();
      // <N x i1> --> <P x i1>.
      Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
      // <P x i1> --> iP.
      Value = Builder.CreateBitCast(Value, MemIntTy);
    } else if (!CGM.getCodeGenOpts().PreserveVec3Type) {
      // Handle vec3 special.
      if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
        // Our source is a vec3, do a shuffle vector to make it a vec4.
        Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
                                            "extractVec");
        SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
      }
      if (Addr.getElementType() != SrcTy) {
        Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
      }
    }
  }

  Value = EmitToMemory(Value, Ty);

  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() ||
      (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
    EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
    return;
  }

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node =
        llvm::MDNode::get(Store->getContext(),
                          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
  }

  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
}
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  if (lvalue.getType()->isConstantMatrixType()) {
    EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
    return;
  }

  EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
                    lvalue.getType(), lvalue.getBaseInfo(),
                    lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}
// Emit a load of a LValue of matrix type. This may require casting the pointer
// to memory address (ArrayType) to a pointer to the value type (VectorType).
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
                                     CodeGenFunction &CGF) {
  assert(LV.getType()->isConstantMatrixType());
  Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
  LV.setAddress(Addr);
  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
}
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    Address AddrWeakObj = LV.getAddress(*this);
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    // In MRC mode, we do a load+autorelease.
    if (!getLangOpts().ObjCAutoRefCount) {
      return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
    }

    // In ARC mode, we load retained and then consume the value.
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    if (LV.getType()->isConstantMatrixType())
      return EmitLoadOfMatrixLValue(LV, Loc, *this);

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV, Loc));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
                                              LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt()) {
    return EmitLoadOfExtVectorElementLValue(LV);
  }

  // Global Register variables always invoke intrinsics
  if (LV.isGlobalReg())
    return EmitLoadOfGlobalRegLValue(LV);

  if (LV.isMatrixElt()) {
    llvm::Value *Idx = LV.getMatrixIdx();
    if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
      const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
      llvm::MatrixBuilder MB(Builder);
      MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
    }
    llvm::LoadInst *Load =
        Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
  }

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV, Loc);
}
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 SourceLocation Loc) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  Address Ptr = LV.getBitFieldAddress();
  llvm::Value *Val =
      Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");

  bool UseVolatile = LV.isVolatileQualified() &&
                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
    unsigned HighBits = StorageSize - Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Offset + HighBits)
      Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
  } else {
    if (Offset)
      Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
      Val = Builder.CreateAnd(
          Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
  EmitScalarRangeCheck(Val, LV.getType(), Loc);
  return RValue::get(Val);
}
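// Illustrative note (editorial addition): a worked example of the shift/mask
// sequence above. Given
//
// \code
//   struct S { unsigned a : 3; int b : 5; }; // packed into one i8 container
// \endcode
//
// a load of 's.b' (Offset = 3, Size = 5, StorageSize = 8, IsSigned) skips
// "bf.shl" (HighBits == 0) and emits a single "bf.ashr" by 3, which both
// extracts the field and sign-extends it; "bf.cast" then widens i8 to i32.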
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
                                        LV.isVolatileQualified());

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<int, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(getAccessedFieldNo(i, Elts));

  Vec = Builder.CreateShuffleVector(Vec, Mask);
  return RValue::get(Vec);
}
/// Generates lvalue for partial ext_vector access.
Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
  Address VectorAddress = LV.getExtVectorAddress();
  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);

  Address CastToPointerElement =
      Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
                                   "conv.ptr.element");

  const llvm::Constant *Elts = LV.getExtVectorElts();
  unsigned ix = getAccessedFieldNo(0, Elts);

  Address VectorBasePtrPlusIx =
      Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
                                     "vector.elt");

  return VectorBasePtrPlusIx;
}
/// Load of global named registers are always calls to intrinsics.
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
  llvm::Value *Call = Builder.CreateCall(
      F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
  if (OrigTy->isPointerTy())
    Call = Builder.CreateIntToPtr(Call, OrigTy);
  return RValue::get(Call);
}
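// Illustrative note (editorial addition): global register variables reach
// this path via GNU asm labels, e.g.
//
// \code
//   register unsigned long current_sp asm("sp");
//   unsigned long get_sp() { return current_sp; }
// \endcode
//
// where the read becomes a call to @llvm.read_register with the register
// name passed as metadata; no memory load is emitted.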
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
                                            Dst.isVolatileQualified());
      auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
      if (IRStoreTy) {
        auto *IRVecTy = llvm::FixedVectorType::get(
            Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
        Vec = Builder.CreateBitCast(Vec, IRVecTy);
        // iN --> <N x i1>.
      }
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      if (IRStoreTy) {
        // <N x i1> --> <iN>.
        Vec = Builder.CreateBitCast(Vec, IRStoreTy);
      }
      Builder.CreateStore(Vec, Dst.getVectorAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    if (Dst.isGlobalReg())
      return EmitStoreThroughGlobalRegLValue(Src, Dst);

    if (Dst.isMatrixElt()) {
      llvm::Value *Idx = Dst.getMatrixIdx();
      if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
        const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
        llvm::MatrixBuilder MB(Builder);
        MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
      }
      llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
      llvm::Value *Vec =
          Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
      Builder.CreateStore(Vec, Dst.getMatrixAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      if (isInit) {
        Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
        break;
      }
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      if (isInit)
        // Initialize and then skip the primitive store.
        EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal());
      else
        EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(),
                         /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    Address LvalueDst = Dst.getAddress(*this);
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
    Address LvalueDst = Dst.getAddress(*this);
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = IntPtrTy;
      Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
      llvm::Value *RHS = dst.getPointer();
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
          Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
                                 "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    } else {
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    }
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  Address Ptr = Dst.getBitFieldAddress();

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  // Cast the source to the storage type and shift it into place.
  SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
                                 /*isSigned=*/false);
  llvm::Value *MaskedVal = SrcVal;

  const bool UseVolatile =
      CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  // See if there are other bits in the bitfield's storage we'll need to load
  // and mask together with source before storing.
  if (StorageSize != Info.Size) {
    assert(StorageSize > Info.Size && "Invalid bitfield size.");
    llvm::Value *Val =
        Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");

    // Mask the source value as needed.
    if (!hasBooleanRepresentation(Dst.getType()))
      SrcVal = Builder.CreateAnd(
          SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
          "bf.value");
    MaskedVal = SrcVal;
    if (Offset)
      SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");

    // Mask out the original value.
    Val = Builder.CreateAnd(
        Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
        "bf.clear");

    // Or together the unchanged values and the source value.
    SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
  } else {
    assert(Offset == 0);
    // According to the AAPCS:
    // When a volatile bit-field is written, and its container does not overlap
    // with any non-bit-field member, its container must be read exactly once
    // and written exactly once using the access width appropriate to the type
    // of the container. The two accesses are not atomic.
    if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
        CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
      Builder.CreateLoad(Ptr, true, "bf.load");
  }

  // Write the new value back out.
  Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());

  // Return the new value of the bit-field, if requested.
  if (Result) {
    llvm::Value *ResultVal = MaskedVal;

    // Sign extend the value if needed.
    if (Info.IsSigned) {
      assert(Info.Size <= StorageSize);
      unsigned HighBits = StorageSize - Info.Size;
      if (HighBits) {
        ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
        ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
      }
    }

    ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
                                      "bf.result.cast");
    *Result = EmitFromMemory(ResultVal, Dst.getType());
  }
}
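// Illustrative note (editorial addition): continuing the earlier bitfield
// example,
//
// \code
//   struct S { unsigned a : 3; int b : 5; } s;
//   s.b = v;
// \endcode
//
// loads the i8 container ("bf.load"), masks 'v' to 5 bits ("bf.value"),
// shifts it left by 3 ("bf.shl"), clears bits 3..7 of the old container
// ("bf.clear"), ors the two halves together ("bf.set"), and stores the
// result back.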
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector. Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
                                        Dst.isVolatileQualified());
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
        cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use a shuffle vector if the src and destination have the same number
      // of elements, and restore the vector mask since it is on the side it
      // will be stored.
      SmallVector<int, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i;

      Vec = Builder.CreateShuffleVector(SrcVal, Mask);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      //        into that? This could be simpler.
      SmallVector<int, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(i);
      ExtMask.resize(NumDstElts, -1);
      llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
      // build identity
      SmallVector<int, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(i);

      // When the vector size is odd and .odd or .hi is used, the last element
      // of the Elts constant array will be one past the size of the vector.
      // Ignore the last element here, if it is greater than the mask size.
      if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
        NumSrcElts--;

      // modify when what gets shuffled in
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
    } else {
      // We should never shorten the vector
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
                      Dst.isVolatileQualified());
}
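// Illustrative note (editorial addition): an ext_vector swizzle store such as
//
// \code
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   typedef float float2 __attribute__((ext_vector_type(2)));
//   void set_lo(float4 *v, float2 u) { v->xy = u; }
// \endcode
//
// takes the NumDstElts > NumSrcElts path: 'u' is widened to four lanes with
// an undef-padded shuffle, then blended into the loaded value of '*v' so that
// lanes 0 and 1 come from 'u', and the whole vector is stored back.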
/// Store of global named registers are always calls to intrinsics.
void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
  assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
  assert(RegName && "Register LValue is not metadata");

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
  llvm::Value *Value = Src.getScalarVal();
  if (OrigTy->isPointerTy())
    Value = Builder.CreatePtrToInt(Value, Ty);
  Builder.CreateCall(
      F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
}
// setObjCGCLValueClass - sets class of the lvalue for the purpose of
// generating write-barriers API. It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If ivar is a structure pointer, assigning to a field of
      // this struct follows gcc's behavior and makes it a non-ivar
      // write-barrier conservatively.
      ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}
static llvm::Value *
EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
                                llvm::Value *V, llvm::Type *IRType,
                                StringRef Name = StringRef()) {
  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}
static LValue EmitThreadPrivateVarDeclLValue(
    CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
    llvm::Type *RealVarTy, SourceLocation Loc) {
  if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
    Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
        CGF, VD, Addr, Loc);
  else
    Addr =
        CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);

  Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
  return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
                                           const VarDecl *VD, QualType T) {
  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  // Return an invalid address if variable is MT_To and unified
  // memory is not enabled. For all other cases: MT_Link and
  // MT_To with unified memory, return a valid address.
  if (!Res || (*Res == OMPDeclareTargetDeclAttr::MT_To &&
               !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
    return Address::invalid();
  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
          (*Res == OMPDeclareTargetDeclAttr::MT_To &&
           CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
         "Expected link clause OR to clause with unified memory enabled.");
  QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
  Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
  return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
}
Address
CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
                                     LValueBaseInfo *PointeeBaseInfo,
                                     TBAAAccessInfo *PointeeTBAAInfo) {
  llvm::LoadInst *Load =
      Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
  CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());

  QualType PointeeType = RefLVal.getType()->getPointeeType();
  CharUnits Align = CGM.getNaturalTypeAlignment(
      PointeeType, PointeeBaseInfo, PointeeTBAAInfo,
      /* forPointeeType= */ true);
  return Address(Load, ConvertTypeForMem(PointeeType), Align);
}

LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
  LValueBaseInfo PointeeBaseInfo;
  TBAAAccessInfo PointeeTBAAInfo;
  Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
                                            &PointeeTBAAInfo);
  return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
                        PointeeBaseInfo, PointeeTBAAInfo);
}
Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
                                           const PointerType *PtrTy,
                                           LValueBaseInfo *BaseInfo,
                                           TBAAAccessInfo *TBAAInfo) {
  llvm::Value *Addr = Builder.CreateLoad(Ptr);
  return Address(Addr, ConvertTypeForMem(PtrTy->getPointeeType()),
                 CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), BaseInfo,
                                             TBAAInfo,
                                             /*forPointeeType=*/true));
}

LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
                                                const PointerType *PtrTy) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
  return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
}
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  QualType T = E->getType();

  // If it's thread_local, emit a call to its wrapper function instead.
  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
      CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
    return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
  // Check if the variable is marked as declare target with link clause in
  // device codegen.
  if (CGF.getLangOpts().OpenMPIsDevice) {
    Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
    if (Addr.isValid())
      return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  }

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);

  if (VD->getTLSKind() != VarDecl::TLS_None)
    V = CGF.Builder.CreateThreadLocalAddress(V);

  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
  V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
  Address Addr(V, RealVarTy, Alignment);
  // Emit reference to the private copy of the variable if it is an OpenMP
  // threadprivate variable.
  if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
      VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
    return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
                                          E->getExprLoc());
  }
  LValue LV = VD->getType()->isReferenceType() ?
      CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
                                    AlignmentSource::Decl) :
      CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}
static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
                                               GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  if (FD->hasAttr<WeakRefAttr>()) {
    ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
    return aliasee.getPointer();
  }

  llvm::Constant *V = CGM.GetAddrOfFunction(GD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use. Correct for this with a
      // bitcast.
      QualType NoProtoType =
          CGM.getContext().getFunctionNoProtoType(Proto->getReturnType());
      NoProtoType = CGM.getContext().getPointerType(NoProtoType);
      V = llvm::ConstantExpr::getBitCast(V,
                                      CGM.getTypes().ConvertType(NoProtoType));
    }
  }
  return V;
}
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
                                     GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD);
  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
  return CGF.MakeAddrLValue(V, E->getType(), Alignment,
                            AlignmentSource::Decl);
}

static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
                                      llvm::Value *ThisValue) {
  QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
  LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
  return CGF.EmitLValueForField(LV, FD);
}
/// Named Registers are named metadata pointing to the register name
/// which will be read from/written to as an argument to the intrinsic
/// @llvm.read/write_register.
/// So far, only the name is being passed down, but other options such as
/// register type, allocation type or even optimization options could be
/// passed down via the metadata node.
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
  SmallString<64> Name("llvm.named.register.");
  AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
  assert(Asm->getLabel().size() < 64-Name.size() &&
         "Register name too big");
  Name.append(Asm->getLabel());
  llvm::NamedMDNode *M =
      CGM.getModule().getOrInsertNamedMetadata(Name);
  if (M->getNumOperands() == 0) {
    llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
                                              Asm->getLabel());
    llvm::Metadata *Ops[] = {Str};
    M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
  }

  CharUnits Alignment = CGM.getContext().getDeclAlign(VD);

  llvm::Value *Ptr =
      llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
  return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
}
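// Illustrative note (editorial addition): for 'register int *foo asm("d2");'
// this creates module-level named metadata roughly like
//
// \code
//   !llvm.named.register.d2 = !{!0}
//   !0 = !{!"d2"}
// \endcode
//
// and the resulting lvalue wraps !0 as a MetadataAsValue, ready to be passed
// to @llvm.read_register / @llvm.write_register.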
/// Determine whether we can emit a reference to \p VD from the current
/// context, despite not necessarily having seen an odr-use of the variable in
/// this context.
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
                                               const DeclRefExpr *E,
                                               const VarDecl *VD,
                                               bool IsConstant) {
  // For a variable declared in an enclosing scope, do not emit a spurious
  // reference even if we have a capture, as that will emit an unwarranted
  // reference to our capture state, and will likely generate worse code than
  // emitting a local copy.
  if (E->refersToEnclosingVariableOrCapture())
    return false;

  // For a local declaration declared in this function, we can always reference
  // it even if we don't have an odr-use.
  if (VD->hasLocalStorage()) {
    return VD->getDeclContext() ==
           dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
  }

  // For a global declaration, we can emit a reference to it if we know
  // for sure that we are able to emit a definition of it.
  VD = VD->getDefinition(CGF.getContext());
  if (!VD)
    return false;

  // Don't emit a spurious reference if it might be to a variable that only
  // exists on a different device / target.
  // FIXME: This is unnecessarily broad. Check whether this would actually be a
  // cross-target reference.
  if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
      CGF.getLangOpts().OpenCL) {
    return false;
  }

  // We can emit a spurious reference only if the linkage implies that we'll
  // be emitting a non-interposable symbol that will be retained until link
  // time.
  switch (CGF.CGM.getLLVMLinkageVarDefinition(VD, IsConstant)) {
  case llvm::GlobalValue::ExternalLinkage:
  case llvm::GlobalValue::LinkOnceODRLinkage:
  case llvm::GlobalValue::WeakODRLinkage:
  case llvm::GlobalValue::InternalLinkage:
  case llvm::GlobalValue::PrivateLinkage:
    return true;
  default:
    return false;
  }
}
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  QualType T = E->getType();

  assert(E->isNonOdrUse() != NOUR_Unevaluated &&
         "should not emit an unevaluated operand");

  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
    // Global Named registers access via intrinsics only
    if (VD->getStorageClass() == SC_Register &&
        VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
      return EmitGlobalNamedRegister(VD, CGM);

    // If this DeclRefExpr does not constitute an odr-use of the variable,
    // we're not permitted to emit a reference to it in general, and it might
    // not be captured if capture would be necessary for a use. Emit the
    // constant value directly instead.
    if (E->isNonOdrUse() == NOUR_Constant &&
        (VD->getType()->isReferenceType() ||
         !canEmitSpuriousReferenceToVariable(*this, E, VD, true))) {
      VD->getAnyInitializer(VD);
      llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
          E->getLocation(), *VD->evaluateValue(), VD->getType());
      assert(Val && "failed to emit constant expression");

      Address Addr = Address::invalid();
      if (!VD->getType()->isReferenceType()) {
        // Spill the constant value to a global.
        Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
                                           getContext().getDeclAlign(VD));
        llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
        auto *PTy = llvm::PointerType::get(
            VarTy, getContext().getTargetAddressSpace(VD->getType()));
        Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
      } else {
        // Should we be using the alignment of the constant pointer we emitted?
        CharUnits Alignment =
            CGM.getNaturalTypeAlignment(E->getType(),
                                        /* BaseInfo= */ nullptr,
                                        /* TBAAInfo= */ nullptr,
                                        /* forPointeeType= */ true);
        Addr = Address(Val, ConvertTypeForMem(E->getType()), Alignment);
      }
      return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
    }

    // FIXME: Handle other kinds of non-odr-use DeclRefExprs.

    // Check for captured variables.
    if (E->refersToEnclosingVariableOrCapture()) {
      VD = VD->getCanonicalDecl();
      if (auto *FD = LambdaCaptureFields.lookup(VD))
        return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
      if (CapturedStmtInfo) {
        auto I = LocalDeclMap.find(VD);
        if (I != LocalDeclMap.end()) {
          LValue CapLVal;
          if (VD->getType()->isReferenceType())
            CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
                                                AlignmentSource::Decl);
          else
            CapLVal = MakeAddrLValue(I->second, T);
          // Mark lvalue as nontemporal if the variable is marked as nontemporal
          // in OpenMP.
          if (getLangOpts().OpenMP &&
              CGM.getOpenMPRuntime().isNontemporalDecl(VD))
            CapLVal.setNontemporal(/*Value=*/true);
          return CapLVal;
        }
        LValue CapLVal =
            EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
                                    CapturedStmtInfo->getContextValue());
        Address LValueAddress = CapLVal.getAddress(*this);
        CapLVal = MakeAddrLValue(
            Address(LValueAddress.getPointer(), LValueAddress.getElementType(),
                    getContext().getDeclAlign(VD)),
            CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl),
            CapLVal.getTBAAInfo());
        // Mark lvalue as nontemporal if the variable is marked as nontemporal
        // in OpenMP.
        if (getLangOpts().OpenMP &&
            CGM.getOpenMPRuntime().isNontemporalDecl(VD))
          CapLVal.setNontemporal(/*Value=*/true);
        return CapLVal;
      }

      assert(isa<BlockDecl>(CurCodeDecl));
      Address addr = GetAddrOfBlockDecl(VD);
      return MakeAddrLValue(addr, T, AlignmentSource::Decl);
    }
  }

  // FIXME: We should be able to assert this for FunctionDecls as well!
  // FIXME: We should be able to assert this for all DeclRefExprs, not just
  // those with a valid source location.
  assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
          !E->getLocation().isValid()) &&
         "Should not use decl without marking it used!");

  if (ND->hasAttr<WeakRefAttr>()) {
    const auto *VD = cast<ValueDecl>(ND);
    ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
  }

  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
    // Check if this is a global variable.
    if (VD->hasLinkage() || VD->isStaticDataMember())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    Address addr = Address::invalid();

    // The variable should generally be present in the local decl map.
    auto iter = LocalDeclMap.find(VD);
    if (iter != LocalDeclMap.end()) {
      addr = iter->second;

    // Otherwise, it might be static local we haven't emitted yet for
    // some reason; most likely, because it's in an outer function.
    } else if (VD->isStaticLocal()) {
      llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
          *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false));
      addr = Address(
          var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));

    // No other cases for now.
    } else {
      llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
    }

    // Handle threadlocal function locals.
    if (VD->getTLSKind() != VarDecl::TLS_None)
      addr =
          addr.withPointer(Builder.CreateThreadLocalAddress(addr.getPointer()));

    // Check for OpenMP threadprivate variables.
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
      return EmitThreadPrivateVarDeclLValue(
          *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
          E->getExprLoc());
    }

    // Drill into block byref variables.
    bool isBlockByref = VD->isEscapingByref();
    if (isBlockByref) {
      addr = emitBlockByrefAddress(addr, VD);
    }

    // Drill into reference types.
    LValue LV = VD->getType()->isReferenceType() ?
        EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
        MakeAddrLValue(addr, T, AlignmentSource::Decl);

    bool isLocalStorage = VD->hasLocalStorage();

    bool NonGCable = isLocalStorage &&
                     !VD->getType()->isReferenceType() &&
                     !isBlockByref;
    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }

    bool isImpreciseLifetime =
        (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
    if (isImpreciseLifetime)
      LV.setARCPreciseLifetime(ARCImpreciseLifetime);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
    LValue LV = EmitFunctionDeclLValue(*this, E, FD);

    // Emit debuginfo for the function declaration if the target wants to.
    if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) {
      if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) {
        auto *Fn =
            cast<llvm::Function>(LV.getPointer(*this)->stripPointerCasts());
        if (!Fn->getSubprogram())
          DI->EmitFunctionDecl(FD, FD->getLocation(), T, Fn);
      }
    }

    return LV;
  }

  // FIXME: While we're emitting a binding from an enclosing scope, all other
  // DeclRefExprs we see should be implicitly treated as if they also refer to
  // an enclosing scope.
  if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
    if (E->refersToEnclosingVariableOrCapture()) {
      auto *FD = LambdaCaptureFields.lookup(BD);
      return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
    }
    return EmitLValue(BD->getBinding());
  }

  // We can form DeclRefExprs naming GUID declarations when reconstituting
  // non-type template parameters into expressions.
  if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
    return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
                          AlignmentSource::Decl);

  if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND))
    return MakeAddrLValue(CGM.GetAddrOfTemplateParamObject(TPO), T,
                          AlignmentSource::Decl);

  llvm_unreachable("Unhandled DeclRefExpr");
}
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
                                            &TBAAInfo);
    LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getLangOpts().ObjC &&
        getLangOpts().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");

    // __real is valid on scalars. This is a faster way of testing that.
    // __imag can only produce an rvalue on scalars.
    if (E->getOpcode() == UO_Real &&
        !LV.getAddress(*this).getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    QualType T = ExprTy->castAs<ComplexType>()->getElementType();

    Address Component =
        (E->getOpcode() == UO_Real
             ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType())
             : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType()));
    LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
                                   CGM.getTBAAInfoForSubobject(LV, T));
    ElemLV.getQuals().addQualifiers(LV.getQuals());
    return ElemLV;
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}
LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType(), AlignmentSource::Decl);
}
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  auto SL = E->getFunctionName();
  assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
  StringRef FnName = CurFn->getName();
  if (FnName.startswith("\01"))
    FnName = FnName.substr(1);
  StringRef NameItems[] = {
      PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
  std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
  if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
    std::string Name = std::string(SL->getString());
    if (!Name.empty()) {
      unsigned Discriminator =
          CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
      if (Discriminator)
        Name += "_" + Twine(Discriminator + 1).str();
      auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
    } else {
      auto C =
          CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
    }
  }
  auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}
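// Illustrative note (editorial addition): inside 'void foo()', a use of
// __func__ is emitted as a private constant named from the ident kind and
// the enclosing function's name, roughly:
//
// \code
//   @"__func__.foo" = private unnamed_addr constant [4 x i8] c"foo\00"
// \endcode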
/// Emit a type description suitable for use by a runtime sanitizer library. The
/// format of a type descriptor is
///
/// \code
///   { i16 TypeKind, i16 TypeInfo }
/// \endcode
///
/// followed by an array of i8 containing the type name. TypeKind is 0 for an
/// integer, 1 for a floating point value, and -1 for anything else.
llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
  // Only emit each type's descriptor once.
  if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
    return C;

  uint16_t TypeKind = -1;
  uint16_t TypeInfo = 0;

  if (T->isIntegerType()) {
    TypeKind = 0;
    TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
               (T->isSignedIntegerType() ? 1 : 0);
  } else if (T->isFloatingType()) {
    TypeKind = 1;
    TypeInfo = getContext().getTypeSize(T);
  }

  // Format the type name as if for a diagnostic, including quotes and
  // optionally an 'aka'.
  SmallString<32> Buffer;
  CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
                                    (intptr_t)T.getAsOpaquePtr(),
                                    StringRef(), StringRef(), None, Buffer,
                                    None);

  llvm::Constant *Components[] = {
    Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
    llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
  };
  llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);

  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), Descriptor->getType(),
      /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);

  // Remember the descriptor for this type.
  CGM.setTypeDescriptorInMap(T, GV);

  return GV;
}
llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
  llvm::Type *TargetTy = IntPtrTy;

  if (V->getType() == TargetTy)
    return V;

  // Floating-point types which fit into intptr_t are bitcast to integers
  // and then passed directly (after zero-extension, if necessary).
  if (V->getType()->isFloatingPointTy()) {
    unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedSize();
    if (Bits <= TargetTy->getIntegerBitWidth())
      V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
                                                         Bits));
  }

  // Integers which fit in intptr_t are zero-extended and passed directly.
  if (V->getType()->isIntegerTy() &&
      V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
    return Builder.CreateZExt(V, TargetTy);

  // Pointers are passed directly, everything else is passed by address.
  if (!V->getType()->isPointerTy()) {
    Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
    Builder.CreateStore(V, Ptr);
    V = Ptr.getPointer();
  }
  return Builder.CreatePtrToInt(V, TargetTy);
}
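
// For example (illustrative): with a 64-bit intptr_t, 'float' and 'double'
// values are bitcast to integers and zero-extended to i64, while an 80-bit
// 'long double' does not fit and is instead spilled to a temporary alloca
// whose address is passed.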
/// Emit a representation of a SourceLocation for passing to a handler
/// in a sanitizer runtime library. The format for this data is:
/// \code
///   struct SourceLocation {
///     const char *Filename;
///     int32_t Line, Column;
///   };
/// \endcode
/// For an invalid SourceLocation, the Filename pointer is null.
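///
/// For example (illustrative), a check at foo.c:10:5 is encoded as
///   { i8* @.src, i32 10, i32 5 }
/// where @.src is a private constant global holding "foo.c".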
llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
  llvm::Constant *Filename;
  int Line, Column;

  PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
  if (PLoc.isValid()) {
    StringRef FilenameString = PLoc.getFilename();

    int PathComponentsToStrip =
        CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
    if (PathComponentsToStrip < 0) {
      assert(PathComponentsToStrip != INT_MIN);
      int PathComponentsToKeep = -PathComponentsToStrip;
      auto I = llvm::sys::path::rbegin(FilenameString);
      auto E = llvm::sys::path::rend(FilenameString);
      while (I != E && --PathComponentsToKeep)
        ++I;

      FilenameString = FilenameString.substr(I - E);
    } else if (PathComponentsToStrip > 0) {
      auto I = llvm::sys::path::begin(FilenameString);
      auto E = llvm::sys::path::end(FilenameString);
      while (I != E && PathComponentsToStrip--)
        ++I;

      if (I != E)
        FilenameString =
            FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
      else
        FilenameString = llvm::sys::path::filename(FilenameString);
    }

    auto FilenameGV =
        CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
        cast<llvm::GlobalVariable>(FilenameGV.getPointer()));
    Filename = FilenameGV.getPointer();
    Line = PLoc.getLine();
    Column = PLoc.getColumn();
  } else {
    Filename = llvm::Constant::getNullValue(Int8PtrTy);
    Line = Column = 0;
  }

  llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
                            Builder.getInt32(Column)};

  return llvm::ConstantStruct::getAnon(Data);
}
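
// Illustrative behaviour of the path stripping above, for the presumed file
// "a/b/c/file.c":
//   -fsanitize-undefined-strip-path-components=1   -> "b/c/file.c"
//   -fsanitize-undefined-strip-path-components=-1  -> "file.c"
// A positive value drops leading components; a negative value keeps only
// that many trailing components.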
/// Specify under what conditions this check can be recovered
enum class CheckRecoverableKind {
  /// Always terminate program execution if this check fails.
  Unrecoverable,
  /// Check supports recovering, runtime has both fatal (noreturn) and
  /// non-fatal handlers for this check.
  Recoverable,
  /// Runtime conditionally aborts, always need to support recovery.
  AlwaysRecoverable
};

static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
  assert(Kind.countPopulation() == 1);
  if (Kind == SanitizerKind::Function || Kind == SanitizerKind::Vptr)
    return CheckRecoverableKind::AlwaysRecoverable;
  else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
    return CheckRecoverableKind::Unrecoverable;
  else
    return CheckRecoverableKind::Recoverable;
}

namespace {
struct SanitizerHandlerInfo {
  char const *const Name;
  unsigned Version;
};
}

const SanitizerHandlerInfo SanitizerHandlers[] = {
#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
    LIST_SANITIZER_CHECKS
#undef SANITIZER_CHECK
};
static void emitCheckHandlerCall(CodeGenFunction &CGF,
                                 llvm::FunctionType *FnType,
                                 ArrayRef<llvm::Value *> FnArgs,
                                 SanitizerHandler CheckHandler,
                                 CheckRecoverableKind RecoverKind, bool IsFatal,
                                 llvm::BasicBlock *ContBB) {
  assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
  Optional<ApplyDebugLocation> DL;
  if (!CGF.Builder.getCurrentDebugLocation()) {
    // Ensure that the call has at least an artificial debug location.
    DL.emplace(CGF, SourceLocation());
  }
  bool NeedsAbortSuffix =
      IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
  bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
  const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
  const StringRef CheckName = CheckInfo.Name;
  std::string FnName = "__ubsan_handle_" + CheckName.str();
  if (CheckInfo.Version && !MinimalRuntime)
    FnName += "_v" + llvm::utostr(CheckInfo.Version);
  if (MinimalRuntime)
    FnName += "_minimal";
  if (NeedsAbortSuffix)
    FnName += "_abort";
  bool MayReturn =
      !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;

  llvm::AttrBuilder B(CGF.getLLVMContext());
  if (!MayReturn) {
    B.addAttribute(llvm::Attribute::NoReturn)
        .addAttribute(llvm::Attribute::NoUnwind);
  }
  B.addUWTableAttr(llvm::UWTableKind::Default);

  llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
      FnType, FnName,
      llvm::AttributeList::get(CGF.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex, B),
      /*Local=*/true);
  llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
  if (!MayReturn) {
    HandlerCall->setDoesNotReturn();
    CGF.Builder.CreateUnreachable();
  } else {
    CGF.Builder.CreateBr(ContBB);
  }
}
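
// Naming examples for the mangling above (illustrative): a recoverable
// type-mismatch check with Version == 1 calls
// __ubsan_handle_type_mismatch_v1, its fatal variant appends "_abort", and
// -fsanitize-minimal-runtime uses __ubsan_handle_type_mismatch_minimal.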
void CodeGenFunction::EmitCheck(
    ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
    SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
    ArrayRef<llvm::Value *> DynamicArgs) {
  assert(IsSanitizerScope);
  assert(Checked.size() > 0);
  assert(CheckHandler >= 0 &&
         size_t(CheckHandler) < std::size(SanitizerHandlers));
  const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;

  llvm::Value *FatalCond = nullptr;
  llvm::Value *RecoverableCond = nullptr;
  llvm::Value *TrapCond = nullptr;
  for (int i = 0, n = Checked.size(); i < n; ++i) {
    llvm::Value *Check = Checked[i].first;
    // -fsanitize-trap= overrides -fsanitize-recover=.
    llvm::Value *&Cond =
        CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
            ? TrapCond
            : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
                  ? RecoverableCond
                  : FatalCond;
    Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
  }

  if (TrapCond)
    EmitTrapCheck(TrapCond, CheckHandler);
  if (!FatalCond && !RecoverableCond)
    return;

  llvm::Value *JointCond;
  if (FatalCond && RecoverableCond)
    JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
  else
    JointCond = FatalCond ? FatalCond : RecoverableCond;
  assert(JointCond);

  CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
  assert(SanOpts.has(Checked[0].second));
#ifndef NDEBUG
  for (int i = 1, n = Checked.size(); i < n; ++i) {
    assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
           "All recoverable kinds in a single check must be same!");
    assert(SanOpts.has(Checked[i].second));
  }
#endif

  llvm::BasicBlock *Cont = createBasicBlock("cont");
  llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
  llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
  // Give hint that we very much don't expect to execute the handler
  // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
  Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
  EmitBlock(Handlers);

  // Handler functions take an i8* pointing to the (handler-specific) static
  // information block, followed by a sequence of intptr_t arguments
  // representing operand values.
  SmallVector<llvm::Value *, 4> Args;
  SmallVector<llvm::Type *, 4> ArgTypes;
  if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
    Args.reserve(DynamicArgs.size() + 1);
    ArgTypes.reserve(DynamicArgs.size() + 1);

    // Emit handler arguments and create handler function type.
    if (!StaticArgs.empty()) {
      llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
      auto *InfoPtr =
          new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
                                   llvm::GlobalVariable::PrivateLinkage, Info);
      InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
      CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
      Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
      ArgTypes.push_back(Int8PtrTy);
    }

    for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
      Args.push_back(EmitCheckValue(DynamicArgs[i]));
      ArgTypes.push_back(IntPtrTy);
    }
  }

  llvm::FunctionType *FnType =
      llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);

  if (!FatalCond || !RecoverableCond) {
    // Simple case: we need to generate a single handler call, either
    // fatal, or non-fatal.
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
                         (FatalCond != nullptr), Cont);
  } else {
    // Emit two handler calls: first one for set of unrecoverable checks,
    // another one for recoverable.
    llvm::BasicBlock *NonFatalHandlerBB =
        createBasicBlock("non_fatal." + CheckName);
    llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
    Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
    EmitBlock(FatalHandlerBB);
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
                         NonFatalHandlerBB);
    EmitBlock(NonFatalHandlerBB);
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
                         Cont);
  }

  EmitBlock(Cont);
}
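
// Illustrative shape of the IR emitted above for a single recoverable check
// (names are examples, not real output):
//   br i1 %ok, label %cont, label %handler.foo, !prof !N   ; weights 1048575:1
// handler.foo:
//   call void @__ubsan_handle_foo(i8* %static_info, i64 %dynamic_arg)
//   br label %cont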
void CodeGenFunction::EmitCfiSlowPathCheck(
    SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
    llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
  llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");

  llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
  llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
  BI->setMetadata(llvm::LLVMContext::MD_prof, Node);

  EmitBlock(CheckBB);

  bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);

  llvm::CallInst *CheckCall;
  llvm::FunctionCallee SlowPathFn;
  if (WithDiag) {
    llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
    auto *InfoPtr =
        new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, Info);
    InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);

    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath_diag",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
                                false));
    CheckCall = Builder.CreateCall(
        SlowPathFn, {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)});
  } else {
    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
    CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
  }

  CGM.setDSOLocal(
      cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
  CheckCall->setDoesNotThrow();

  EmitBlock(Cont);
}
// Emit a stub for __cfi_check function so that the linker knows about this
// symbol in LTO mode.
void CodeGenFunction::EmitCfiCheckStub() {
  llvm::Module *M = &CGM.getModule();
  auto &Ctx = M->getContext();
  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
      llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
  CGM.setDSOLocal(F);
  llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
  // FIXME: consider emitting an intrinsic call like
  // call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2)
  // which can be lowered in CrossDSOCFI pass to the actual contents of
  // __cfi_check. This would allow inlining of __cfi_check calls.
  llvm::CallInst::Create(
      llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::trap), "", BB);
  llvm::ReturnInst::Create(Ctx, nullptr, BB);
}
// This function is basically a switch over the CFI failure kind, which is
// extracted from CFICheckFailData (1st function argument). Each case is either
// llvm.trap or a call to one of the two runtime handlers, based on
// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
// failure kind) traps, but this should really never happen. CFICheckFailData
// can be nullptr if the calling module has -fsanitize-trap behavior for this
// check kind; in this case __cfi_check_fail traps as well.
void CodeGenFunction::EmitCfiCheckFail() {
  SanitizerScope SanScope(this);
  FunctionArgList Args;
  ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
  Args.push_back(&ArgData);
  Args.push_back(&ArgAddr);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy,
                                                       Args);

  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
      llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
  F->setVisibility(llvm::GlobalValue::HiddenVisibility);

  StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
                SourceLocation());

  // This function is not affected by NoSanitizeList. This function does
  // not have a source location, but "src:*" would still apply. Revert any
  // changes to SanOpts made in StartFunction.
  SanOpts = CGM.getLangOpts().Sanitize;

  llvm::Value *Data =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgData.getLocation());
  llvm::Value *Addr =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgAddr.getLocation());

  // Data == nullptr means the calling module has trap behaviour for this check.
  llvm::Value *DataIsNotNullPtr =
      Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
  EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);

  llvm::StructType *SourceLocationTy =
      llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
  llvm::StructType *CfiCheckFailDataTy =
      llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);

  llvm::Value *V = Builder.CreateConstGEP2_32(
      CfiCheckFailDataTy,
      Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
      0);

  Address CheckKindAddr(V, Int8Ty, getIntAlign());
  llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);

  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
      CGM.getLLVMContext(),
      llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
  llvm::Value *ValidVtable = Builder.CreateZExt(
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                         {Addr, AllVtables}),
      IntPtrTy);

  const std::pair<int, SanitizerMask> CheckKinds[] = {
      {CFITCK_VCall, SanitizerKind::CFIVCall},
      {CFITCK_NVCall, SanitizerKind::CFINVCall},
      {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
      {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
      {CFITCK_ICall, SanitizerKind::CFIICall}};

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks;
  for (auto CheckKindMaskPair : CheckKinds) {
    int Kind = CheckKindMaskPair.first;
    SanitizerMask Mask = CheckKindMaskPair.second;
    llvm::Value *Cond =
        Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
    if (CGM.getLangOpts().Sanitize.has(Mask))
      EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
                {Data, Addr, ValidVtable});
    else
      EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
  }

  FinishFunction();
  // The only reference to this function will be created during LTO link.
  // Make sure it survives until then.
  CGM.addUsedGlobal(F);
}
void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
  if (SanOpts.has(SanitizerKind::Unreachable)) {
    SanitizerScope SanScope(this);
    EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
                             SanitizerKind::Unreachable),
              SanitizerHandler::BuiltinUnreachable,
              EmitCheckSourceLocation(Loc), None);
  }
  Builder.CreateUnreachable();
}
void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
                                    SanitizerHandler CheckHandlerID) {
  llvm::BasicBlock *Cont = createBasicBlock("cont");

  // If we're optimizing, collapse all calls to trap down to just one per
  // check-type per function to save on code size.
  if (TrapBBs.size() <= CheckHandlerID)
    TrapBBs.resize(CheckHandlerID + 1);
  llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];

  if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
    TrapBB = createBasicBlock("trap");
    Builder.CreateCondBr(Checked, Cont, TrapBB);
    EmitBlock(TrapBB);

    llvm::CallInst *TrapCall =
        Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
                           llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));

    if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
      auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                    CGM.getCodeGenOpts().TrapFuncName);
      TrapCall->addFnAttr(A);
    }
    TrapCall->setDoesNotReturn();
    TrapCall->setDoesNotThrow();
    Builder.CreateUnreachable();
  } else {
    auto Call = TrapBB->begin();
    assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");

    Call->applyMergedLocation(Call->getDebugLoc(),
                              Builder.getCurrentDebugLocation());
    Builder.CreateCondBr(Checked, Cont, TrapBB);
  }

  EmitBlock(Cont);
}
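
// Note on the trap path above: llvm.ubsantrap takes the handler ID as an i8
// immediate, e.g. 'call void @llvm.ubsantrap(i8 3)' (ID value illustrative),
// which is what lets all traps of one check kind share a single trap block
// when optimizing.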
llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
  llvm::CallInst *TrapCall =
      Builder.CreateCall(CGM.getIntrinsic(IntrID));

  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
    auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                  CGM.getCodeGenOpts().TrapFuncName);
    TrapCall->addFnAttr(A);
  }

  return TrapCall;
}
Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
                                                 LValueBaseInfo *BaseInfo,
                                                 TBAAAccessInfo *TBAAInfo) {
  assert(E->getType()->isArrayType() &&
         "Array to pointer decay must have array source type!");

  // Expressions of array type can't be bitfields or vector elements.
  LValue LV = EmitLValue(E);
  Address Addr = LV.getAddress(*this);

  // If the array type was an incomplete type, we need to make sure
  // the decay ends up being the right type.
  llvm::Type *NewTy = ConvertType(E->getType());
  Addr = Builder.CreateElementBitCast(Addr, NewTy);

  // Note that VLA pointers are always decayed, so we don't need to do
  // anything here.
  if (!E->getType()->isVariableArrayType()) {
    assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
           "Expected pointer to array");
    Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
  }

  // The result of this decay conversion points to an array element within the
  // base lvalue. However, since TBAA currently does not support representing
  // accesses to elements of member arrays, we conservatively represent accesses
  // to the pointee object as if it had no base lvalue specified.
  // TODO: Support TBAA for member arrays.
  QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
  if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);

  return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType));
}
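
// For example (illustrative): decaying 'int A[10]' emits a single
// zero-index "arraydecay" GEP that turns the [10 x i32] array address into
// a pointer to its first i32 element.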
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
  // If this isn't just an array->pointer decay, bail out.
  const auto *CE = dyn_cast<CastExpr>(E);
  if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
    return nullptr;

  // If this is a decay from variable width array, bail out.
  const Expr *SubExpr = CE->getSubExpr();
  if (SubExpr->getType()->isVariableArrayType())
    return nullptr;

  return SubExpr;
}
static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
                                          llvm::Type *elemType,
                                          llvm::Value *ptr,
                                          ArrayRef<llvm::Value *> indices,
                                          bool inbounds,
                                          bool signedIndices,
                                          SourceLocation loc,
                                          const llvm::Twine &name = "arrayidx") {
  if (inbounds) {
    return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
                                      CodeGenFunction::NotSubtraction, loc,
                                      name);
  } else {
    return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
  }
}
static CharUnits getArrayElementAlign(CharUnits arrayAlign,
                                      llvm::Value *idx,
                                      CharUnits eltSize) {
  // If we have a constant index, we can use the exact offset of the
  // element we're accessing.
  if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
    CharUnits offset = constantIdx->getZExtValue() * eltSize;
    return arrayAlign.alignmentAtOffset(offset);
  } else {
    // Otherwise, use the worst-case alignment for any element.
    return arrayAlign.alignmentOfArrayElement(eltSize);
  }
}
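
// Worked example (illustrative): for a 16-byte-aligned array of 4-byte
// elements, constant index 2 gives offset 8 and thus alignment 8; a
// non-constant index falls back to the worst-case element alignment, 4.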
static QualType getFixedSizeElementType(const ASTContext &ctx,
                                        const VariableArrayType *vla) {
  QualType eltType;
  do {
    eltType = vla->getElementType();
  } while ((vla = ctx.getAsVariableArrayType(eltType)));
  return eltType;
}
/// Given an array base, check whether its member access belongs to a record
/// with preserve_access_index attribute or not.
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
  if (!ArrayBase || !CGF.getDebugInfo())
    return false;

  // Only support base as either a MemberExpr or DeclRefExpr.
  // DeclRefExpr to cover cases like:
  //    struct s { int a; int b[10]; };
  //    struct s *p; p[1].a
  // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
  // p->b[5] is a MemberExpr example.
  const Expr *E = ArrayBase->IgnoreImpCasts();
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();

  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
    const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
    if (!VarDef)
      return false;

    const auto *PtrT = VarDef->getType()->getAs<PointerType>();
    if (!PtrT)
      return false;

    const auto *PointeeT = PtrT->getPointeeType()
                             ->getUnqualifiedDesugaredType();
    if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
      return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
    return false;
  }

  return false;
}
static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
                                     ArrayRef<llvm::Value *> indices,
                                     QualType eltType, bool inbounds,
                                     bool signedIndices, SourceLocation loc,
                                     QualType *arrayType = nullptr,
                                     const Expr *Base = nullptr,
                                     const llvm::Twine &name = "arrayidx") {
  // All the indices except the last must be zero.
#ifndef NDEBUG
  for (auto *idx : indices.drop_back())
    assert(isa<llvm::ConstantInt>(idx) &&
           cast<llvm::ConstantInt>(idx)->isZero());
#endif

  // Determine the element size of the statically-sized base. This is
  // the thing that the indices are expressed in terms of.
  if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
    eltType = getFixedSizeElementType(CGF.getContext(), vla);
  }

  // We can use that to compute the best alignment of the element.
  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
  CharUnits eltAlign =
      getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);

  llvm::Value *eltPtr;
  auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
  if (!LastIndex ||
      (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
    eltPtr = emitArraySubscriptGEP(
        CGF, addr.getElementType(), addr.getPointer(), indices, inbounds,
        signedIndices, loc, name);
  } else {
    // Remember the original array subscript for bpf target
    unsigned idx = LastIndex->getZExtValue();
    llvm::DIType *DbgInfo = nullptr;
    if (arrayType)
      DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
    eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(),
                                                        addr.getPointer(),
                                                        indices.size() - 1,
                                                        idx, DbgInfo);
  }

  return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
}
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
                                               bool Accessed) {
  // The index must always be an integer, which is not an aggregate. Emit it
  // in lexical order (this complexity is, sadly, required by C++17).
  llvm::Value *IdxPre =
      (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
  bool SignedIndices = false;
  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
    auto *Idx = IdxPre;
    if (E->getLHS() != E->getIdx()) {
      assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
      Idx = EmitScalarExpr(E->getIdx());
    }

    QualType IdxTy = E->getIdx()->getType();
    bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
    SignedIndices |= IdxSigned;

    if (SanOpts.has(SanitizerKind::ArrayBounds))
      EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);

    // Extend or truncate the index type to 32 or 64-bits.
    if (Promote && Idx->getType() != IntPtrTy)
      Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
    return Idx;
  };
  IdxPre = nullptr;

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType() &&
      !isa<ExtVectorElementExpr>(E->getBase())) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/false);
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    return LValue::MakeVectorElt(LHS.getAddress(*this), Idx,
                                 E->getBase()->getType(), LHS.getBaseInfo(),
                                 TBAAAccessInfo());
  }

  // All the other cases basically behave like simple offsetting.

  // Handle the extvector case we ignored above.
  if (isa<ExtVectorElementExpr>(E->getBase())) {
    LValue LV = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    Address Addr = EmitExtVectorElementLValue(LV);

    QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
                                 SignedIndices, E->getExprLoc());
    return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, EltType));
  }

  LValueBaseInfo EltBaseInfo;
  TBAAAccessInfo EltTBAAInfo;
  Address Addr = Address::invalid();
  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(E->getType())) {
    // The base must be a pointer, which is not an aggregate. Emit
    // it. It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined()) {
      Idx = Builder.CreateMul(Idx, numElements);
    } else {
      Idx = Builder.CreateNSWMul(Idx, numElements);
    }

    Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc());

  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
    // Indexing over an interface, as in "NSString *P; P[4];"

    // Emit the base pointer.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
    llvm::Value *InterfaceSizeVal =
        llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());

    llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);

    // We don't necessarily build correct LLVM struct types for ObjC
    // interfaces, so we can't rely on GEP to do this scaling
    // correctly, so we need to cast to i8*. FIXME: is this actually
    // true? A lot of other things in the fragile ABI would break...
    llvm::Type *OrigBaseElemTy = Addr.getElementType();
    Addr = Builder.CreateElementBitCast(Addr, Int8Ty);

    // Do the GEP.
    CharUnits EltAlign =
        getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
    llvm::Value *EltPtr =
        emitArraySubscriptGEP(*this, Addr.getElementType(), Addr.getPointer(),
                              ScaledIdx, false, SignedIndices, E->getExprLoc());
    Addr = Address(EltPtr, Addr.getElementType(), EltAlign);

    // Cast back.
    Addr = Builder.CreateElementBitCast(Addr, OrigBaseElemTy);
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast. While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here. Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    // Propagate the alignment from the array itself to the result.
    QualType arrayType = Array->getType();
    Addr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
        E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
        E->getExprLoc(), &arrayType, E->getBase());
    EltBaseInfo = ArrayLV.getBaseInfo();
    EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
  } else {
    // The base must be a pointer; emit it with an estimate of its alignment.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    QualType ptrType = E->getBase()->getType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc(), &ptrType,
                                 E->getBase());
  }

  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);

  if (getLangOpts().ObjC &&
      getLangOpts().getGC() != LangOptions::NonGC) {
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}
LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
  assert(
      !E->isIncomplete() &&
      "incomplete matrix subscript expressions should be rejected during Sema");
  LValue Base = EmitLValue(E->getBase());
  llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
  llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
  llvm::Value *NumRows = Builder.getIntN(
      RowIdx->getType()->getScalarSizeInBits(),
      E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
  llvm::Value *FinalIdx =
      Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
  return LValue::MakeMatrixElt(
      MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
      E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
}
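
// Matrix values are laid out column-major, so for an (illustrative) 4x3
// matrix 'm', the element at row 2, column 1 flattens to index
// 1 * 4 + 2 == 6 in the underlying vector.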
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
                                       LValueBaseInfo &BaseInfo,
                                       TBAAAccessInfo &TBAAInfo,
                                       QualType BaseTy, QualType ElTy,
                                       bool IsLowerBound) {
  LValue BaseLVal;
  if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) {
    BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound);
    if (BaseTy->isArrayType()) {
      Address Addr = BaseLVal.getAddress(CGF);
      BaseInfo = BaseLVal.getBaseInfo();

      // If the array type was an incomplete type, we need to make sure
      // the decay ends up being the right type.
      llvm::Type *NewTy = CGF.ConvertType(BaseTy);
      Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy);

      // Note that VLA pointers are always decayed, so we don't need to do
      // anything here.
      if (!BaseTy->isVariableArrayType()) {
        assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
               "Expected pointer to array");
        Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
      }

      return CGF.Builder.CreateElementBitCast(Addr,
                                              CGF.ConvertTypeForMem(ElTy));
    }
    LValueBaseInfo TypeBaseInfo;
    TBAAAccessInfo TypeTBAAInfo;
    CharUnits Align =
        CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
    BaseInfo.mergeForCast(TypeBaseInfo);
    TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
    return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
                   CGF.ConvertTypeForMem(ElTy), Align);
  }
  return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}
LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
                                                bool IsLowerBound) {
  QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase());
  QualType ResultExprTy;
  if (auto *AT = getContext().getAsArrayType(BaseTy))
    ResultExprTy = AT->getElementType();
  else
    ResultExprTy = BaseTy->getPointeeType();
  llvm::Value *Idx = nullptr;
  if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
    // Requesting lower bound or upper bound, but without provided length and
    // without ':' symbol for the default length -> length = 1.
    // Idx = LowerBound ?: 0;
    if (auto *LowerBound = E->getLowerBound()) {
      Idx = Builder.CreateIntCast(
          EmitScalarExpr(LowerBound), IntPtrTy,
          LowerBound->getType()->hasSignedIntegerRepresentation());
    } else
      Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
  } else {
    // Try to emit length or lower bound as constant. If this is possible, 1
    // is subtracted from constant length or lower bound. Otherwise, emit LLVM
    // IR (LB + Len) - 1.
    auto &C = CGM.getContext();
    auto *Length = E->getLength();
    llvm::APSInt ConstLength;
    if (Length) {
      // Idx = LowerBound + Length - 1;
      if (Optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
        ConstLength = CL->zextOrTrunc(PointerWidthInBits);
        Length = nullptr;
      }
      auto *LowerBound = E->getLowerBound();
      llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
      if (LowerBound) {
        if (Optional<llvm::APSInt> LB = LowerBound->getIntegerConstantExpr(C)) {
          ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
          LowerBound = nullptr;
        }
      }
      if (!Length)
        --ConstLength;
      else if (!LowerBound)
        --ConstLowerBound;

      if (Length || LowerBound) {
        auto *LowerBoundVal =
            LowerBound
                ? Builder.CreateIntCast(
                      EmitScalarExpr(LowerBound), IntPtrTy,
                      LowerBound->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
        auto *LengthVal =
            Length
                ? Builder.CreateIntCast(
                      EmitScalarExpr(Length), IntPtrTy,
                      Length->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLength);
        Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
                                /*HasNUW=*/false,
                                !getLangOpts().isSignedOverflowDefined());
        if (Length && LowerBound) {
          Idx = Builder.CreateSub(
              Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
              /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
        }
      } else
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
    } else {
      // Idx = ArraySize - 1;
      QualType ArrayTy = BaseTy->isPointerType()
                             ? E->getBase()->IgnoreParenImpCasts()->getType()
                             : BaseTy;
      if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
        Length = VAT->getSizeExpr();
        if (Optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
          ConstLength = *L;
          Length = nullptr;
        }
      } else {
        auto *CAT = C.getAsConstantArrayType(ArrayTy);
        ConstLength = CAT->getSize();
      }
      if (Length) {
        auto *LengthVal = Builder.CreateIntCast(
            EmitScalarExpr(Length), IntPtrTy,
            Length->getType()->hasSignedIntegerRepresentation());
        Idx = Builder.CreateSub(
            LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
            /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
      } else {
        ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
        --ConstLength;
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
      }
    }
  }
  assert(Idx);

  Address EltPtr = Address::invalid();
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
    // The base must be a pointer, which is not an aggregate. Emit
    // it. It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
                                BaseTy, VLA->getElementType(), IsLowerBound);
    // The element count here is the total number of non-VLA elements.
    llvm::Value *NumElements = getVLASize(VLA).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined())
      Idx = Builder.CreateMul(Idx, NumElements);
    else
      Idx = Builder.CreateNSWMul(Idx, NumElements);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast. While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here. Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);

    // Propagate the alignment from the array itself to the result.
    EltPtr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
        ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
        /*signedIndices=*/false, E->getExprLoc());
    BaseInfo = ArrayLV.getBaseInfo();
    TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
  } else {
    Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
                                           TBAAInfo, BaseTy, ResultExprTy,
                                           IsLowerBound);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  }

  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
}
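
// For example (illustrative): for the OpenMP array section 'a[1:3]', the
// lower-bound lvalue uses Idx = 1, while the upper-bound lvalue uses
// Idx = 1 + 3 - 1 == 3; with constant operands this folds to a constant.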
LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
    const auto *PT = E->getBase()->getType()->castAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    Address VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
                          AlignmentSource::Decl);
  }

  QualType type =
      E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
  SmallVector<uint32_t, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV =
        llvm::ConstantDataVector::get(getLLVMContext(), Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type,
                                    Base.getBaseInfo(), TBAAAccessInfo());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
                                  Base.getBaseInfo(), TBAAAccessInfo());
}
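
// For example (illustrative): for an ext_vector 'float4 v', 'v.yx' encodes
// element indices {1, 0}; a nested swizzle like 'v.yx.x' is resolved through
// the aggregate-element path above, yielding original index 1.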
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
    EmitIgnoredExpr(E->getBase());
    return EmitDeclRefLValue(DRE);
  }

  Expr *BaseExpr = E->getBase();
  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
  LValue BaseLV;
  if (E->isArrow()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
    QualType PtrTy = BaseExpr->getType()->getPointeeType();
    SanitizerSet SkippedChecks;
    bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
    if (IsBaseCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
      SkippedChecks.set(SanitizerKind::Null, true);
    EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy,
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
    BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
  } else
    BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);

  NamedDecl *ND = E->getMemberDecl();
  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseLV, Field);
    setObjCGCLValueClass(getContext(), E, LV);
    if (getLangOpts().OpenMP) {
      // If the member was explicitly marked as nontemporal, mark it as
      // nontemporal. If the base lvalue is marked as nontemporal, mark access
      // to children as nontemporal too.
      if ((IsWrappedCXXThis(BaseExpr) &&
           CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
          BaseLV.isNontemporal())
        LV.setNontemporal(/*Value=*/true);
    }
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}
/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
  if (CurCodeDecl) {
    assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
    assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
  }
  QualType LambdaTagType =
      getContext().getTagDeclType(Field->getParent());
  LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
  return EmitLValueForField(LambdaLV, Field);
}
/// Get the field index in the debug info. The debug info structure/union
/// will ignore the unnamed bitfields.
unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
                                             unsigned FieldIndex) {
  unsigned I = 0, Skipped = 0;

  for (auto *F : Rec->getDefinition()->fields()) {
    if (I == FieldIndex)
      break;
    if (F->isUnnamedBitfield())
      Skipped++;
    I++;
  }

  return FieldIndex - Skipped;
}
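
// For example (illustrative): in 'struct S { int a; int : 4; int b; };',
// field 'b' has FieldIndex 2 in the AST but debug-info index 1, since the
// unnamed bit-field between 'a' and 'b' is skipped.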
/// Get the address of a zero-sized field within a record. The resulting
/// address doesn't necessarily have the right type.
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
                                       const FieldDecl *Field) {
  CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
      CGF.getContext().getFieldOffset(Field));
  if (Offset.isZero())
    return Base;
  Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty);
  return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
}
/// Drill down to the storage of a field without walking into
/// reference types.
///
/// The resulting address doesn't necessarily have the right type.
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
                                      const FieldDecl *field) {
  if (field->isZeroSize(CGF.getContext()))
    return emitAddrOfZeroSizeField(CGF, base, field);

  const RecordDecl *rec = field->getParent();

  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreateStructGEP(base, idx, field->getName());
}
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
                                        Address addr, const FieldDecl *field) {
  const RecordDecl *rec = field->getParent();
  llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
      base.getType(), rec->getLocation());

  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreatePreserveStructAccessIndex(
      addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
}
static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
  const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  if (RD->isDynamicClass())
    return true;

  for (const auto &Base : RD->bases())
    if (hasAnyVptr(Base.getType(), Context))
      return true;

  for (const FieldDecl *Field : RD->fields())
    if (hasAnyVptr(Field->getType(), Context))
      return true;

  return false;
}
LValue CodeGenFunction::EmitLValueForField(LValue base,
                                           const FieldDecl *field) {
  LValueBaseInfo BaseInfo = base.getBaseInfo();

  if (field->isBitField()) {
    const CGRecordLayout &RL =
        CGM.getTypes().getCGRecordLayout(field->getParent());
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
    const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
                             CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
                             Info.VolatileStorageSize != 0 &&
                             field->getType()
                                 .withCVRQualifiers(base.getVRQualifiers())
                                 .isVolatileQualified();
    Address Addr = base.getAddress(*this);
    unsigned Idx = RL.getLLVMFieldNo(field);
    const RecordDecl *rec = field->getParent();
    if (!UseVolatile) {
      if (!IsInPreservedAIRegion &&
          (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
        if (Idx != 0)
          // For structs, we GEP to the field that the record layout suggests.
          Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
      } else {
        llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
            getContext().getRecordType(rec), rec->getLocation());
        Addr = Builder.CreatePreserveStructAccessIndex(
            Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
            DbgInfo);
      }
    }
    const unsigned SS =
        UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
    // Get the access type.
    llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
    if (Addr.getElementType() != FieldIntTy)
      Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
    if (UseVolatile) {
      const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
      if (VolatileOffset)
        Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
    }

    QualType fieldType =
        field->getType().withCVRQualifiers(base.getVRQualifiers());
    // TODO: Support TBAA for bit fields.
    LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
    return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
                                TBAAAccessInfo());
  }

  // Fields of may-alias structures are may-alias themselves.
  // FIXME: this should get propagated down through anonymous structs
  // and unions.
  QualType FieldType = field->getType();
  const RecordDecl *rec = field->getParent();
  AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
  TBAAAccessInfo FieldTBAAInfo;
  if (base.getTBAAInfo().isMayAlias() ||
      rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else if (rec->isUnion()) {
    // TODO: Support TBAA for unions.
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else {
    // If no base type has been assigned for the base access, then try to
    // generate one for this base lvalue.
    FieldTBAAInfo = base.getTBAAInfo();
    if (!FieldTBAAInfo.BaseType) {
      FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
      assert(!FieldTBAAInfo.Offset &&
             "Nonzero offset for an access with no base type!");
    }

    // Adjust offset to be relative to the base type.
    const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(field->getParent());
    unsigned CharWidth = getContext().getCharWidth();
    if (FieldTBAAInfo.BaseType)
      FieldTBAAInfo.Offset +=
          Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;

    // Update the final access type and size.
    FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
    FieldTBAAInfo.Size =
        getContext().getTypeSizeInChars(FieldType).getQuantity();
  }

  Address addr = base.getAddress(*this);
  if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        ClassDef->isDynamicClass()) {
      // Getting to any field of dynamic object requires stripping dynamic
      // information provided by invariant.group. This is because accessing
      // fields may leak the real address of dynamic object, which could result
      // in miscompilation when leaked pointer would be compared.
      auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
      addr = Address(stripped, addr.getElementType(), addr.getAlignment());
    }
  }

  unsigned RecordCVR = base.getVRQualifiers();
  if (rec->isUnion()) {
    // For unions, there is no pointer adjustment.
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        hasAnyVptr(FieldType, getContext()))
      // Because unions can easily skip invariant.barriers, we need to add
      // a barrier every time CXXRecord field with vptr is referenced.
      addr = Builder.CreateLaunderInvariantGroup(addr);

    if (IsInPreservedAIRegion ||
        (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
      // Remember the original union field index
      llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
          base.getType(), rec->getLocation());
      addr = Address(
          Builder.CreatePreserveUnionAccessIndex(
              addr.getPointer(),
              getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
          addr.getElementType(), addr.getAlignment());
    }

    if (FieldType->isReferenceType())
      addr = Builder.CreateElementBitCast(
          addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
  } else {
    if (!IsInPreservedAIRegion &&
        (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
      // For structs, we GEP to the field that the record layout suggests.
      addr = emitAddrOfFieldStorage(*this, addr, field);
    else
      // Remember the original struct field index
      addr = emitPreserveStructAccess(*this, base, addr, field);
  }

  // If this is a reference field, load the reference right now.
  if (FieldType->isReferenceType()) {
    LValue RefLVal =
        MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
    if (RecordCVR & Qualifiers::Volatile)
      RefLVal.getQuals().addVolatile();
    addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);

    // Qualifiers on the struct don't apply to the referencee.
    RecordCVR = 0;
    FieldType = FieldType->getPointeeType();
  }

  // Make sure that the address is pointing to the right type. This is critical
  // for both unions and structs. A union needs a bitcast, a struct element
  // will need a bitcast if the LLVM type laid out doesn't match the desired
  // type.
  addr = Builder.CreateElementBitCast(
      addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());

  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
  LV.getQuals().addCVRQualifiers(RecordCVR);

  // __weak attribute on a field is ignored.
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
    LV.getQuals().removeObjCGCAttr();

  return LV;
}
LValue
CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
                                                  const FieldDecl *Field) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(Base, Field);

  Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field);

  // Make sure that the address is pointing to the right type.
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
  V = Builder.CreateElementBitCast(V, llvmType, Field->getName());

  // TODO: Generate TBAA information that describes this access as a structure
  // member access and not just an access to an object of the field's type. This
  // should be similar to what we do in EmitLValueForField().
  LValueBaseInfo BaseInfo = Base.getBaseInfo();
  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
  return MakeAddrLValue(V, FieldType, FieldBaseInfo,
                        CGM.getTBAAInfoForSubobject(Base, FieldType));
}
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
  if (E->isFileScope()) {
    ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
    return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
  }
  if (E->getType()->isVariablyModifiedType())
    // make sure to emit the VLA size.
    EmitVariablyModifiedType(E->getType());

  Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr *InitExpr = E->getInitializer();
  LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);

  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
                   /*Init*/ true);

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  if (!getLangOpts().CPlusPlus)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
                                  E->getType(), getDestroyer(DtorKind),
                                  DtorKind & EHCleanup);

  return Result;
}
LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
  if (!E->isGLValue())
    // Initializing an aggregate temporary in C++11: T{...}.
    return EmitAggExprToLValue(E);

  // An lvalue initializer list must be initializing a reference.
  assert(E->isTransparent() && "non-transparent glvalue init list");
  return EmitLValue(E->getInit(0));
}

/// Emit the operand of a glvalue conditional operator. This is either a
/// glvalue or a (possibly-parenthesized) throw-expression. If this is a
/// throw, no LValue is returned and the current block has been terminated.
static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
                                                    const Expr *Operand) {
  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
    CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/ false);
    return None;
  }

  return CGF.EmitLValue(Operand);
}

namespace {
// Handle the case where the condition is a constant-foldable simple integer,
// which means we don't have to separately handle the true/false blocks.
llvm::Optional<LValue> HandleConditionalOperatorLValueSimpleCase(
    CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
  const Expr *condExpr = E->getCond();
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
    if (!CondExprBool)
      std::swap(Live, Dead);

    if (!CGF.ContainsLabel(Dead)) {
      // If the true case is live, we need to track its region.
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      // If this is a throw expression, emit it and return an undefined lvalue
      // because it can't be used.
      if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
        CGF.EmitCXXThrowExpr(ThrowExpr);
        llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
        llvm::Type *Ty = llvm::PointerType::getUnqual(ElemTy);
        return CGF.MakeAddrLValue(
            Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
            Dead->getType());
      }
      return CGF.EmitLValue(Live);
    }
  }
  return llvm::None;
}
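
// For illustration (examples not from this file):
//   (1 ? x : y) = 0;   // folds: only the lvalue for 'x' is emitted
//   (1 ? throw 0 : x)  // live throw: the block is terminated and an undef
//                      // lvalue of the dead arm's type is returned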

struct ConditionalInfo {
  llvm::BasicBlock *lhsBlock, *rhsBlock;
  Optional<LValue> LHS, RHS;
};

// Create and generate the 3 blocks for a conditional operator.
// Leaves the 'current block' in the continuation basic block.
template<typename FuncTy>
ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
                                      const AbstractConditionalOperator *E,
                                      const FuncTy &BranchGenFunc) {
  ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
                       CGF.createBasicBlock("cond.false"), llvm::None,
                       llvm::None};
  llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
                           CGF.getProfileCount(E));

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.lhsBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
  eval.end(CGF);
  Info.lhsBlock = CGF.Builder.GetInsertBlock();

  if (Info.LHS)
    CGF.Builder.CreateBr(endBlock);

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.rhsBlock);
  eval.begin(CGF);
  Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
  eval.end(CGF);
  Info.rhsBlock = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(endBlock);

  return Info;
}
} // namespace
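
// The CFG produced by EmitConditionalBlocks above looks like (a sketch):
//
//     br i1 %cond, label %cond.true, label %cond.false
//   cond.true:   ; true arm; branches to cond.end if it produced a value
//   cond.false:  ; false arm; EmitBlock(endBlock) adds the fall-through branch
//   cond.end:    ; the 'current block' when the helper returns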

void CodeGenFunction::EmitIgnoredConditionalOperator(
    const AbstractConditionalOperator *E) {
  if (!E->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(E->getType()) &&
           "Unexpected conditional operator!");
    return (void)EmitAggExprToLValue(E);
  }

  OpaqueValueMapping binding(*this, E);
  if (HandleConditionalOperatorLValueSimpleCase(*this, E))
    return;

  EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
    CGF.EmitIgnoredExpr(E);
    return LValue();
  });
}

LValue CodeGenFunction::EmitConditionalOperatorLValue(
    const AbstractConditionalOperator *expr) {
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(expr->getType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  OpaqueValueMapping binding(*this, expr);
  if (llvm::Optional<LValue> Res =
          HandleConditionalOperatorLValueSimpleCase(*this, expr))
    return *Res;

  ConditionalInfo Info = EmitConditionalBlocks(
      *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
        return EmitLValueOrThrowExpression(CGF, E);
      });

  if ((Info.LHS && !Info.LHS->isSimple()) ||
      (Info.RHS && !Info.RHS->isSimple()))
    return EmitUnsupportedLValue(expr, "conditional operator");

  if (Info.LHS && Info.RHS) {
    Address lhsAddr = Info.LHS->getAddress(*this);
    Address rhsAddr = Info.RHS->getAddress(*this);
    llvm::PHINode *phi = Builder.CreatePHI(lhsAddr.getType(), 2, "cond-lvalue");
    phi->addIncoming(lhsAddr.getPointer(), Info.lhsBlock);
    phi->addIncoming(rhsAddr.getPointer(), Info.rhsBlock);
    Address result(phi, lhsAddr.getElementType(),
                   std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
    AlignmentSource alignSource =
        std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
                 Info.RHS->getBaseInfo().getAlignmentSource());
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
        Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
    return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
                          TBAAInfo);
  } else {
    assert((Info.LHS || Info.RHS) &&
           "both operands of glvalue conditional are throw-expressions?");
    return Info.LHS ? *Info.LHS : *Info.RHS;
  }
}
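
// For illustration (example not from this file): in
//   (b ? x : y) = 3;
// both arms are simple lvalues, so the addresses of 'x' and 'y' are merged
// with the phi above and the store goes through the merged address, whose
// alignment is the minimum of the two operands' alignments.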

/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
/// type. If the cast is to a reference, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
/// access one of its members. This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_ToVoid:
  case CK_BitCast:
  case CK_LValueToRValueBitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  // These are never l-values; just use the aggregate emission code.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
    return EmitAggExprToLValue(E);

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress(*this);
    const auto *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
  }

  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_LValueToRValue:
    return EmitLValue(E->getSubExpr());

  case CK_NoOp: {
    // CK_NoOp can model a qualification conversion, which can remove an array
    // bound and change the IR type.
    // FIXME: Once pointee types are removed from IR, remove this.
    LValue LV = EmitLValue(E->getSubExpr());
    if (LV.isSimple()) {
      Address V = LV.getAddress(*this);
      if (V.isValid()) {
        llvm::Type *T = ConvertTypeForMem(E->getType());
        if (V.getElementType() != T)
          LV.setAddress(Builder.CreateElementBitCast(V, T));
      }
    }
    return LV;
  }

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const auto *DerivedClassTy =
        E->getSubExpr()->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    Address This = LV.getAddress(*this);

    // Perform the derived-to-base conversion
    Address Base = GetAddressOfBaseClass(
        This, DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false, E->getExprLoc());

    // TODO: Support accesses to members of base classes in TBAA. For now, we
    // conservatively pretend that the complete object is of the base class
    // type.
    return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
    Address Derived = GetAddressOfDerivedClass(
        LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false);

    // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (sanitizePerformTypeCheck())
      EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
                    Derived.getPointer(), E->getType());

    if (SanOpts.has(SanitizerKind::CFIDerivedCast))
      EmitVTablePtrCheckForCast(E->getType(), Derived,
                                /*MayBeNull=*/false, CFITCK_DerivedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const auto *CE = cast<ExplicitCastExpr>(E);

    CGM.EmitExplicitCastExprType(CE, this);
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = Builder.CreateElementBitCast(
        LV.getAddress(*this),
        ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));

    if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
      EmitVTablePtrCheckForCast(E->getType(), V,
                                /*MayBeNull=*/false, CFITCK_UnrelatedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_AddressSpaceConversion: {
    LValue LV = EmitLValue(E->getSubExpr());
    QualType DestTy = getContext().getPointerType(E->getType());
    llvm::Value *V = getTargetHooks().performAddrSpaceCast(
        *this, LV.getPointer(*this),
        E->getSubExpr()->getType().getAddressSpace(),
        E->getType().getAddressSpace(), ConvertType(DestTy));
    return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
                                  LV.getAddress(*this).getAlignment()),
                          E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = Builder.CreateElementBitCast(LV.getAddress(*this),
                                             ConvertType(E->getType()));
    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ZeroToOCLOpaqueType:
    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}
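
// For illustration (examples not from this file):
//   struct B { int m; };
//   struct D : B {} d;
//   static_cast<B &>(d).m = 0;    // CK_DerivedToBase: the address of the B
//                                 // subobject is computed above
//   reinterpret_cast<char &>(d);  // CK_LValueBitCast: same storage, with a
//                                 // reinterpreted element type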

LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOrCreateOpaqueLValueMapping(e);
}

LValue
CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
  assert(OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
      it = OpaqueLValues.find(e);

  if (it != OpaqueLValues.end())
    return it->second;

  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
  return EmitLValue(e->getSourceExpr());
}

RValue
CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
  assert(!OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
      it = OpaqueRValues.find(e);

  if (it != OpaqueRValues.end())
    return it->second;

  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
  return EmitAnyExpr(e->getSourceExpr());
}

RValue CodeGenFunction::EmitRValueForField(LValue LV,
                                           const FieldDecl *FD,
                                           SourceLocation Loc) {
  QualType FT = FD->getType();
  LValue FieldLV = EmitLValueForField(LV, FD);
  switch (getEvaluationKind(FT)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
  case TEK_Aggregate:
    return FieldLV.asAggregateRValue(*this);
  case TEK_Scalar:
    // This routine is used to load fields one-by-one to perform a copy, so
    // don't load reference fields.
    if (FD->getType()->isReferenceType())
      return RValue::get(FieldLV.getPointer(*this));
    // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
    // primitive load.
    if (FieldLV.isBitField())
      return EmitLoadOfLValue(FieldLV, Loc);
    return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
  }
  llvm_unreachable("bad evaluation kind");
}

//===--------------------------------------------------------------------===//
// Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD =
            dyn_cast_or_null<CXXMethodDecl>(CE->getCalleeDecl()))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  CGCallee callee = EmitCallee(E->getCallee());

  if (callee.isBuiltin()) {
    return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
                           E, ReturnValue);
  }

  if (callee.isPseudoDestructor()) {
    return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
  }

  return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
}

/// Emit a CallExpr without considering whether it might be a subclass.
RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
                                           ReturnValueSlot ReturnValue) {
  CGCallee Callee = EmitCallee(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
}

// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
  for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
    if (!PD->isInlineBuiltinDeclaration())
      return false;
  return true;
}
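
// For illustration (a sketch of the pattern, not from this file): a fortified
// libc header may provide
//   extern inline __attribute__((gnu_inline, always_inline))
//   void *memcpy(void *d, const void *s, size_t n) { /* checked body */ }
// while the real, non-inline definition lives in the library; if every
// previous declaration is an inline builtin declaration, there is no
// shadowing non-inline version to prefer.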

static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (auto builtinID = FD->getBuiltinID()) {
    std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
    std::string NoBuiltins = "no-builtins";

    StringRef Ident = CGF.CGM.getMangledName(GD);
    std::string FDInlineName = (Ident + ".inline").str();

    bool IsPredefinedLibFunction =
        CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
    bool HasAttributeNoBuiltin =
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);

    // When directly calling an inline builtin, call it through its mangled
    // name to make it clear it's not the actual builtin.
    if (CGF.CurFn->getName() != FDInlineName &&
        OnlyHasInlineBuiltinDeclaration(FD)) {
      llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
      llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
      llvm::Module *M = Fn->getParent();
      llvm::Function *Clone = M->getFunction(FDInlineName);
      if (!Clone) {
        Clone = llvm::Function::Create(Fn->getFunctionType(),
                                       llvm::GlobalValue::InternalLinkage,
                                       Fn->getAddressSpace(), FDInlineName, M);
        Clone->addFnAttr(llvm::Attribute::AlwaysInline);
      }
      return CGCallee::forDirect(Clone, GD);
    }

    // Replaceable builtins provide their own implementation of a builtin. If
    // we are in an inline builtin implementation, avoid trivial infinite
    // recursion. Honor __attribute__((no_builtin("foo"))) or
    // __attribute__((no_builtin)) on the current function unless foo is not a
    // predefined library function, in which case we must generate the builtin
    // no matter what.
    else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
      return CGCallee::forBuiltin(builtinID, FD);
  }

  llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
  if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
      FD->hasAttr<CUDAGlobalAttr>())
    CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
        cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));

  return CGCallee::forDirect(CalleePtr, GD);
}

CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
  E = E->IgnoreParens();

  // Look through function-to-pointer decay.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
        ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
      return EmitCallee(ICE->getSubExpr());
    }

  // Resolve direct calls.
  } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
      return EmitDirectCallee(*this, FD);
    }
  } else if (auto ME = dyn_cast<MemberExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
      EmitIgnoredExpr(ME->getBase());
      return EmitDirectCallee(*this, FD);
    }

  // Look through template substitutions.
  } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
    return EmitCallee(NTTP->getReplacement());

  // Treat pseudo-destructor calls differently.
  } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
    return CGCallee::forPseudoDestructor(PDE);
  }

  // Otherwise, we have an indirect reference.
  llvm::Value *calleePtr;
  QualType functionType;
  if (auto ptrType = E->getType()->getAs<PointerType>()) {
    calleePtr = EmitScalarExpr(E);
    functionType = ptrType->getPointeeType();
  } else {
    functionType = E->getType();
    calleePtr = EmitLValue(E).getPointer(*this);
  }
  assert(functionType->isFunctionType());

  GlobalDecl GD;
  if (const auto *VD =
          dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
    GD = GlobalDecl(VD);

  CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
  CGCallee callee(calleeInfo, calleePtr);
  return callee;
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar: {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    RValue RV = EmitAnyExpr(E->getRHS());
    LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
    if (RV.isScalar())
      EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());
    EmitStoreThroughLValue(RV, LV);
    if (getLangOpts().OpenMP)
      CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                                E->getLHS());
    return LV;
  }

  case TEK_Complex:
    return EmitComplexAssignmentLValue(E);

  case TEK_Aggregate:
    return EmitAggExprToLValue(E);
  }
  llvm_unreachable("bad evaluation kind");
}
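
// For illustration (example not from this file): in C++,
//   int x;
//   (x = 4) += 2;  // BO_Assign yields an lvalue; x ends up 6
// The RHS is emitted before the LHS lvalue above precisely so that a __block
// variable moved by the RHS is not written through a stale address.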

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}
CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr
*E
) {
5189 assert(E
->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
5190 && "binding l-value to type which needs a temporary");
5191 AggValueSlot Slot
= CreateAggTemp(E
->getType());
5192 EmitCXXConstructExpr(E
, Slot
);
5193 return MakeAddrLValue(Slot
.getAddress(), E
->getType(), AlignmentSource::Decl
);

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
  return Builder.CreateElementBitCast(CGM.GetAddrOfMSGuidDecl(E->getGuidDecl()),
                                      ConvertType(E->getType()));
}

LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
  return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
                        AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  Address V =
      CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
  return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

llvm::Value *
CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
  QualType PointerDiffType = getContext().getPointerDiffType();
  return Builder.CreateZExtOrTrunc(OffsetValue,
                                   getTypes().ConvertType(PointerDiffType));
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = nullptr;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    BaseValue = BaseLV.getPointer(*this);
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
      EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                        BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                        AlignmentSource::Decl);
}

RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
                                 const CallExpr *E, ReturnValueSlot ReturnValue,
                                 llvm::Value *Chain) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  const Decl *TargetDecl =
      OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();

  CalleeType = getContext().getCanonicalType(CalleeType);

  auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();

  CGCallee Callee = OrigCallee;

  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    if (llvm::Constant *PrefixSig =
            CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
      SanitizerScope SanScope(this);
      // Remove any (C++17) exception specifications, to allow calling e.g. a
      // noexcept function through a non-noexcept pointer.
      auto ProtoTy =
          getContext().getFunctionTypeWithExceptionSpec(PointeeType, EST_None);
      llvm::Constant *FTRTTIConst =
          CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
      llvm::Type *PrefixSigType = PrefixSig->getType();
      llvm::StructType *PrefixStructTy = llvm::StructType::get(
          CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);

      llvm::Value *CalleePtr = Callee.getFunctionPointer();

      llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
          CalleePtr, llvm::PointerType::getUnqual(PrefixStructTy));
      llvm::Value *CalleeSigPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
      llvm::Value *CalleeSig =
          Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);

      llvm::BasicBlock *Cont = createBasicBlock("cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);

      EmitBlock(TypeCheck);
      llvm::Value *CalleeRTTIPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
      llvm::Value *CalleeRTTIEncoded =
          Builder.CreateAlignedLoad(Int32Ty, CalleeRTTIPtr, getPointerAlign());
      llvm::Value *CalleeRTTI =
          DecodeAddrUsedInPrologue(CalleePtr, CalleeRTTIEncoded);
      llvm::Value *CalleeRTTIMatch =
          Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
      llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
                                      EmitCheckTypeDescriptor(CalleeType)};
      EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
                SanitizerHandler::FunctionTypeMismatch, StaticData,
                {CalleePtr, CalleeRTTI, FTRTTIConst});

      Builder.CreateBr(Cont);
      EmitBlock(Cont);
    }
  }
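
  // The layout assumed by the check above (a sketch): callees built with
  // -fsanitize=function carry a packed prefix
  //   { <signature>, i32 <encoded RTTI pointer> }
  // immediately before their entry point. A matching signature means the
  // encoded RTTI can be decoded and compared against the RTTI of the type
  // the caller is calling through; a mismatch flags a call through an
  // incompatible function pointer type.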

  const auto *FnType = cast<FunctionType>(PointeeType);

  // If we are checking indirect calls and this call is indirect, check that the
  // function pointer is a member of the bit set for the function type.
  if (SanOpts.has(SanitizerKind::CFIICall) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    SanitizerScope SanScope(this);
    EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);

    llvm::Metadata *MD;
    if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
      MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
    else
      MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));

    llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    llvm::Value *CastedCallee = Builder.CreateBitCast(CalleePtr, Int8PtrTy);
    llvm::Value *TypeTest = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedCallee, TypeId});

    auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
    llvm::Constant *StaticData[] = {
        llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
        EmitCheckSourceLocation(E->getBeginLoc()),
        EmitCheckTypeDescriptor(QualType(FnType, 0)),
    };
    if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
      EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
                           CastedCallee, StaticData);
    } else {
      EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
                SanitizerHandler::CFICheckFail, StaticData,
                {CastedCallee, llvm::UndefValue::get(IntPtrTy)});
    }
  }

  CallArgList Args;
  if (Chain)
    Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
             CGM.getContext().VoidPtrTy);

  // C++17 requires that we evaluate arguments to a call using assignment syntax
  // right-to-left, and that we evaluate arguments to certain other operators
  // left-to-right. Note that we allow this to override the order dictated by
  // the calling convention on the MS ABI, which means that parameter
  // destruction order is not necessarily reverse construction order.
  // FIXME: Revisit this based on C++ committee response to unimplementability.
  EvaluationOrder Order = EvaluationOrder::Default;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
    if (OCE->isAssignmentOp())
      Order = EvaluationOrder::ForceRightToLeft;
    else {
      switch (OCE->getOperator()) {
      case OO_LessLess:
      case OO_GreaterGreater:
      case OO_AmpAmp:
      case OO_PipePipe:
      case OO_Comma:
      case OO_ArrowStar:
        Order = EvaluationOrder::ForceLeftToRight;
        break;
      default:
        break;
      }
    }
  }
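
  // For illustration (examples not from this file): under C++17,
  //   os << f() << g();  // overloaded <<: f() must be evaluated before g()
  //   a = f();           // overloaded =:  f() must be evaluated before 'a'
  // which is why the operator kinds above force an evaluation order.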

  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
               E->getDirectCallee(), /*ParamsToSkip*/ 0, Order);

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
      Args, FnType, /*ChainCall=*/Chain);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call. The way we make this work is to cast to the exact type
  // of the promoted arguments.
  //
  // Chain calls use this same code path to add the invisible chain parameter
  // to the function type.
  if (isa<FunctionNoProtoType>(FnType) || Chain) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
    CalleeTy = CalleeTy->getPointerTo(AS);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
    Callee.setFunctionPointer(CalleePtr);
  }
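
  // For illustration (example not from this file): in C89,
  //   void f();    /* no prototype */
  //   f(1, 2.0f);  /* arguments promote to (int, double) */
  // the call is emitted as a non-variadic call whose callee is cast to a
  // function type built from the promoted arguments.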

  // A HIP function pointer contains a kernel handle when it is used in a
  // triple chevron launch. The kernel stub needs to be loaded from the kernel
  // handle and used as the callee.
  if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
      isa<CUDAKernelCallExpr>(E) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    llvm::Value *Handle = Callee.getFunctionPointer();
    auto *Cast =
        Builder.CreateBitCast(Handle, Handle->getType()->getPointerTo());
    auto *Stub = Builder.CreateLoad(
        Address(Cast, Handle->getType(), CGM.getPointerAlign()));
    Callee.setFunctionPointer(Stub);
  }
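
  // For illustration (a sketch, not from this file): in HIP host code,
  //   auto *FP = kernel;          // FP holds the kernel *handle*
  //   FP<<<Grid, Block>>>(Args);  // the stub is loaded from the handle here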
  llvm::CallBase *CallOrInvoke = nullptr;
  RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
                         E == MustTailCall, E->getExprLoc());

  // Generate a function declaration DISubprogram so it can be used
  // in debug info about call sites.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      FunctionArgList Args;
      QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
      DI->EmitFuncDeclForCallSite(CallOrInvoke,
                                  DI->getFunctionType(CalleeDecl, ResTy, Args),
                                  CalleeDecl);
    }
  }

  return Call;
}

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  Address BaseAddr = Address::invalid();
  if (E->getOpcode() == BO_PtrMemI) {
    BaseAddr = EmitPointerWithAlignment(E->getLHS());
  } else {
    BaseAddr = EmitLValue(E->getLHS()).getAddress(*this);
  }

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
  const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();

  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address MemberAddr =
      EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
                                      &TBAAInfo);

  return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
}

/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(Address addr,
                                            QualType type,
                                            SourceLocation loc) {
  LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
  case TEK_Aggregate:
    return lvalue.asAggregateRValue(*this);
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}
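
// The metadata attached here looks like (a sketch):
//   %div = fdiv float %a, %b, !fpmath !0
//   !0 = !{float 2.500000e+00}  ; maximum error in ULPs
// This is how, for example, OpenCL's relaxed-precision requirement for
// single-precision division is communicated to the backend.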

namespace {
struct LValueOrRValue {
  LValue LV;
  RValue RV;
};
} // end anonymous namespace

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      // Skip unique OVEs.
      if (ov->isUnique()) {
        assert(ov != resultExpr &&
               "A unique OVE cannot be used as the result expression");
        continue;
      }

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isPRValue() && !forLValue &&
          CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);
        LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
                                       AlignmentSource::Decl);
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}