//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

13 #include "ABIInfoImpl.h"
14 #include "CGCUDARuntime.h"
17 #include "CGCleanup.h"
18 #include "CGDebugInfo.h"
19 #include "CGObjCRuntime.h"
20 #include "CGOpenMPRuntime.h"
21 #include "CGRecordLayout.h"
22 #include "CodeGenFunction.h"
23 #include "CodeGenModule.h"
24 #include "ConstantEmitter.h"
25 #include "TargetInfo.h"
26 #include "clang/AST/ASTContext.h"
27 #include "clang/AST/Attr.h"
28 #include "clang/AST/DeclObjC.h"
29 #include "clang/AST/NSAPI.h"
30 #include "clang/AST/StmtVisitor.h"
31 #include "clang/Basic/Builtins.h"
32 #include "clang/Basic/CodeGenOptions.h"
33 #include "clang/Basic/SourceManager.h"
34 #include "llvm/ADT/Hashing.h"
35 #include "llvm/ADT/STLExtras.h"
36 #include "llvm/ADT/StringExtras.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/Intrinsics.h"
39 #include "llvm/IR/IntrinsicsWebAssembly.h"
40 #include "llvm/IR/LLVMContext.h"
41 #include "llvm/IR/MDBuilder.h"
42 #include "llvm/IR/MatrixBuilder.h"
43 #include "llvm/Passes/OptimizationLevel.h"
44 #include "llvm/Support/ConvertUTF.h"
45 #include "llvm/Support/MathExtras.h"
46 #include "llvm/Support/Path.h"
47 #include "llvm/Support/SaveAndRestore.h"
48 #include "llvm/Support/xxhash.h"
49 #include "llvm/Transforms/Utils/SanitizerStats.h"
using namespace clang;
using namespace CodeGen;

// Experiment to make sanitizers easier to debug
static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
    "ubsan-unique-traps", llvm::cl::Optional,
    llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check."));

// TODO: Introduce frontend options to enable this per sanitizer, similar to
// `-fsanitize-trap`.
static llvm::cl::opt<bool> ClSanitizeGuardChecks(
    "ubsan-guard-checks", llvm::cl::Optional,
    llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAllocaWithoutCast - This creates an alloca and inserts it into
/// the entry block if \p ArraySize is nullptr, otherwise inserts it at the
/// current insertion point of the builder.
RawAddress
CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getAsAlign());
  return RawAddress(Alloca, Ty, Align, KnownNonNull);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is cast to the default address space if necessary.
RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                             const Twine &Name,
                                             llvm::Value *ArraySize,
                                             RawAddress *AllocaAddr) {
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
  if (getASTAllocaAddressSpace() != LangAS::Default) {
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(getPostAllocaInsertPoint());
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
  }

  return RawAddress(V, Ty, Align, KnownNonNull);
}

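// Illustrative example: on a target whose allocas live in a non-default
// address space (e.g. AMDGPU, where private allocas are addrspace(5)), the
// cast above produces a pointer usable in the language's default/generic
// address space; the exact address-space numbers are target-defined.
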
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  llvm::AllocaInst *Alloca;
  if (ArraySize)
    Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
  else
    Alloca = new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                                  ArraySize, Name, &*AllocaInsertPt);
  if (Allocas)
    Allocas->Add(Alloca);
  return Alloca;
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                         const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                          RawAddress *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                          const Twine &Name,
                                          RawAddress *Alloca) {
  RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                                       /*ArraySize=*/nullptr, Alloca);

  if (Ty->isConstantMatrixType()) {
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
                     KnownNonNull);
  }
  return Result;
}

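// A ConstantMatrixType is laid out in memory as an array (e.g., a 2x2 float
// matrix as [4 x float]) but is operated on as a flat vector (<4 x float>),
// which is why the temporary's element type is rewritten above. The 2x2
// example is illustrative.
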
RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     CharUnits Align,
                                                     const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                     const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  CGFPOptionsRAII FPOptsRAII(*this, E);
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
                                       Loc);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isPRValue())
    return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // If this is a bitfield-resulting conditional operator, we can special-case
  // emit it. The normal 'EmitLValue' version of this is particularly
  // difficult to codegen for, since creating a single "LValue" for two
  // different sized arguments here is not particularly doable.
  if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
          E->IgnoreParenNoopCasts(getContext()))) {
    if (CondOp->getObjectKind() == OK_BitField)
      return EmitIgnoredConditionalOperator(CondOp);
  }

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise = isa_and_nonnull<VarDecl>(VD) &&
                         VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::FunctionCallee CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          ReferenceTemporary, E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
          GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}

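// For reference, the storage durations handled above map onto code like the
// following (illustrative only):
//
//   const std::string &r = makeString(); // SD_Automatic: destroyed at the
//                                        // end of r's enclosing scope.
//   take(std::string("tmp"));            // SD_FullExpression: destroyed at
//                                        // the end of the full-expression.
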
static RawAddress createReferenceTemporary(CodeGenFunction &CGF,
                                           const MaterializeTemporaryExpr *M,
                                           const Expr *Inner,
                                           RawAddress *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record try to promote it into a
    // constant global under the same rules a normal constant would've been
    // promoted. This is easier on the optimizer and generally emits fewer
    // instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        Ty.isConstantStorage(CGF.getContext(), true, false))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
        auto *GV = new llvm::GlobalVariable(
            CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
            llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
            llvm::GlobalValue::NotThreadLocal,
            CGF.getContext().getTargetAddressSpace(AS));
        CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
        GV->setAlignment(alignment.getAsAlign());
        llvm::Constant *C = GV;
        if (AS != LangAS::Default)
          C = TCG.performAddrSpaceCast(
              CGF.CGM, GV, AS, LangAS::Default,
              GV->getValueType()->getPointerTo(
                  CGF.getContext().getTargetAddressSpace(LangAS::Default)));
        // FIXME: Should we put the new global into a COMDAT?
        return RawAddress(C, GV->getValueType(), alignment);
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

/// Helper method to check if the underlying ABI is AAPCS
static bool isAAPCS(const TargetInfo &TargetInfo) {
  return TargetInfo.getABI().starts_with("aapcs");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->getSubExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    RawAddress Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      llvm::Type *Ty = ConvertTypeForMem(E->getType());
      Object = Object.withElementType(Ty);

      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can. It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations. Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  RawAddress Alloca = Address::invalid();
  RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
    Object = Object.withElementType(TemporaryType);
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this with sanitizers which need
      // more precise lifetime marks. However when inside an "await.suspend"
      // block, we should always avoid conditional cleanup because it creates
      // boolean marker that lives across await_suspend, which can destroy coro
      // frame.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          ((!SanOpts.has(SanitizerKind::HWAddress) &&
            !SanOpts.has(SanitizerKind::Memory) &&
            !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
           inSuspendBlock())) {
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/ false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress();
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
                                               Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}

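// The adjustment loop above covers bindings to subobjects, e.g. (illustrative
// only):
//
//   struct Base { int i; };
//   struct Derived : Base {};
//   const Base &b = Derived();   // derived-to-base adjustment
//   const int &i = Derived().i;  // field adjustment
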
RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer(*this);

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}

/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

*emitHashMix(CGBuilderTy
&Builder
, llvm::Value
*Acc
,
657 Builder
.CreateMul(Ptr
, Builder
.getInt64(0xbf58476d1ce4e5b9u
));
659 Builder
.CreateXor(A0
, Builder
.CreateLShr(A0
, Builder
.getInt64(31)));
660 return Builder
.CreateXor(Acc
, A1
);
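// The multiplier 0xbf58476d1ce4e5b9 and the xor-shift by 31 resemble a round
// of the splitmix64 finalizer, a cheap way to diffuse the pointer bits into
// the accumulated hash.
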
bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}

bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}

bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) ||
         SanOpts.has(SanitizerKind::Alignment) ||
         SanOpts.has(SanitizerKind::ObjectSize) ||
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment,
                                    SanitizerSet SkippedChecks,
                                    llvm::Value *ArraySize) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty.isVolatileQualified())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  // Quickly determine whether we have a pointer to an alloca. It's possible
  // to skip null checks, and some alignment checks, for these pointers. This
  // can reduce compile-time significantly.
  auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());

  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
  llvm::Value *IsNonNull = nullptr;
  bool IsGuaranteedNonNull =
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
  bool AllowNullPointers = isNullPointerAllowed(TCK);
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !IsGuaranteedNonNull) {
    // The glvalue must not be an empty glvalue.
    IsNonNull = Builder.CreateIsNotNull(Ptr);

    // The IR builder can constant-fold the null check if the pointer points to
    // a constant.
    IsGuaranteedNonNull = IsNonNull == True;

    // Skip the null check if the pointer is known to be non-null.
    if (!IsGuaranteedNonNull) {
      if (AllowNullPointers) {
        // When performing pointer casts, it's OK if the value is null.
        // Skip the remaining checks in that case.
        Done = createBasicBlock("null");
        llvm::BasicBlock *Rest = createBasicBlock("not.null");
        Builder.CreateCondBr(IsNonNull, Rest, Done);
        EmitBlock(Rest);
      } else {
        Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
      }
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) &&
      !SkippedChecks.has(SanitizerKind::ObjectSize) &&
      !Ty->isIncompleteType()) {
    uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
    llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
    if (ArraySize)
      Size = Builder.CreateMul(Size, ArraySize);

    // Degenerate case: new X[0] does not need an objectsize check.
    llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
    if (!ConstantSize || !ConstantSize->isNullValue()) {
      // The glvalue must refer to a large enough storage region.
      // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
      //        to check this.
      // FIXME: Get object address space
      llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
      llvm::Value *Min = Builder.getFalse();
      llvm::Value *NullIsUnknown = Builder.getFalse();
      llvm::Value *Dynamic = Builder.getFalse();
      llvm::Value *LargeEnough = Builder.CreateICmpUGE(
          Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
      Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
    }
  }

  llvm::MaybeAlign AlignVal;
  llvm::Value *PtrAsInt = nullptr;

  if (SanOpts.has(SanitizerKind::Alignment) &&
      !SkippedChecks.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getAsMaybeAlign();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
                                             /*ForPointeeType=*/true)
                     .getAsMaybeAlign();

    // The glvalue must be suitably aligned.
    if (AlignVal && *AlignVal > llvm::Align(1) &&
        (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
      PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
      llvm::Value *Align = Builder.CreateAnd(
          PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      if (Aligned != True)
        Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
        llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
        llvm::ConstantInt::get(Int8Ty, TCK)};
    EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
              PtrAsInt ? PtrAsInt : Ptr);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  if (SanOpts.has(SanitizerKind::Vptr) &&
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
    // Ensure that the pointer is non-null before loading it. If there is no
    // compile-time guarantee, reuse the run-time null check or emit a new one.
    if (!IsGuaranteedNonNull) {
      if (!IsNonNull)
        IsNonNull = Builder.CreateIsNotNull(Ptr);
      if (!Done)
        Done = createBasicBlock("vptr.null");
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
      EmitBlock(VptrNotNull);
    }

    // Compute a deterministic hash of the mangled name of the type.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Contained in NoSanitizeList based on the mangled type.
    if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
                                                           Out.str())) {
      // Load the vptr, and mix it with TypeHash.
      llvm::Value *TypeHash =
          llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));

      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
      Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
      llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
                                          Ty->getAsCXXRecordDecl(),
                                          VTableAuthMode::UnsafeUbsanStrip);
      VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);

      llvm::Value *Hash =
          emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal = Builder.CreateAlignedLoad(
          IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
          getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty),
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
        llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

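// Roughly, the vptr check above caches positive results: the mixed hash is
// masked to index a 128-entry table, and only on a cache miss does control
// reach the UBSan runtime (__ubsan_handle_dynamic_type_cache_miss), which
// re-verifies the dynamic type and refills the cache. This summary is
// illustrative; see the UBSan runtime for the authoritative behavior.
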
llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}

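// For example (illustrative), given a declaration like:
//
//   void f(int *p __attribute__((pass_object_size(0))));
//
// the callee receives a hidden size-in-bytes argument for `p`; dividing that
// by sizeof(int) recovers an element count usable as an indexing bound.
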
/// If Base is known to point to the start of an array, return the length of
/// that array. Return 0 if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
                                          const Expr *Base,
                                          QualType &IndexedType,
                                          LangOptions::StrictFlexArraysLevelKind
                                          StrictFlexArraysLevel) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
                                                     StrictFlexArraysLevel)) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());

      if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}

namespace {

/// \p StructAccessBase returns the base \p Expr of a field access. It returns
/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
///
///     p in p->a.b.c.d,
///
/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
/// looking for:
///
///     struct s {
///       struct s *ptr;
///       int count;
///       char array[] __attribute__((counted_by(count)));
///     };
///
/// If we have an expression like \p p->ptr->array[index], we want the
/// \p MemberExpr for \p p->ptr instead of \p p.
class StructAccessBase
    : public ConstStmtVisitor<StructAccessBase, const Expr *> {
  const RecordDecl *ExpectedRD;

  bool IsExpectedRecordDecl(const Expr *E) const {
    QualType Ty = E->getType();
    if (Ty->isPointerType())
      Ty = Ty->getPointeeType();
    return ExpectedRD == Ty->getAsRecordDecl();
  }

public:
  StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}

  //===------------------------------------------------------------------===//
  //                           Visitor Methods
  //===------------------------------------------------------------------===//

  // NOTE: If we build C++ support for counted_by, then we'll have to handle
  // horrors like this:
  //
  //     struct S {
  //       int x, y;
  //       int blah[] __attribute__((counted_by(x)));
  //     } s;
  //
  //     int foo(int index, int val) {
  //       int (S::*IHatePMDs)[] = &S::blah;
  //       (s.*IHatePMDs)[index] = val;
  //     }

  const Expr *Visit(const Expr *E) {
    return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
  }

  const Expr *VisitStmt(const Stmt *S) { return nullptr; }

  // These are the types we expect to return (in order of most to least
  // likely):
  //
  //   1. DeclRefExpr - This is the expression for the base of the structure.
  //      It's exactly what we want to build an access to the \p counted_by
  //      field.
  //   2. MemberExpr - This is the expression that has the same \p RecordDecl
  //      as the flexible array member's lexical enclosing \p RecordDecl. This
  //      allows us to catch things like: "p->p->array"
  //   3. CompoundLiteralExpr - This is for people who create something
  //      heretical like (struct foo has a flexible array member):
  //
  //        (struct foo){ 1, 2 }.blah[idx];
  const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitMemberExpr(const MemberExpr *E) {
    if (IsExpectedRecordDecl(E) && E->isArrow())
      return E;
    const Expr *Res = Visit(E->getBase());
    return !Res && IsExpectedRecordDecl(E) ? E : Res;
  }
  const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitCallExpr(const CallExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (IsExpectedRecordDecl(E))
      return E;
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
};

} // end anonymous namespace
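
// Usage sketch (illustrative): given the expression `p->ptr->array[index]` and
// the RecordDecl for `struct s` above, StructAccessBase(RD).Visit(E) yields
// the MemberExpr for `p->ptr`, the base from which the `count` field can then
// be addressed.
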
using RecIndicesTy =
    SmallVector<std::pair<const RecordDecl *, llvm::Value *>, 8>;

static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *Field,
                                 RecIndicesTy &Indices) {
  const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
  int64_t FieldNo = -1;
  for (const FieldDecl *FD : RD->fields()) {
    if (!Layout.containsFieldDecl(FD))
      // This could happen if the field has a struct type that's empty. I don't
      // know why either.
      continue;

    FieldNo = Layout.getLLVMFieldNo(FD);
    if (FD == Field) {
      Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
      return true;
    }

    QualType Ty = FD->getType();
    if (Ty->isRecordType()) {
      if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
        Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
        return true;
      }
    }
  }

  return false;
}

/// This method is typically called in contexts where we can't generate
/// side-effects, like in __builtin_dynamic_object_size. When finding
/// expressions, only choose those that have either already been emitted or can
/// be loaded without side-effects.
///
///   - \p FAMDecl: the \p Decl for the flexible array member. It may not be
///     within the top-level struct.
///   - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
llvm::Value *CodeGenFunction::EmitCountedByFieldExpr(
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();

  // Find the base struct expr (i.e. p in p->a.b.c.d).
  const Expr *StructBase = StructAccessBase(RD).Visit(Base);
  if (!StructBase || StructBase->HasSideEffects(getContext()))
    return nullptr;

  llvm::Value *Res = nullptr;
  if (const auto *DRE = dyn_cast<DeclRefExpr>(StructBase)) {
    Res = EmitDeclRefLValue(DRE).getPointer(*this);
    Res = Builder.CreateAlignedLoad(ConvertType(DRE->getType()), Res,
                                    getPointerAlign(), "dre.load");
  } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) {
    LValue LV = EmitMemberExpr(ME);
    Address Addr = LV.getAddress();
    Res = Addr.emitRawPointer(*this);
  } else if (StructBase->getType()->isPointerType()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
    Res = Addr.emitRawPointer(*this);
  } else {
    return nullptr;
  }

  llvm::Value *Zero = Builder.getInt32(0);
  RecIndicesTy Indices;

  getGEPIndicesToField(*this, RD, CountDecl, Indices);

  for (auto I = Indices.rbegin(), E = Indices.rend(); I != E; ++I)
    Res = Builder.CreateInBoundsGEP(
        ConvertType(QualType(I->first->getTypeForDecl(), 0)), Res,
        {Zero, I->second}, "..counted_by.gep");

  return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), Res,
                                   getIntAlign(), "..counted_by.load");
}

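// For example (illustrative):
//
//   struct s {
//     int count;
//     char array[] __attribute__((counted_by(count)));
//   };
//
// For `__builtin_dynamic_object_size(p->array, 0)`, the code above emits a
// load of `p->count` through a GEP built from the collected field indices.
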
const FieldDecl *CodeGenFunction::FindCountedByField(const FieldDecl *FD) {
  if (!FD)
    return nullptr;

  const auto *CAT = FD->getType()->getAs<CountAttributedType>();
  if (!CAT)
    return nullptr;

  const auto *CountDRE = cast<DeclRefExpr>(CAT->getCountExpr());
  const auto *CountDecl = CountDRE->getDecl();
  if (const auto *IFD = dyn_cast<IndirectFieldDecl>(CountDecl))
    CountDecl = IFD->getAnonField();

  return dyn_cast<FieldDecl>(CountDecl);
}

void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      getLangOpts().getStrictFlexArraysLevel();
  QualType IndexedType;
  llvm::Value *Bound =
      getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);

  EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
}

void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
                                          llvm::Value *Index,
                                          QualType IndexType,
                                          QualType IndexedType, bool Accessed) {
  if (!Bound)
    return;

  SanitizerScope SanScope(this);

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}

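// Note the asymmetry above: an actual access requires Index < Bound (ULT),
// whereas merely forming the address, as in `&arr[n]` for an n-element array,
// legitimately permits the one-past-the-end index (ULE).
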
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
  if (getLangOpts().OpenMP)
    CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                              E->getSubExpr());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

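// Complex increment adjusts only the real component: per the C99/GNU
// semantics, `z++` is equivalent to `z += 1` (i.e. 1+0i), so the imaginary
// part is carried through unchanged above.
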
void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
                                        TBAAAccessInfo *TBAAInfo,
                                        KnownNonNull_t IsKnownNonNull,
                                        CodeGenFunction &CGF) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        Address Addr = CGF.EmitPointerWithAlignment(
            CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(
              E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo =
                CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr.setAlignment(Align);
          }
        }

        if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
                                          /*MayBeNull=*/true,
                                          CodeGenFunction::CFITCK_UnrelatedCast,
                                          CE->getBeginLoc());
        }

        llvm::Type *ElemTy =
            CGF.ConvertTypeForMem(E->getType()->getPointeeType());
        Addr = Addr.withElementType(ElemTy);
        if (CE->getCastKind() == CK_AddressSpaceConversion)
          Addr = CGF.Builder.CreateAddrSpaceCast(
              Addr, CGF.ConvertType(E->getType()), ElemTy);
        return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
                                            CE->getType());
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
      Address Addr = CGF.EmitPointerWithAlignment(
          CE->getSubExpr(), BaseInfo, nullptr,
          (KnownNonNull_t)(IsKnownNonNull ||
                           CE->getCastKind() == CK_UncheckedDerivedToBase));
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return CGF.GetAddressOfBaseClass(
          Addr, Derived, CE->path_begin(), CE->path_end(),
          CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
  }

  // std::addressof and variants.
  if (auto *Call = dyn_cast<CallExpr>(E)) {
    switch (Call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  return CGF.makeNaturalAddressForPointer(
      CGF.EmitScalarExpr(E), E->getType()->getPointeeType(), CharUnits(),
      /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
}

/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
Address CodeGenFunction::EmitPointerWithAlignment(
    const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
    KnownNonNull_t IsKnownNonNull) {
  Address Addr =
      ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
  if (IsKnownNonNull && !Addr.isKnownNonNull())
    Addr.setKnownNonNull();
  return Addr;
}

llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
  llvm::Value *V = RV.getScalarVal();
  if (auto MPT = T->getAs<MemberPointerType>())
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
  return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
}

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have
  // an identifiable address. Just because the contents of the value are
  // undefined doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *ElTy = ConvertType(E->getType());
  llvm::Type *Ty = UnqualPtrTy;
  return MakeAddrLValue(
      Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}

bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
  const Expr *Base = Obj;
  while (!isa<CXXThisExpr>(Base)) {
    // The result of a dynamic_cast can be null.
    if (isa<CXXDynamicCastExpr>(Base))
      return false;

    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
      Base = CE->getSubExpr();
    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
      Base = PE->getSubExpr();
    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
      if (UO->getOpcode() == UO_Extension)
        Base = UO->getSubExpr();
      else
        return false;
    } else {
      return false;
    }
  }
  return true;
}

CodeGenFunction::EmitCheckedLValue(const Expr
*E
, TypeCheckKind TCK
) {
1472 if (SanOpts
.has(SanitizerKind::ArrayBounds
) && isa
<ArraySubscriptExpr
>(E
))
1473 LV
= EmitArraySubscriptExpr(cast
<ArraySubscriptExpr
>(E
), /*Accessed*/true);
1476 if (!isa
<DeclRefExpr
>(E
) && !LV
.isBitField() && LV
.isSimple()) {
1477 SanitizerSet SkippedChecks
;
1478 if (const auto *ME
= dyn_cast
<MemberExpr
>(E
)) {
1479 bool IsBaseCXXThis
= IsWrappedCXXThis(ME
->getBase());
1481 SkippedChecks
.set(SanitizerKind::Alignment
, true);
1482 if (IsBaseCXXThis
|| isa
<DeclRefExpr
>(ME
->getBase()))
1483 SkippedChecks
.set(SanitizerKind::Null
, true);
1485 EmitTypeCheck(TCK
, E
->getExprLoc(), LV
, E
->getType(), SkippedChecks
);
/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
LValue CodeGenFunction::EmitLValue(const Expr *E,
                                   KnownNonNull_t IsKnownNonNull) {
  LValue LV = EmitLValueHelper(E, IsKnownNonNull);
  if (IsKnownNonNull && !LV.isKnownNonNull())
    LV.setKnownNonNull();
  return LV;
}

static QualType getConstantExprReferredType(const FullExpr *E,
                                            const ASTContext &Ctx) {
  const Expr *SE = E->getSubExpr()->IgnoreImplicit();
  if (isa<OpaqueValueExpr>(SE))
    return SE->getType();
  return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
}

LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
                                         KnownNonNull_t IsKnownNonNull) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::CXXRewrittenBinaryOperatorClass:
    return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
                      IsKnownNonNull);
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass: {
    const ConstantExpr *CE = cast<ConstantExpr>(E);
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
      QualType RetType = getConstantExprReferredType(CE, getContext());
      return MakeNaturalAlignAddrLValue(Result, RetType);
    }
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
  }
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
                      IsKnownNonNull);
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitAggExprToLValue(E);

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
      Address Addr = LV.getAddress();
      llvm::Value *V = Addr.getBasePointer();
      Scope.ForceCleanup({&V});
      Addr.replaceBasePointer(V);
      return LValue::MakeAddr(Addr, LV.getType(), getContext(),
                              LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass: {
    auto *DAE = cast<CXXDefaultArgExpr>(E);
    CXXDefaultArgExprScope Scope(*this, DAE);
    return EmitLValue(DAE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXDefaultInitExprClass: {
    auto *DIE = cast<CXXDefaultInitExpr>(E);
    CXXDefaultInitExprScope Scope(*this, DIE);
    return EmitLValue(DIE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::MatrixSubscriptExprClass:
    return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
  case Expr::ArraySectionExprClass:
    return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::CXXThisExprClass:
    return MakeAddrLValue(LoadCXXThisAddress(), E->getType());
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
                      IsKnownNonNull);
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXAddrspaceCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  case Expr::PackIndexingExprClass:
    return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
  }
}


/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}
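
// Illustrative example (not part of the emitter): given
//
//   void f() {
//     const int x = 42;          // CEK_AsValueOnly
//     const volatile int v = 1;  // CEK_None: volatile disqualifies it
//     const int &r = x;          // CEK_AsValueOrReference
//     [=] { return x; }();       // 'x' can fold to 42 inside the lambda
//   }
//
// naming 'x' without odr-using it can be emitted directly as the constant 42
// instead of forcing a global copy of the variable.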

/// Try to emit a reference to the given value without producing it as
/// an l-value. This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // In CUDA/HIP device compilation, a lambda may capture a reference variable
  // referencing a global host variable by copy. In this case the lambda should
  // make a copy of the value of the global host variable. The DRE of the
  // captured reference variable cannot be emitted as load from the host
  // global variable as compile time constant, since the host variable is not
  // accessible on device. The DRE of the captured reference variable has to be
  // loaded from captures.
  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
      refExpr->refersToEnclosingVariableOrCapture()) {
    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
    if (MD && MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      const APValue::LValueBase &base = result.Val.getLValueBase();
      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
        if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
          if (!VD->hasAttr<CUDADeviceAttr>()) {
            return ConstantEmission();
          }
        }
      }
    }
  }

  // Emit as a constant.
  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
                                               result.Val, resultType);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, result.Val);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, result.Val);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
                                                        const MemberExpr *ME) {
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
    return DeclRefExpr::Create(
        CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
  }
  return nullptr;
}

CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
    return tryEmitAsConstant(DRE);
  return ConstantEmission();
}

llvm::Value *CodeGenFunction::emitScalarConstant(
    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
  assert(Constant && "not a constant");
  if (Constant.isReference())
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
                            E->getExprLoc())
        .getScalarVal();
  return Constant.getValue();
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
}

static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}

static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums, bool IsBool) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    ED->getValueRange(End, Min);
  }
  return true;
}

llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
                       hasBooleanRepresentation(Ty)))
    return nullptr;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}

bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
                                           SourceLocation Loc) {
  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
  if (!HasBoolCheck && !HasEnumCheck)
    return false;

  bool IsBool = hasBooleanRepresentation(Ty) ||
                NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
  bool NeedsBoolCheck = HasBoolCheck && IsBool;
  bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
  if (!NeedsBoolCheck && !NeedsEnumCheck)
    return false;

  // Single-bit booleans don't need to be checked. Special-case this to avoid
  // a bit width mismatch when handling bitfield values. This is handled by
  // EmitFromMemory for the non-bitfield case.
  if (IsBool &&
      cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
    return false;

  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
    return true;

  auto &Ctx = getLLVMContext();
  SanitizerScope SanScope(this);
  llvm::Value *Check;
  --End;
  if (!Min) {
    Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
  } else {
    llvm::Value *Upper =
        Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
    llvm::Value *Lower =
        Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
    Check = Builder.CreateAnd(Upper, Lower);
  }
  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
                                  EmitCheckTypeDescriptor(Ty)};
  SanitizerMask Kind =
      NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
  EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
            StaticArgs, EmitCheckValue(Value));
  return true;
}
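
// Schematically, for 'enum E { A, B, C };' with -fsanitize=enum the guard
// emitted above looks roughly like
//
//   %ok = icmp ule i32 %v, 2
//   br i1 %ok, label %cont, label %handler   ; %handler reports
//                                            ; load-invalid-value
//
// (Illustrative only; the exact compare sequence depends on Min, End and the
// signedness of the type, as computed above.)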

llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty, SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               TBAAAccessInfo TBAAInfo,
                                               bool isNontemporal) {
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
                              NotKnownNonNull);

  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    // Boolean vectors use `iN` as storage type.
    if (ClangVecTy->isExtVectorBoolType()) {
      llvm::Type *ValTy = ConvertType(Ty);
      unsigned ValNumElems =
          cast<llvm::FixedVectorType>(ValTy)->getNumElements();
      // Load the `iP` storage object (P is the padded vector size).
      auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
      const auto *RawIntTy = RawIntV->getType();
      assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
      // Bitcast iP --> <P x i1>.
      auto *PaddedVecTy = llvm::FixedVectorType::get(
          Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
      llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
      // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
      V = emitBoolVecConversion(V, ValNumElems, "extractvec");

      return EmitFromMemory(V, Ty);
    }

    // Handle vectors of size 3 like size 4 for better performance.
    const llvm::Type *EltTy = Addr.getElementType();
    const auto *VTy = cast<llvm::FixedVectorType>(EltTy);

    if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {

      llvm::VectorType *vec4Ty =
          llvm::FixedVectorType::get(VTy->getElementType(), 4);
      Address Cast = Addr.withElementType(vec4Ty);
      // Now load value.
      llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
      V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  // Atomic operations have to be done on integral types.
  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
    return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
  }

  Addr =
      Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));

  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node = llvm::MDNode::get(
        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);

  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
    // In order to prevent the optimizer from throwing away the check, don't
    // attach range metadata to the load.
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
      Load->setMetadata(llvm::LLVMContext::MD_noundef,
                        llvm::MDNode::get(getLLVMContext(), std::nullopt));
    }

  return EmitFromMemory(Load, Ty);
}
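
// As an illustration, loading an ext_vector_type(3) of bool stored as an i8
// proceeds roughly as
//
//   %bits = load i8, ptr %p                  ; "load_bits"
//   %pad  = bitcast i8 %bits to <8 x i1>     ; iP --> <P x i1>
//   %v    = shufflevector ... to <3 x i1>    ; "extractvec"
//
// (Sketch; emitBoolVecConversion produces the final <N x i1> value.)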

/// Converts a scalar value from its primary IR type (as returned
/// by ConvertType) to its load/store type (as returned by
/// convertTypeForLoadStore).
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
    bool Signed = Ty->isSignedIntegerOrEnumerationType();
    return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
  }

  if (Ty->isExtVectorBoolType()) {
    llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
    // Expand to the memory bit width.
    unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
    // <N x i1> --> <P x i1>.
    Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
    // <P x i1> --> iP.
    Value = Builder.CreateBitCast(Value, StoreTy);
  }

  return Value;
}

/// Converts a scalar value from its load/store type (as returned
/// by convertTypeForLoadStore) to its primary IR type (as returned
/// by ConvertType).
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  if (Ty->isExtVectorBoolType()) {
    const auto *RawIntTy = Value->getType();
    // Bitcast iP --> <P x i1>.
    auto *PaddedVecTy = llvm::FixedVectorType::get(
        Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
    auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
    // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
    llvm::Type *ValTy = ConvertType(Ty);
    unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
    return emitBoolVecConversion(V, ValNumElems, "extractvec");
  }

  if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
    llvm::Type *ResTy = ConvertType(Ty);
    return Builder.CreateTrunc(Value, ResTy, "loadedv");
  }

  return Value;
}

// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType), if it points to an array (the memory type of MatrixType).
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr,
                                            CodeGenFunction &CGF,
                                            bool IsVector = true) {
  auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
  if (ArrayTy && IsVector) {
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    return Addr.withElementType(VectorTy);
  }
  auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
  if (VectorTy && !IsVector) {
    auto *ArrayTy = llvm::ArrayType::get(
        VectorTy->getElementType(),
        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());

    return Addr.withElementType(ArrayTy);
  }

  return Addr;
}

// Emit a store of a matrix LValue. This may require casting the original
// pointer to memory address (ArrayType) to a pointer to the value type
// (VectorType).
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
                                    bool isInit, CodeGenFunction &CGF) {
  Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
                                           value->getType()->isVectorTy());
  CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
                        lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
                        lvalue.isNontemporal());
}
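
// For a 'float __attribute__((matrix_type(2, 2)))' the memory type is
// [4 x float] while the value type is <4 x float>; after the retyping above,
// the store is a plain vector store, roughly
//
//   store <4 x float> %val, ptr %m
//
// (Illustrative; with opaque pointers only the element-type annotation on the
// Address changes.)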

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
                                        bool Volatile, QualType Ty,
                                        LValueBaseInfo BaseInfo,
                                        TBAAAccessInfo TBAAInfo,
                                        bool isInit, bool isNontemporal) {
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
                              NotKnownNonNull);

  llvm::Type *SrcTy = Value->getType();
  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
    if (!CGM.getCodeGenOpts().PreserveVec3Type) {
      // Handle vec3 special.
      if (VecTy && !ClangVecTy->isExtVectorBoolType() &&
          cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
        // Our source is a vec3, do a shuffle vector to make it a vec4.
        Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
                                            "extractVec");
        SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
      }
      if (Addr.getElementType() != SrcTy) {
        Addr = Addr.withElementType(SrcTy);
      }
    }
  }

  Value = EmitToMemory(Value, Ty);

  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() ||
      (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
    EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
    return;
  }

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node =
        llvm::MDNode::get(Store->getContext(),
                          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  if (lvalue.getType()->isConstantMatrixType()) {
    EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
    return;
  }

  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getType(), lvalue.getBaseInfo(),
                    lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}

// Emit a load of an LValue of matrix type. This may require casting the
// pointer to memory address (ArrayType) to a pointer to the value type
// (VectorType).
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
                                     CodeGenFunction &CGF) {
  assert(LV.getType()->isConstantMatrixType());
  Address Addr = MaybeConvertMatrixAddress(LV.getAddress(), CGF);
  LV.setAddress(Addr);
  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
}

RValue CodeGenFunction::EmitLoadOfAnyValue(LValue LV, AggValueSlot Slot,
                                           SourceLocation Loc) {
  QualType Ty = LV.getType();
  switch (getEvaluationKind(Ty)) {
  case TEK_Scalar:
    return EmitLoadOfLValue(LV, Loc);
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
  case TEK_Aggregate:
    EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
    return Slot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    Address AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    // In MRC mode, we do a load+autorelease.
    if (!getLangOpts().ObjCAutoRefCount) {
      return RValue::get(EmitARCLoadWeak(LV.getAddress()));
    }

    // In ARC mode, we load retained and then consume the value.
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    if (LV.getType()->isConstantMatrixType())
      return EmitLoadOfMatrixLValue(LV, Loc, *this);

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV, Loc));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
                                              LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt()) {
    return EmitLoadOfExtVectorElementLValue(LV);
  }

  // Global Register variables always invoke intrinsics
  if (LV.isGlobalReg())
    return EmitLoadOfGlobalRegLValue(LV);

  if (LV.isMatrixElt()) {
    llvm::Value *Idx = LV.getMatrixIdx();
    if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
      const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
      llvm::MatrixBuilder MB(Builder);
      MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
    }
    llvm::LoadInst *Load =
        Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
  }

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV, Loc);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 SourceLocation Loc) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  Address Ptr = LV.getBitFieldAddress();
  llvm::Value *Val =
      Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");

  bool UseVolatile = LV.isVolatileQualified() &&
                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
    unsigned HighBits = StorageSize - Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Offset + HighBits)
      Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
  } else {
    if (Offset)
      Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
      Val = Builder.CreateAnd(
          Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
  EmitScalarRangeCheck(Val, LV.getType(), Loc);
  return RValue::get(Val);
}
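
// For example, with 'struct S { int a : 3; int b : 5; };' packed into one i8
// storage unit, a load of 's.b' (signed, Offset=3, Size=5, StorageSize=8)
// emits roughly
//
//   %ld   = load i8, ptr %s          ; "bf.load"
//   %ashr = ashr i8 %ld, 3           ; "bf.ashr" (HighBits == 0, so no shl)
//   %res  = sext i8 %ashr to i32     ; "bf.cast"
//
// (Sketch; the real storage unit and widths come from CGBitFieldInfo.)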

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
                                        LV.isVolatileQualified());

  // HLSL allows treating scalars as one-element vectors. Converting the scalar
  // IR value to a vector here allows the rest of codegen to behave as normal.
  if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
    llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
    llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
    Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
  }

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<int, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(getAccessedFieldNo(i, Elts));

  Vec = Builder.CreateShuffleVector(Vec, Mask);
  return RValue::get(Vec);
}
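
// E.g. for 'float4 v; ... v.zw' the accessed fields are {2, 3}, so the load
// becomes roughly
//
//   %vec = load <4 x float>, ptr %v
//   %res = shufflevector <4 x float> %vec, <4 x float> poison,
//                        <2 x i32> <i32 2, i32 3>
//
// while a single component such as 'v.y' is just an extractelement.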

/// Generates lvalue for partial ext_vector access.
Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
  Address VectorAddress = LV.getExtVectorAddress();
  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);

  Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);

  const llvm::Constant *Elts = LV.getExtVectorElts();
  unsigned ix = getAccessedFieldNo(0, Elts);

  Address VectorBasePtrPlusIx =
      Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
                                     "vector.elt");

  return VectorBasePtrPlusIx;
}

/// Loads of global named registers are always calls to intrinsics.
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
  llvm::Value *Call = Builder.CreateCall(
      F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
  if (OrigTy->isPointerTy())
    Call = Builder.CreateIntToPtr(Call, OrigTy);
  return RValue::get(Call);
}
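
// For example, 'register unsigned long sp asm("sp");' read on a 64-bit target
// becomes approximately
//
//   %sp = call i64 @llvm.read_register.i64(metadata !0)
//   !0 = !{!"sp"}
//
// (Illustrative; pointer-typed register variables add an inttoptr afterwards.)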

/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
                                            Dst.isVolatileQualified());
      auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
      if (IRStoreTy) {
        auto *IRVecTy = llvm::FixedVectorType::get(
            Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
        Vec = Builder.CreateBitCast(Vec, IRVecTy);
        // iN --> <N x i1>.
      }
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      if (IRStoreTy) {
        // <N x i1> --> <iN>.
        Vec = Builder.CreateBitCast(Vec, IRStoreTy);
      }
      Builder.CreateStore(Vec, Dst.getVectorAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    if (Dst.isGlobalReg())
      return EmitStoreThroughGlobalRegLValue(Src, Dst);

    if (Dst.isMatrixElt()) {
      llvm::Value *Idx = Dst.getMatrixIdx();
      if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
        const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
        llvm::MatrixBuilder MB(Builder);
        MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
      }
      llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
      llvm::Value *Vec =
          Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
      Builder.CreateStore(Vec, Dst.getMatrixAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      if (isInit) {
        Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
        break;
      }
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      if (isInit)
        // Initialize and then skip the primitive store.
        EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
      else
        EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(),
                         /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    Address LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
    Address LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = IntPtrTy;
      Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
      llvm::Value *RHS = dst.emitRawPointer(*this);
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
                                                ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    }
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
  llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
  Address Ptr = Dst.getBitFieldAddress();

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  // Cast the source to the storage type and shift it into place.
  SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
                                 /*isSigned=*/false);
  llvm::Value *MaskedVal = SrcVal;

  const bool UseVolatile =
      CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  // See if there are other bits in the bitfield's storage we'll need to load
  // and mask together with source before storing.
  if (StorageSize != Info.Size) {
    assert(StorageSize > Info.Size && "Invalid bitfield size.");
    llvm::Value *Val =
        Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");

    // Mask the source value as needed.
    if (!hasBooleanRepresentation(Dst.getType()))
      SrcVal = Builder.CreateAnd(
          SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
          "bf.value");
    MaskedVal = SrcVal;
    if (Offset)
      SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");

    // Mask out the original value.
    Val = Builder.CreateAnd(
        Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
        "bf.clear");

    // Or together the unchanged values and the source value.
    SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
  } else {
    assert(Offset == 0);
    // According to the AACPS:
    // When a volatile bit-field is written, and its container does not overlap
    // with any non-bit-field member, its container must be read exactly once
    // and written exactly once using the access width appropriate to the type
    // of the container. The two accesses are not atomic.
    if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
        CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
      Builder.CreateLoad(Ptr, true, "bf.load");
  }

  // Write the new value back out.
  Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());

  // Return the new value of the bit-field, if requested.
  if (Result) {
    llvm::Value *ResultVal = MaskedVal;

    // Sign extend the value if needed.
    if (Info.IsSigned) {
      assert(Info.Size <= StorageSize);
      unsigned HighBits = StorageSize - Info.Size;
      if (HighBits) {
        ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
        ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
      }
    }

    ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
                                      "bf.result.cast");
    *Result = EmitFromMemory(ResultVal, Dst.getType());
  }
}
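
// Schematically, storing %v into 's.b' from the earlier bitfield example is a
// read/modify/write of the shared storage unit:
//
//   %old = load i8, ptr %s        ; "bf.load"
//   %src = and i8 %v, 31          ; "bf.value": keep the low Size bits
//   %pos = shl i8 %src, 3         ; "bf.shl": shift into place
//   %msk = and i8 %old, 7         ; "bf.clear": drop the old 'b' bits
//   %new = or i8 %msk, %pos       ; "bf.set"
//   store i8 %new, ptr %s
//
// (Sketch only; the masks follow StorageSize/Offset/Size as computed above.)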

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // HLSL allows storing to scalar values through ExtVector component LValues.
  // To support this we need to handle the case where the destination address is
  // a scalar.
  Address DstAddr = Dst.getExtVectorAddress();
  if (!DstAddr.getElementType()->isVectorTy()) {
    assert(!Dst.getType()->isVectorType() &&
           "this should only occur for non-vector l-values");
    Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
    return;
  }

  // This access turns into a read/modify/write of the vector. Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
        cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shuffle vector if the src and destination are the same number of
      // elements, and restore the vector mask since it is on the side it will
      // be stored.
      SmallVector<int, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i;

      Vec = Builder.CreateShuffleVector(SrcVal, Mask);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that? This could be simpler.
      SmallVector<int, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(i);
      ExtMask.resize(NumDstElts, -1);
      llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
      // build identity mask.
      SmallVector<int, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(i);

      // When the vector size is odd and .odd or .hi is used, the last element
      // of the Elts constant array will be one past the size of the vector.
      // Ignore the last element here, if it is greater than the mask size.
      if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
        NumSrcElts--;

      // modify when what gets shuffled in
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
    } else {
      // We should never shorten the vector
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector), and the target is a vector it must
    // be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
                      Dst.isVolatileQualified());
}
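
// E.g. 'v.yx = w;' with 'float2 v, w' takes the NumDstElts == NumSrcElts path:
// the mask {1, 0} un-swizzles the source,
//
//   %new = shufflevector <2 x float> %w, <2 x float> poison,
//                        <2 x i32> <i32 1, i32 0>
//
// so each source element lands in its accessed field before the store.
// (Illustrative; the masks come from getAccessedFieldNo.)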

/// Stores of global named registers are always calls to intrinsics.
void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
  assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
  assert(RegName && "Register LValue is not metadata");

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
  llvm::Value *Value = Src.getScalarVal();
  if (OrigTy->isPointerTy())
    Value = Builder.CreatePtrToInt(Value, Ty);
  Builder.CreateCall(
      F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
}

// setObjCGCLValueClass - sets class of the lvalue for the purpose of
// generating the write-barrier API. It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If ivar is a structure pointer, assigning to a field of
      // this struct follows gcc's behavior and makes it a non-ivar
      // write-barrier conservatively.
      ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not
      // the same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not
      // the same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}

static LValue EmitThreadPrivateVarDeclLValue(
    CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
    llvm::Type *RealVarTy, SourceLocation Loc) {
  if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
    Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
        CGF, VD, Addr, Loc);
  else
    Addr =
        CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);

  Addr = Addr.withElementType(RealVarTy);
  return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}

static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
                                           const VarDecl *VD, QualType T) {
  std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  // Return an invalid address if variable is MT_To (or MT_Enter starting with
  // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
  // and MT_To (or MT_Enter) with unified memory, return a valid address.
  if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
                *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
               !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
    return Address::invalid();
  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
          ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
            *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
           CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
         "Expected link clause OR to clause with unified memory enabled.");
  QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
  Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
  return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
}

Address
CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
                                     LValueBaseInfo *PointeeBaseInfo,
                                     TBAAAccessInfo *PointeeTBAAInfo) {
  llvm::LoadInst *Load =
      Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
  CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
  return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
                                      CharUnits(), /*ForPointeeType=*/true,
                                      PointeeBaseInfo, PointeeTBAAInfo);
}

LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
  LValueBaseInfo PointeeBaseInfo;
  TBAAAccessInfo PointeeTBAAInfo;
  Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
                                            &PointeeTBAAInfo);
  return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
                        PointeeBaseInfo, PointeeTBAAInfo);
}

Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
                                           const PointerType *PtrTy,
                                           LValueBaseInfo *BaseInfo,
                                           TBAAAccessInfo *TBAAInfo) {
  llvm::Value *Addr = Builder.CreateLoad(Ptr);
  return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
                                      CharUnits(), /*ForPointeeType=*/true,
                                      BaseInfo, TBAAInfo);
}

LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
                                                const PointerType *PtrTy) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
  return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
}
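
// E.g. loading through an 'int &r' parameter first loads the reference slot
// itself and then forms an lvalue for the pointee:
//
//   %ref = load ptr, ptr %r.addr   ; EmitLoadOfReference
//   ...%ref is then used as the address of the referenced int...
//
// with the TBAA above describing the reference slot, not the pointee.
// (Schematic IR.)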

static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  QualType T = E->getType();

  // If it's thread_local, emit a call to its wrapper function instead.
  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
      CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
    return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
  // Check if the variable is marked as declare target with link clause in
  // device codegen.
  if (CGF.getLangOpts().OpenMPIsTargetDevice) {
    Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
    if (Addr.isValid())
      return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  }

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);

  if (VD->getTLSKind() != VarDecl::TLS_None)
    V = CGF.Builder.CreateThreadLocalAddress(V);

  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
  Address Addr(V, RealVarTy, Alignment);
  // Emit reference to the private copy of the variable if it is an OpenMP
  // threadprivate variable.
  if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
      VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
    return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
                                          E->getExprLoc());
  }
  LValue LV = VD->getType()->isReferenceType() ?
      CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
                                    AlignmentSource::Decl) :
      CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}
*CodeGenModule::getRawFunctionPointer(GlobalDecl GD
,
2880 const FunctionDecl
*FD
= cast
<FunctionDecl
>(GD
.getDecl());
2881 if (FD
->hasAttr
<WeakRefAttr
>()) {
2882 ConstantAddress aliasee
= GetWeakRefReference(FD
);
2883 return aliasee
.getPointer();
2886 llvm::Constant
*V
= GetAddrOfFunction(GD
, Ty
);
2890 static LValue
EmitFunctionDeclLValue(CodeGenFunction
&CGF
, const Expr
*E
,
2892 const FunctionDecl
*FD
= cast
<FunctionDecl
>(GD
.getDecl());
2893 llvm::Constant
*V
= CGF
.CGM
.getFunctionPointer(GD
);
2894 CharUnits Alignment
= CGF
.getContext().getDeclAlign(FD
);
2895 return CGF
.MakeAddrLValue(V
, E
->getType(), Alignment
,
2896 AlignmentSource::Decl
);
2899 static LValue
EmitCapturedFieldLValue(CodeGenFunction
&CGF
, const FieldDecl
*FD
,
2900 llvm::Value
*ThisValue
) {
2902 return CGF
.EmitLValueForLambdaField(FD
, ThisValue
);

/// Named Registers are named metadata pointing to the register name
/// which will be read from/written to as an argument to the intrinsic
/// @llvm.read/write_register.
/// So far, only the name is being passed down, but other options such as
/// register type, allocation type or even optimization options could be
/// passed down via the metadata node.
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
  SmallString<64> Name("llvm.named.register.");
  AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
  assert(Asm->getLabel().size() < 64-Name.size() &&
         "Register name too big");
  Name.append(Asm->getLabel());
  llvm::NamedMDNode *M =
      CGM.getModule().getOrInsertNamedMetadata(Name);
  if (M->getNumOperands() == 0) {
    llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
                                              Asm->getLabel());
    llvm::Metadata *Ops[] = {Str};
    M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
  }

  CharUnits Alignment = CGM.getContext().getDeclAlign(VD);

  llvm::Value *Ptr =
      llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
  return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
}
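
// The register name ends up as module-level named metadata, e.g. for
// 'register unsigned long sp asm("sp");' roughly
//
//   !llvm.named.register.sp = !{!0}
//   !0 = !{!"sp"}
//
// which the read/write paths above then hand to @llvm.read_register and
// @llvm.write_register. (Illustrative dump for the "sp" case.)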

/// Determine whether we can emit a reference to \p VD from the current
/// context, despite not necessarily having seen an odr-use of the variable in
/// this context.
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
                                               const DeclRefExpr *E,
                                               const VarDecl *VD) {
  // For a variable declared in an enclosing scope, do not emit a spurious
  // reference even if we have a capture, as that will emit an unwarranted
  // reference to our capture state, and will likely generate worse code than
  // emitting a local copy.
  if (E->refersToEnclosingVariableOrCapture())
    return false;

  // For a local declaration declared in this function, we can always reference
  // it even if we don't have an odr-use.
  if (VD->hasLocalStorage()) {
    return VD->getDeclContext() ==
           dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
  }

  // For a global declaration, we can emit a reference to it if we know
  // for sure that we are able to emit a definition of it.
  VD = VD->getDefinition(CGF.getContext());
  if (!VD)
    return false;

  // Don't emit a spurious reference if it might be to a variable that only
  // exists on a different device / target.
  // FIXME: This is unnecessarily broad. Check whether this would actually be a
  // cross-target reference.
  if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
      CGF.getLangOpts().OpenCL) {
    return false;
  }

  // We can emit a spurious reference only if the linkage implies that we'll
  // be emitting a non-interposable symbol that will be retained until link
  // time.
  switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
  case llvm::GlobalValue::ExternalLinkage:
  case llvm::GlobalValue::LinkOnceODRLinkage:
  case llvm::GlobalValue::WeakODRLinkage:
  case llvm::GlobalValue::InternalLinkage:
  case llvm::GlobalValue::PrivateLinkage:
    return true;
  default:
    return false;
  }
}
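
// Illustrative example: given
//
//   const int N = 4;                   // internal linkage in C++
//   void g() { [] { return N; }(); }   // names N without odr-using it
//
// a spurious reference to N is fine (internal linkage, definition visible),
// whereas an extern variable without a visible definition, or any variable in
// an OpenMP/CUDA/OpenCL compilation, fails the checks above.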
2983 LValue
CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr
*E
) {
2984 const NamedDecl
*ND
= E
->getDecl();
2985 QualType T
= E
->getType();
2987 assert(E
->isNonOdrUse() != NOUR_Unevaluated
&&
2988 "should not emit an unevaluated operand");
2990 if (const auto *VD
= dyn_cast
<VarDecl
>(ND
)) {
2991 // Global Named registers access via intrinsics only
2992 if (VD
->getStorageClass() == SC_Register
&&
2993 VD
->hasAttr
<AsmLabelAttr
>() && !VD
->isLocalVarDecl())
2994 return EmitGlobalNamedRegister(VD
, CGM
);
2996 // If this DeclRefExpr does not constitute an odr-use of the variable,
2997 // we're not permitted to emit a reference to it in general, and it might
2998 // not be captured if capture would be necessary for a use. Emit the
2999 // constant value directly instead.
3000 if (E
->isNonOdrUse() == NOUR_Constant
&&
3001 (VD
->getType()->isReferenceType() ||
3002 !canEmitSpuriousReferenceToVariable(*this, E
, VD
))) {
3003 VD
->getAnyInitializer(VD
);
3004 llvm::Constant
*Val
= ConstantEmitter(*this).emitAbstract(
3005 E
->getLocation(), *VD
->evaluateValue(), VD
->getType());
3006 assert(Val
&& "failed to emit constant expression");
3008 Address Addr
= Address::invalid();
3009 if (!VD
->getType()->isReferenceType()) {
3010 // Spill the constant value to a global.
3011 Addr
= CGM
.createUnnamedGlobalFrom(*VD
, Val
,
3012 getContext().getDeclAlign(VD
));
3013 llvm::Type
*VarTy
= getTypes().ConvertTypeForMem(VD
->getType());
3014 auto *PTy
= llvm::PointerType::get(
3015 VarTy
, getTypes().getTargetAddressSpace(VD
->getType()));
3016 Addr
= Builder
.CreatePointerBitCastOrAddrSpaceCast(Addr
, PTy
, VarTy
);
3018 // Should we be using the alignment of the constant pointer we emitted?
3019 CharUnits Alignment
=
3020 CGM
.getNaturalTypeAlignment(E
->getType(),
3021 /* BaseInfo= */ nullptr,
3022 /* TBAAInfo= */ nullptr,
3023 /* forPointeeType= */ true);
3024 Addr
= makeNaturalAddressForPointer(Val
, T
, Alignment
);
3026 return MakeAddrLValue(Addr
, T
, AlignmentSource::Decl
);
3029 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3031 // Check for captured variables.
3032 if (E
->refersToEnclosingVariableOrCapture()) {
3033 VD
= VD
->getCanonicalDecl();
3034 if (auto *FD
= LambdaCaptureFields
.lookup(VD
))
3035 return EmitCapturedFieldLValue(*this, FD
, CXXABIThisValue
);
3036 if (CapturedStmtInfo
) {
3037 auto I
= LocalDeclMap
.find(VD
);
3038 if (I
!= LocalDeclMap
.end()) {
3040 if (VD
->getType()->isReferenceType())
3041 CapLVal
= EmitLoadOfReferenceLValue(I
->second
, VD
->getType(),
3042 AlignmentSource::Decl
);
3044 CapLVal
= MakeAddrLValue(I
->second
, T
);
3045 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3047 if (getLangOpts().OpenMP
&&
3048 CGM
.getOpenMPRuntime().isNontemporalDecl(VD
))
3049 CapLVal
.setNontemporal(/*Value=*/true);
3053 EmitCapturedFieldLValue(*this, CapturedStmtInfo
->lookup(VD
),
3054 CapturedStmtInfo
->getContextValue());
3055 Address LValueAddress
= CapLVal
.getAddress();
3056 CapLVal
= MakeAddrLValue(Address(LValueAddress
.emitRawPointer(*this),
3057 LValueAddress
.getElementType(),
3058 getContext().getDeclAlign(VD
)),
3060 LValueBaseInfo(AlignmentSource::Decl
),
3061 CapLVal
.getTBAAInfo());
3062 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3064 if (getLangOpts().OpenMP
&&
3065 CGM
.getOpenMPRuntime().isNontemporalDecl(VD
))
3066 CapLVal
.setNontemporal(/*Value=*/true);
3070 assert(isa
<BlockDecl
>(CurCodeDecl
));
3071 Address addr
= GetAddrOfBlockDecl(VD
);
3072 return MakeAddrLValue(addr
, T
, AlignmentSource::Decl
);
3076 // FIXME: We should be able to assert this for FunctionDecls as well!
3077 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3078 // those with a valid source location.
3079 assert((ND
->isUsed(false) || !isa
<VarDecl
>(ND
) || E
->isNonOdrUse() ||
3080 !E
->getLocation().isValid()) &&
3081 "Should not use decl without marking it used!");
3083 if (ND
->hasAttr
<WeakRefAttr
>()) {
3084 const auto *VD
= cast
<ValueDecl
>(ND
);
3085 ConstantAddress Aliasee
= CGM
.GetWeakRefReference(VD
);
3086 return MakeAddrLValue(Aliasee
, T
, AlignmentSource::Decl
);
  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
    // Check if this is a global variable.
    if (VD->hasLinkage() || VD->isStaticDataMember())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    Address addr = Address::invalid();

    // The variable should generally be present in the local decl map.
    auto iter = LocalDeclMap.find(VD);
    if (iter != LocalDeclMap.end()) {
      addr = iter->second;

    // Otherwise, it might be static local we haven't emitted yet for
    // some reason; most likely, because it's in an outer function.
    } else if (VD->isStaticLocal()) {
      llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
          *VD, CGM.getLLVMLinkageVarDefinition(VD));
      addr = Address(
          var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));

    // No other cases for now.
    } else {
      llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
    }

    // Handle threadlocal function locals.
    if (VD->getTLSKind() != VarDecl::TLS_None)
      addr = addr.withPointer(
          Builder.CreateThreadLocalAddress(addr.getBasePointer()),
          NotKnownNonNull);

    // Check for OpenMP threadprivate variables.
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
      return EmitThreadPrivateVarDeclLValue(
          *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
          E->getExprLoc());
    }

    // Drill into block byref variables.
    bool isBlockByref = VD->isEscapingByref();
    if (isBlockByref) {
      addr = emitBlockByrefAddress(addr, VD);
    }

    // Drill into reference types.
    LValue LV = VD->getType()->isReferenceType() ?
        EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
        MakeAddrLValue(addr, T, AlignmentSource::Decl);

    bool isLocalStorage = VD->hasLocalStorage();

    bool NonGCable = isLocalStorage &&
                     !VD->getType()->isReferenceType() &&
                     !isBlockByref;
    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }

    bool isImpreciseLifetime =
      (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
    if (isImpreciseLifetime)
      LV.setARCPreciseLifetime(ARCImpreciseLifetime);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }
  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  // FIXME: While we're emitting a binding from an enclosing scope, all other
  // DeclRefExprs we see should be implicitly treated as if they also refer to
  // an enclosing scope.
  if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
    if (E->refersToEnclosingVariableOrCapture()) {
      auto *FD = LambdaCaptureFields.lookup(BD);
      return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
    }
    return EmitLValue(BD->getBinding());
  }

  // We can form DeclRefExprs naming GUID declarations when reconstituting
  // non-type template parameters into expressions.
  if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
    return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
                          AlignmentSource::Decl);

  if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
    auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
    auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());

    if (AS != T.getAddressSpace()) {
      auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
      auto PtrTy = ATPO.getElementType()->getPointerTo(TargetAS);
      auto ASC = getTargetHooks().performAddrSpaceCast(
          CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
      ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
    }

    return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
  }

  llvm_unreachable("Unhandled DeclRefExpr");
}
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
                                            &TBAAInfo);
    LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getLangOpts().ObjC &&
        getLangOpts().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");

    // __real is valid on scalars. This is a faster way of testing that.
    // __imag can only produce an rvalue on scalars.
    if (E->getOpcode() == UO_Real &&
        !LV.getAddress().getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    QualType T = ExprTy->castAs<ComplexType>()->getElementType();

    Address Component =
        (E->getOpcode() == UO_Real
             ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
             : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
    LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
                                   CGM.getTBAAInfoForSubobject(LV, T));
    ElemLV.getQuals().addQualifiers(LV.getQuals());
    return ElemLV;
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}
LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  auto SL = E->getFunctionName();
  assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
  StringRef FnName = CurFn->getName();
  if (FnName.starts_with("\01"))
    FnName = FnName.substr(1);
  StringRef NameItems[] = {
      PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
  std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
  if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
    std::string Name = std::string(SL->getString());
    if (!Name.empty()) {
      unsigned Discriminator =
          CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
      if (Discriminator)
        Name += "_" + Twine(Discriminator + 1).str();
      auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
    } else {
      auto C =
          CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
    }
  }

  auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}
/// Emit a type description suitable for use by a runtime sanitizer library.
/// The format of a type descriptor is
///
/// \code
///   { i16 TypeKind, i16 TypeInfo }
/// \endcode
///
/// followed by an array of i8 containing the type name. TypeKind is 0 for an
/// integer, 1 for a floating point value, and -1 for anything else.
llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
  // Only emit each type's descriptor once.
  if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
    return C;

  uint16_t TypeKind = -1;
  uint16_t TypeInfo = 0;

  if (T->isIntegerType()) {
    TypeKind = 0;
    TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
               (T->isSignedIntegerType() ? 1 : 0);
  } else if (T->isFloatingType()) {
    TypeKind = 1;
    TypeInfo = getContext().getTypeSize(T);
  }

  // Format the type name as if for a diagnostic, including quotes and
  // optionally an 'aka'.
  SmallString<32> Buffer;
  CGM.getDiags().ConvertArgToString(
      DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(),
      StringRef(), std::nullopt, Buffer, std::nullopt);

  llvm::Constant *Components[] = {
    Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
    llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
  };

  llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);

  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), Descriptor->getType(),
      /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);

  // Remember the descriptor for this type.
  CGM.setTypeDescriptorInMap(T, GV);

  return GV;
}
llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
  llvm::Type *TargetTy = IntPtrTy;

  if (V->getType() == TargetTy)
    return V;

  // Floating-point types which fit into intptr_t are bitcast to integers
  // and then passed directly (after zero-extension, if necessary).
  if (V->getType()->isFloatingPointTy()) {
    unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
    if (Bits <= TargetTy->getIntegerBitWidth())
      V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
                                                         Bits));
  }

  // Integers which fit in intptr_t are zero-extended and passed directly.
  if (V->getType()->isIntegerTy() &&
      V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
    return Builder.CreateZExt(V, TargetTy);

  // Pointers are passed directly, everything else is passed by address.
  if (!V->getType()->isPointerTy()) {
    RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
    Builder.CreateStore(V, Ptr);
    V = Ptr.getPointer();
  }
  return Builder.CreatePtrToInt(V, TargetTy);
}
/// Emit a representation of a SourceLocation for passing to a handler
/// in a sanitizer runtime library. The format for this data is:
/// \code
///   struct SourceLocation {
///     const char *Filename;
///     int32_t Line, Column;
///   };
/// \endcode
/// For an invalid SourceLocation, the Filename pointer is null.
llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
  llvm::Constant *Filename;
  int Line, Column;

  PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
  if (PLoc.isValid()) {
    StringRef FilenameString = PLoc.getFilename();

    int PathComponentsToStrip =
        CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
    if (PathComponentsToStrip < 0) {
      assert(PathComponentsToStrip != INT_MIN);
      int PathComponentsToKeep = -PathComponentsToStrip;
      auto I = llvm::sys::path::rbegin(FilenameString);
      auto E = llvm::sys::path::rend(FilenameString);
      while (I != E && --PathComponentsToKeep)
        ++I;

      FilenameString = FilenameString.substr(I - E);
    } else if (PathComponentsToStrip > 0) {
      auto I = llvm::sys::path::begin(FilenameString);
      auto E = llvm::sys::path::end(FilenameString);
      while (I != E && PathComponentsToStrip--)
        ++I;

      if (I != E)
        FilenameString =
            FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
      else
        FilenameString = llvm::sys::path::filename(FilenameString);
    }

    auto FilenameGV =
        CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
        cast<llvm::GlobalVariable>(
            FilenameGV.getPointer()->stripPointerCasts()));
    Filename = FilenameGV.getPointer();
    Line = PLoc.getLine();
    Column = PLoc.getColumn();
  } else {
    Filename = llvm::Constant::getNullValue(Int8PtrTy);
    Line = Column = 0;
  }

  llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
                            Builder.getInt32(Column)};

  return llvm::ConstantStruct::getAnon(Data);
}
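
// Example of the path-stripping logic above (paths are illustrative): with
// -fsanitize-undefined-strip-path-components=2, "/usr/local/src/foo.c"
// becomes "local/src/foo.c"; with a value of -1, only the last component
// ("foo.c") is kept.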
/// Specify under what conditions this check can be recovered
enum class CheckRecoverableKind {
  /// Always terminate program execution if this check fails.
  Unrecoverable,
  /// Check supports recovering, runtime has both fatal (noreturn) and
  /// non-fatal handlers for this check.
  Recoverable,
  /// Runtime conditionally aborts, always need to support recovery.
  AlwaysRecoverable
};

static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
  assert(Kind.countPopulation() == 1);
  if (Kind == SanitizerKind::Vptr)
    return CheckRecoverableKind::AlwaysRecoverable;
  else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
    return CheckRecoverableKind::Unrecoverable;
  else
    return CheckRecoverableKind::Recoverable;
}
namespace {
struct SanitizerHandlerInfo {
  char const *const Name;
  unsigned Version;
};
}

const SanitizerHandlerInfo SanitizerHandlers[] = {
#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
    LIST_SANITIZER_CHECKS
#undef SANITIZER_CHECK
};

static void emitCheckHandlerCall(CodeGenFunction &CGF,
                                 llvm::FunctionType *FnType,
                                 ArrayRef<llvm::Value *> FnArgs,
                                 SanitizerHandler CheckHandler,
                                 CheckRecoverableKind RecoverKind, bool IsFatal,
                                 llvm::BasicBlock *ContBB) {
  assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
  std::optional<ApplyDebugLocation> DL;
  if (!CGF.Builder.getCurrentDebugLocation()) {
    // Ensure that the call has at least an artificial debug location.
    DL.emplace(CGF, SourceLocation());
  }
  bool NeedsAbortSuffix =
      IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
  bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
  const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
  const StringRef CheckName = CheckInfo.Name;
  std::string FnName = "__ubsan_handle_" + CheckName.str();
  if (CheckInfo.Version && !MinimalRuntime)
    FnName += "_v" + llvm::utostr(CheckInfo.Version);
  if (MinimalRuntime)
    FnName += "_minimal";
  if (NeedsAbortSuffix)
    FnName += "_abort";
  bool MayReturn =
      !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;

  llvm::AttrBuilder B(CGF.getLLVMContext());
  if (!MayReturn) {
    B.addAttribute(llvm::Attribute::NoReturn)
        .addAttribute(llvm::Attribute::NoUnwind);
  }
  B.addUWTableAttr(llvm::UWTableKind::Default);

  llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
      FnType, FnName,
      llvm::AttributeList::get(CGF.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex, B),
      /*Local=*/true);
  llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
  if (!MayReturn) {
    HandlerCall->setDoesNotReturn();
    CGF.Builder.CreateUnreachable();
  } else {
    CGF.Builder.CreateBr(ContBB);
  }
}
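
// Example handler names produced by the scheme above (assuming a check named
// add_overflow with Version 0): "__ubsan_handle_add_overflow" for a
// recoverable check, "__ubsan_handle_add_overflow_abort" for a fatal one, and
// "__ubsan_handle_add_overflow_minimal" under the minimal runtime.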
void CodeGenFunction::EmitCheck(
    ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
    SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
    ArrayRef<llvm::Value *> DynamicArgs) {
  assert(IsSanitizerScope);
  assert(Checked.size() > 0);
  assert(CheckHandler >= 0 &&
         size_t(CheckHandler) < std::size(SanitizerHandlers));
  const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;

  llvm::Value *FatalCond = nullptr;
  llvm::Value *RecoverableCond = nullptr;
  llvm::Value *TrapCond = nullptr;
  for (int i = 0, n = Checked.size(); i < n; ++i) {
    llvm::Value *Check = Checked[i].first;
    // -fsanitize-trap= overrides -fsanitize-recover=.
    llvm::Value *&Cond =
        CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
            ? TrapCond
            : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
                  ? RecoverableCond
                  : FatalCond;
    Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
  }

  if (ClSanitizeGuardChecks) {
    llvm::Value *Allow =
        Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
                           llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));

    for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
      if (*Cond)
        *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
    }
  }

  if (TrapCond)
    EmitTrapCheck(TrapCond, CheckHandler);
  if (!FatalCond && !RecoverableCond)
    return;

  llvm::Value *JointCond;
  if (FatalCond && RecoverableCond)
    JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
  else
    JointCond = FatalCond ? FatalCond : RecoverableCond;
  assert(JointCond);

  CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
  assert(SanOpts.has(Checked[0].second));
#ifndef NDEBUG
  for (int i = 1, n = Checked.size(); i < n; ++i) {
    assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
           "All recoverable kinds in a single check must be same!");
    assert(SanOpts.has(Checked[i].second));
  }
#endif

  llvm::BasicBlock *Cont = createBasicBlock("cont");
  llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
  llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
  // Give hint that we very much don't expect to execute the handler
  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
  Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
  EmitBlock(Handlers);

  // Handler functions take an i8* pointing to the (handler-specific) static
  // information block, followed by a sequence of intptr_t arguments
  // representing operand values.
  SmallVector<llvm::Value *, 4> Args;
  SmallVector<llvm::Type *, 4> ArgTypes;
  if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
    Args.reserve(DynamicArgs.size() + 1);
    ArgTypes.reserve(DynamicArgs.size() + 1);

    // Emit handler arguments and create handler function type.
    if (!StaticArgs.empty()) {
      llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
      auto *InfoPtr = new llvm::GlobalVariable(
          CGM.getModule(), Info->getType(), false,
          llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
          llvm::GlobalVariable::NotThreadLocal,
          CGM.getDataLayout().getDefaultGlobalsAddressSpace());
      InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
      CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
      Args.push_back(InfoPtr);
      ArgTypes.push_back(Args.back()->getType());
    }

    for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
      Args.push_back(EmitCheckValue(DynamicArgs[i]));
      ArgTypes.push_back(IntPtrTy);
    }
  }

  llvm::FunctionType *FnType =
      llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);

  if (!FatalCond || !RecoverableCond) {
    // Simple case: we need to generate a single handler call, either
    // fatal, or non-fatal.
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
                         (FatalCond != nullptr), Cont);
  } else {
    // Emit two handler calls: first one for set of unrecoverable checks,
    // another one for recoverable.
    llvm::BasicBlock *NonFatalHandlerBB =
        createBasicBlock("non_fatal." + CheckName);
    llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
    Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
    EmitBlock(FatalHandlerBB);
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
                         NonFatalHandlerBB);
    EmitBlock(NonFatalHandlerBB);
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
                         Cont);
  }

  EmitBlock(Cont);
}
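
// As a sketch of the result: for "x + y" under
// -fsanitize=signed-integer-overflow, EmitCheck receives the no-overflow
// flag as the condition and emits roughly
//
//   br i1 %ok, label %cont, label %handler.add_overflow, !prof !likely
// handler.add_overflow:
//   call void @__ubsan_handle_add_overflow(ptr @static_data, i64 %x, i64 %y)
//
// where the static data block holds the source location and type descriptors
// described earlier in this file.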
void CodeGenFunction::EmitCfiSlowPathCheck(
    SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
    llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
  llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");

  llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
  llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
  BI->setMetadata(llvm::LLVMContext::MD_prof, Node);

  EmitBlock(CheckBB);

  bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);

  llvm::CallInst *CheckCall;
  llvm::FunctionCallee SlowPathFn;
  if (WithDiag) {
    llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
    auto *InfoPtr =
        new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, Info);
    InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);

    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath_diag",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
                                false));
    CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
  } else {
    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
    CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
  }

  CGM.setDSOLocal(
      cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
  CheckCall->setDoesNotThrow();

  EmitBlock(Cont);
}
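
// For reference, the cross-DSO runtime entry points called above correspond
// to these C signatures (per the Clang CFI design documentation):
//   void __cfi_slowpath(uint64_t CallSiteTypeId, void *Ptr);
//   void __cfi_slowpath_diag(uint64_t CallSiteTypeId, void *Ptr,
//                            void *DiagData);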
// Emit a stub for __cfi_check function so that the linker knows about this
// symbol in LTO mode.
void CodeGenFunction::EmitCfiCheckStub() {
  llvm::Module *M = &CGM.getModule();
  ASTContext &C = getContext();
  QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);

  FunctionArgList FnArgs;
  ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
  ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
  ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
                                        ImplicitParamKind::Other);
  FnArgs.push_back(&ArgCallsiteTypeId);
  FnArgs.push_back(&ArgAddr);
  FnArgs.push_back(&ArgCFICheckFailData);
  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);

  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
      llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
  F->setAlignment(llvm::Align(4096));
  CGM.setDSOLocal(F);

  llvm::LLVMContext &Ctx = M->getContext();
  llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
  // CrossDSOCFI pass is not executed if there is no executable code.
  SmallVector<llvm::Value *> Args{F->getArg(2), F->getArg(1)};
  llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
  llvm::ReturnInst::Create(Ctx, nullptr, BB);
}
// This function is basically a switch over the CFI failure kind, which is
// extracted from CFICheckFailData (1st function argument). Each case is either
// llvm.trap or a call to one of the two runtime handlers, based on
// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
// failure kind) traps, but this should really never happen. CFICheckFailData
// can be nullptr if the calling module has -fsanitize-trap behavior for this
// check kind; in this case __cfi_check_fail traps as well.
void CodeGenFunction::EmitCfiCheckFail() {
  SanitizerScope SanScope(this);
  FunctionArgList Args;
  ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
                            ImplicitParamKind::Other);
  ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
                            ImplicitParamKind::Other);
  Args.push_back(&ArgData);
  Args.push_back(&ArgAddr);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy,
                                                       Args);

  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
      llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
  F->setVisibility(llvm::GlobalValue::HiddenVisibility);

  StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
                SourceLocation());

  // This function is not affected by NoSanitizeList. This function does
  // not have a source location, but "src:*" would still apply. Revert any
  // changes to SanOpts made in StartFunction.
  SanOpts = CGM.getLangOpts().Sanitize;

  llvm::Value *Data =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgData.getLocation());
  llvm::Value *Addr =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgAddr.getLocation());

  // Data == nullptr means the calling module has trap behaviour for this check.
  llvm::Value *DataIsNotNullPtr =
      Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
  EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);

  llvm::StructType *SourceLocationTy =
      llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
  llvm::StructType *CfiCheckFailDataTy =
      llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);

  llvm::Value *V = Builder.CreateConstGEP2_32(
      CfiCheckFailDataTy,
      Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
      0);

  Address CheckKindAddr(V, Int8Ty, getIntAlign());
  llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);

  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
      CGM.getLLVMContext(),
      llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
  llvm::Value *ValidVtable = Builder.CreateZExt(
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                         {Addr, AllVtables}),
      IntPtrTy);

  const std::pair<int, SanitizerMask> CheckKinds[] = {
      {CFITCK_VCall, SanitizerKind::CFIVCall},
      {CFITCK_NVCall, SanitizerKind::CFINVCall},
      {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
      {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
      {CFITCK_ICall, SanitizerKind::CFIICall}};

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks;
  for (auto CheckKindMaskPair : CheckKinds) {
    int Kind = CheckKindMaskPair.first;
    SanitizerMask Mask = CheckKindMaskPair.second;
    llvm::Value *Cond =
        Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
    if (CGM.getLangOpts().Sanitize.has(Mask))
      EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
                {Data, Addr, ValidVtable});
    else
      EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
  }

  FinishFunction();
  // The only reference to this function will be created during LTO link.
  // Make sure it survives until then.
  CGM.addUsedGlobal(F);
}
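
// For reference, the layout EmitCfiCheckFail reads above corresponds to this
// C sketch of the runtime's data block (field names are illustrative):
//
//   struct CfiCheckFailData {
//     uint8_t CheckKind;  // one of the CFITCK_* values
//     struct { const char *Filename; int32_t Line, Column; } Loc;
//     void *TypeDescriptor;
//   };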
void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
  if (SanOpts.has(SanitizerKind::Unreachable)) {
    SanitizerScope SanScope(this);
    EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
                             SanitizerKind::Unreachable),
              SanitizerHandler::BuiltinUnreachable,
              EmitCheckSourceLocation(Loc), std::nullopt);
  }
  Builder.CreateUnreachable();
}
void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
                                    SanitizerHandler CheckHandlerID) {
  llvm::BasicBlock *Cont = createBasicBlock("cont");

  // If we're optimizing, collapse all calls to trap down to just one per
  // check-type per function to save on code size.
  if ((int)TrapBBs.size() <= CheckHandlerID)
    TrapBBs.resize(CheckHandlerID + 1);

  llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];

  if (!ClSanitizeDebugDeoptimization &&
      CGM.getCodeGenOpts().OptimizationLevel && TrapBB &&
      (!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) {
    auto Call = TrapBB->begin();
    assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");

    Call->applyMergedLocation(Call->getDebugLoc(),
                              Builder.getCurrentDebugLocation());
    Builder.CreateCondBr(Checked, Cont, TrapBB);
  } else {
    TrapBB = createBasicBlock("trap");
    Builder.CreateCondBr(Checked, Cont, TrapBB);
    EmitBlock(TrapBB);

    llvm::CallInst *TrapCall = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
        llvm::ConstantInt::get(CGM.Int8Ty,
                               ClSanitizeDebugDeoptimization
                                   ? TrapBB->getParent()->size()
                                   : static_cast<uint64_t>(CheckHandlerID)));

    if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
      auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                    CGM.getCodeGenOpts().TrapFuncName);
      TrapCall->addFnAttr(A);
    }
    TrapCall->setDoesNotReturn();
    TrapCall->setDoesNotThrow();
    Builder.CreateUnreachable();
  }

  EmitBlock(Cont);
}
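
// Note on the merging above: when optimizing, every failed check of a given
// kind in a function branches to the same trap block, so the function ends up
// with a single llvm.ubsantrap(i8 N) call per check kind. The i8 argument
// identifies the check; some targets encode it in the trap instruction's
// immediate, which lets a debugger recover which check fired.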
llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
  llvm::CallInst *TrapCall =
      Builder.CreateCall(CGM.getIntrinsic(IntrID));

  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
    auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                  CGM.getCodeGenOpts().TrapFuncName);
    TrapCall->addFnAttr(A);
  }

  return TrapCall;
}
Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
                                                 LValueBaseInfo *BaseInfo,
                                                 TBAAAccessInfo *TBAAInfo) {
  assert(E->getType()->isArrayType() &&
         "Array to pointer decay must have array source type!");

  // Expressions of array type can't be bitfields or vector elements.
  LValue LV = EmitLValue(E);
  Address Addr = LV.getAddress();

  // If the array type was an incomplete type, we need to make sure
  // the decay ends up being the right type.
  llvm::Type *NewTy = ConvertType(E->getType());
  Addr = Addr.withElementType(NewTy);

  // Note that VLA pointers are always decayed, so we don't need to do
  // anything here.
  if (!E->getType()->isVariableArrayType()) {
    assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
           "Expected pointer to array");
    Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
  }

  // The result of this decay conversion points to an array element within the
  // base lvalue. However, since TBAA currently does not support representing
  // accesses to elements of member arrays, we conservatively represent
  // accesses to the pointee object as if it had no base lvalue specified.
  // TODO: Support TBAA for member arrays.
  QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
  if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);

  return Addr.withElementType(ConvertTypeForMem(EltType));
}
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
  // If this isn't just an array->pointer decay, bail out.
  const auto *CE = dyn_cast<CastExpr>(E);
  if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
    return nullptr;

  // If this is a decay from variable width array, bail out.
  const Expr *SubExpr = CE->getSubExpr();
  if (SubExpr->getType()->isVariableArrayType())
    return nullptr;

  return SubExpr;
}
static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
                                          llvm::Type *elemType,
                                          llvm::Value *ptr,
                                          ArrayRef<llvm::Value *> indices,
                                          bool inbounds,
                                          bool signedIndices,
                                          SourceLocation loc,
                                          const llvm::Twine &name = "arrayidx") {
  if (inbounds) {
    return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
                                      CodeGenFunction::NotSubtraction, loc,
                                      name);
  } else {
    return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
  }
}

static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
                                     ArrayRef<llvm::Value *> indices,
                                     llvm::Type *elementType, bool inbounds,
                                     bool signedIndices, SourceLocation loc,
                                     CharUnits align,
                                     const llvm::Twine &name = "arrayidx") {
  if (inbounds) {
    return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType,
                                      signedIndices,
                                      CodeGenFunction::NotSubtraction, loc,
                                      align, name);
  } else {
    return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
  }
}
static CharUnits getArrayElementAlign(CharUnits arrayAlign,
                                      llvm::Value *idx,
                                      CharUnits eltSize) {
  // If we have a constant index, we can use the exact offset of the
  // element we're accessing.
  if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
    CharUnits offset = constantIdx->getZExtValue() * eltSize;
    return arrayAlign.alignmentAtOffset(offset);

  // Otherwise, use the worst-case alignment for any element.
  } else {
    return arrayAlign.alignmentOfArrayElement(eltSize);
  }
}

static QualType getFixedSizeElementType(const ASTContext &ctx,
                                        const VariableArrayType *vla) {
  QualType eltType;
  do {
    eltType = vla->getElementType();
  } while ((vla = ctx.getAsVariableArrayType(eltType)));
  return eltType;
}
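
// Worked example for getArrayElementAlign: with a 16-byte-aligned array of
// 4-byte elements, a constant index of 3 gives offset 12, and
// alignmentAtOffset(12) yields 4; a non-constant index falls back to
// alignmentOfArrayElement(4), which here is also 4.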
static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
  return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
}

static bool hasBPFPreserveStaticOffset(const Expr *E) {
  if (!E)
    return false;
  QualType PointeeType = E->getType()->getPointeeType();
  if (PointeeType.isNull())
    return false;
  if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
    return hasBPFPreserveStaticOffset(BaseDecl);
  return false;
}

// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
                                               Address &Addr) {
  if (!CGF.getTarget().getTriple().isBPF())
    return Addr;

  llvm::Function *Fn =
      CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
  llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
  return Address(Call, Addr.getElementType(), Addr.getAlignment());
}
/// Given an array base, check whether its member access belongs to a record
/// with preserve_access_index attribute or not.
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
  if (!ArrayBase || !CGF.getDebugInfo())
    return false;

  // Only support base as either a MemberExpr or DeclRefExpr.
  // DeclRefExpr to cover cases like:
  //    struct s { int a; int b[10]; };
  //    struct s *p;
  //    p[1].a, p->b[5]
  // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
  // p->b[5] is a MemberExpr example.
  const Expr *E = ArrayBase->IgnoreImpCasts();
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();

  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
    const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
    if (!VarDef)
      return false;

    const auto *PtrT = VarDef->getType()->getAs<PointerType>();
    if (!PtrT)
      return false;

    const auto *PointeeT = PtrT->getPointeeType()
                               ->getUnqualifiedDesugaredType();
    if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
      return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
    return false;
  }

  return false;
}
static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
                                     ArrayRef<llvm::Value *> indices,
                                     QualType eltType, bool inbounds,
                                     bool signedIndices, SourceLocation loc,
                                     QualType *arrayType = nullptr,
                                     const Expr *Base = nullptr,
                                     const llvm::Twine &name = "arrayidx") {
  // All the indices except that last must be zero.
#ifndef NDEBUG
  for (auto *idx : indices.drop_back())
    assert(isa<llvm::ConstantInt>(idx) &&
           cast<llvm::ConstantInt>(idx)->isZero());
#endif

  // Determine the element size of the statically-sized base. This is
  // the thing that the indices are expressed in terms of.
  if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
    eltType = getFixedSizeElementType(CGF.getContext(), vla);
  }

  // We can use that to compute the best alignment of the element.
  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
  CharUnits eltAlign =
      getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);

  if (hasBPFPreserveStaticOffset(Base))
    addr = wrapWithBPFPreserveStaticOffset(CGF, addr);

  llvm::Value *eltPtr;
  auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
  if (!LastIndex ||
      (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
    addr = emitArraySubscriptGEP(CGF, addr, indices,
                                 CGF.ConvertTypeForMem(eltType), inbounds,
                                 signedIndices, loc, eltAlign, name);
    return addr;
  } else {
    // Remember the original array subscript for bpf target
    unsigned idx = LastIndex->getZExtValue();
    llvm::DIType *DbgInfo = nullptr;
    if (arrayType)
      DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
    eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
        addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
        idx, DbgInfo);
  }

  return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
}
/// The offset of a field from the beginning of the record.
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *FD, int64_t &Offset) {
  ASTContext &Ctx = CGF.getContext();
  const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
  unsigned FieldNo = 0;

  for (const Decl *D : RD->decls()) {
    if (const auto *Record = dyn_cast<RecordDecl>(D))
      if (getFieldOffsetInBits(CGF, Record, FD, Offset)) {
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }

    if (const auto *Field = dyn_cast<FieldDecl>(D))
      if (FD == Field) {
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }

    if (isa<FieldDecl>(D))
      ++FieldNo;
  }

  return false;
}
/// Returns the relative offset difference between \p FD1 and \p FD2.
/// \code
///   offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
/// \endcode
/// Both fields must be within the same struct.
static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
                                                        const FieldDecl *FD1,
                                                        const FieldDecl *FD2) {
  const RecordDecl *FD1OuterRec =
      FD1->getParent()->getOuterLexicalRecordContext();
  const RecordDecl *FD2OuterRec =
      FD2->getParent()->getOuterLexicalRecordContext();

  if (FD1OuterRec != FD2OuterRec)
    // Fields must be within the same RecordDecl.
    return std::optional<int64_t>();

  int64_t FD1Offset = 0;
  if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
    return std::optional<int64_t>();

  int64_t FD2Offset = 0;
  if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
    return std::optional<int64_t>();

  return std::make_optional<int64_t>(FD1Offset - FD2Offset);
}
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
                                               bool Accessed) {
  // The index must always be an integer, which is not an aggregate. Emit it
  // in lexical order (this complexity is, sadly, required by C++17).
  llvm::Value *IdxPre =
      (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
  bool SignedIndices = false;
  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
    auto *Idx = IdxPre;
    if (E->getLHS() != E->getIdx()) {
      assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
      Idx = EmitScalarExpr(E->getIdx());
    }

    QualType IdxTy = E->getIdx()->getType();
    bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
    SignedIndices |= IdxSigned;

    if (SanOpts.has(SanitizerKind::ArrayBounds))
      EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);

    // Extend or truncate the index type to 32 or 64-bits.
    if (Promote && Idx->getType() != IntPtrTy)
      Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");

    return Idx;
  };
  IdxPre = nullptr;

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isSubscriptableVectorType() &&
      !isa<ExtVectorElementExpr>(E->getBase())) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/false);
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
                                 E->getBase()->getType(), LHS.getBaseInfo(),
                                 TBAAAccessInfo());
  }

  // All the other cases basically behave like simple offsetting.

  // Handle the extvector case we ignored above.
  if (isa<ExtVectorElementExpr>(E->getBase())) {
    LValue LV = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    Address Addr = EmitExtVectorElementLValue(LV);

    QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
                                 SignedIndices, E->getExprLoc());
    return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, EltType));
  }

  LValueBaseInfo EltBaseInfo;
  TBAAAccessInfo EltTBAAInfo;
  Address Addr = Address::invalid();
  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(E->getType())) {
    // The base must be a pointer, which is not an aggregate. Emit
    // it. It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined()) {
      Idx = Builder.CreateMul(Idx, numElements);
    } else {
      Idx = Builder.CreateNSWMul(Idx, numElements);
    }

    Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc());

  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
    // Indexing over an interface, as in "NSString *P; P[4];"

    // Emit the base pointer.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
    llvm::Value *InterfaceSizeVal =
        llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());

    llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);

    // We don't necessarily build correct LLVM struct types for ObjC
    // interfaces, so we can't rely on GEP to do this scaling
    // correctly, so we need to cast to i8*.  FIXME: is this actually
    // true?  A lot of other things in the fragile ABI would break...
    llvm::Type *OrigBaseElemTy = Addr.getElementType();

    // Do the GEP.
    CharUnits EltAlign =
        getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
    llvm::Value *EltPtr =
        emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
                              ScaledIdx, false, SignedIndices, E->getExprLoc());
    Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    if (SanOpts.has(SanitizerKind::ArrayBounds)) {
      // If the array being accessed has a "counted_by" attribute, generate
      // bounds checking code. The "count" field is at the top level of the
      // struct or in an anonymous struct, that's also at the top level. Future
      // expansions may allow the "count" to reside at any place in the struct,
      // but the value of "counted_by" will be a "simple" path to the count,
      // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
      // similar to emit the correct GEP.
      const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
          getLangOpts().getStrictFlexArraysLevel();

      if (const auto *ME = dyn_cast<MemberExpr>(Array);
          ME &&
          ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
          ME->getMemberDecl()->getType()->isCountAttributedType()) {
        const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (const FieldDecl *CountFD = FindCountedByField(FAMDecl)) {
          if (std::optional<int64_t> Diff =
                  getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
            CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);

            // Create a GEP with a byte offset between the FAM and count and
            // use that to load the count value.
            Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(
                ArrayLV.getAddress(), Int8PtrTy, Int8Ty);

            llvm::Type *CountTy = ConvertType(CountFD->getType());
            llvm::Value *Res = Builder.CreateInBoundsGEP(
                Int8Ty, Addr.emitRawPointer(*this),
                Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
            Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
                                            ".counted_by.load");

            // Now emit the bounds checking.
            EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
                                Array->getType(), Accessed);
          }
        }
      }
    }

    // Propagate the alignment from the array itself to the result.
    QualType arrayType = Array->getType();
    Addr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
        E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
        E->getExprLoc(), &arrayType, E->getBase());
    EltBaseInfo = ArrayLV.getBaseInfo();
    EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
  } else {
    // The base must be a pointer; emit it with an estimate of its alignment.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    QualType ptrType = E->getBase()->getType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc(), &ptrType,
                                 E->getBase());
  }

  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);

  if (getLangOpts().ObjC &&
      getLangOpts().getGC() != LangOptions::NonGC) {
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}
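
// Sketch of the counted_by path above, for a hypothetical
//
//   struct s { int count; int fam[] __attribute__((counted_by(count))); };
//
// an access s->fam[i] under -fsanitize=array-bounds GEPs backwards from the
// flexible array member by the byte offset difference (-4 here), loads
// 'count', and feeds it to EmitBoundsCheckImpl as the array bound for 'i'.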
LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
  assert(
      !E->isIncomplete() &&
      "incomplete matrix subscript expressions should be rejected during Sema");
  LValue Base = EmitLValue(E->getBase());
  llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
  llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
  llvm::Value *NumRows = Builder.getIntN(
      RowIdx->getType()->getScalarSizeInBits(),
      E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
  llvm::Value *FinalIdx =
      Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
  return LValue::MakeMatrixElt(
      MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
      E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
}
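
// Example of the index computation above: for a 4x3 matrix (NumRows == 4),
// the element at row 1, column 2 maps to the flattened column-major index
// 2 * 4 + 1 == 9.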
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
                                       LValueBaseInfo &BaseInfo,
                                       TBAAAccessInfo &TBAAInfo,
                                       QualType BaseTy, QualType ElTy,
                                       bool IsLowerBound) {
  LValue BaseLVal;
  if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
    BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
    if (BaseTy->isArrayType()) {
      Address Addr = BaseLVal.getAddress();
      BaseInfo = BaseLVal.getBaseInfo();

      // If the array type was an incomplete type, we need to make sure
      // the decay ends up being the right type.
      llvm::Type *NewTy = CGF.ConvertType(BaseTy);
      Addr = Addr.withElementType(NewTy);

      // Note that VLA pointers are always decayed, so we don't need to do
      // anything here.
      if (!BaseTy->isVariableArrayType()) {
        assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
               "Expected pointer to array");
        Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
      }

      return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
    }
    LValueBaseInfo TypeBaseInfo;
    TBAAAccessInfo TypeTBAAInfo;
    CharUnits Align =
        CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
    BaseInfo.mergeForCast(TypeBaseInfo);
    TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
    return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
                   CGF.ConvertTypeForMem(ElTy), Align);
  }
  return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}
LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
                                             bool IsLowerBound) {
  assert(!E->isOpenACCArraySection() &&
         "OpenACC Array section codegen not implemented");

  QualType BaseTy = ArraySectionExpr::getBaseOriginalType(E->getBase());
  QualType ResultExprTy;
  if (auto *AT = getContext().getAsArrayType(BaseTy))
    ResultExprTy = AT->getElementType();
  else
    ResultExprTy = BaseTy->getPointeeType();
  llvm::Value *Idx = nullptr;
  if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
    // Requesting lower bound or upper bound, but without provided length and
    // without ':' symbol for the default length -> length = 1.
    // Idx = LowerBound ?: 0;
    if (auto *LowerBound = E->getLowerBound()) {
      Idx = Builder.CreateIntCast(
          EmitScalarExpr(LowerBound), IntPtrTy,
          LowerBound->getType()->hasSignedIntegerRepresentation());
    } else
      Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
  } else {
    // Try to emit length or lower bound as constant. If this is possible, 1
    // is subtracted from constant length or lower bound. Otherwise, emit LLVM
    // IR (LB + Len) - 1.
    auto &C = CGM.getContext();
    auto *Length = E->getLength();
    llvm::APSInt ConstLength;
    if (Length) {
      // Idx = LowerBound + Length - 1;
      if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
        ConstLength = CL->zextOrTrunc(PointerWidthInBits);
        Length = nullptr;
      }
      auto *LowerBound = E->getLowerBound();
      llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
      if (LowerBound) {
        if (std::optional<llvm::APSInt> LB =
                LowerBound->getIntegerConstantExpr(C)) {
          ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
          LowerBound = nullptr;
        }
      }
      if (!Length)
        --ConstLength;
      else if (!LowerBound)
        --ConstLowerBound;

      if (Length || LowerBound) {
        auto *LowerBoundVal =
            LowerBound
                ? Builder.CreateIntCast(
                      EmitScalarExpr(LowerBound), IntPtrTy,
                      LowerBound->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
        auto *LengthVal =
            Length
                ? Builder.CreateIntCast(
                      EmitScalarExpr(Length), IntPtrTy,
                      Length->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLength);
        Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
                                /*HasNUW=*/false,
                                !getLangOpts().isSignedOverflowDefined());
        if (Length && LowerBound) {
          Idx = Builder.CreateSub(
              Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
              /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
        }
      } else
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
    } else {
      // Idx = ArraySize - 1;
      QualType ArrayTy = BaseTy->isPointerType()
                             ? E->getBase()->IgnoreParenImpCasts()->getType()
                             : BaseTy;
      if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
        Length = VAT->getSizeExpr();
        if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
          ConstLength = *L;
          Length = nullptr;
        }
      } else {
        auto *CAT = C.getAsConstantArrayType(ArrayTy);
        assert(CAT && "unexpected type for array initializer");
        ConstLength = CAT->getSize();
      }
      if (Length) {
        auto *LengthVal = Builder.CreateIntCast(
            EmitScalarExpr(Length), IntPtrTy,
            Length->getType()->hasSignedIntegerRepresentation());
        Idx = Builder.CreateSub(
            LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
            /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
      } else {
        ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
        --ConstLength;
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
      }
    }
  }
  assert(Idx);

  Address EltPtr = Address::invalid();
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
    // The base must be a pointer, which is not an aggregate. Emit
    // it. It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
                                BaseTy, VLA->getElementType(), IsLowerBound);
    // The element count here is the total number of non-VLA elements.
    llvm::Value *NumElements = getVLASize(VLA).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined())
      Idx = Builder.CreateMul(Idx, NumElements);
    else
      Idx = Builder.CreateNSWMul(Idx, NumElements);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);

    // Propagate the alignment from the array itself to the result.
    EltPtr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
        ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
        /*signedIndices=*/false, E->getExprLoc());
    BaseInfo = ArrayLV.getBaseInfo();
    TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
  } else {
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
                                ResultExprTy, IsLowerBound);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  }

  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
}
LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
    const auto *PT = E->getBase()->getType()->castAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    Address VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
                          AlignmentSource::Decl);
  }

  QualType type =
      E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
  SmallVector<uint32_t, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV =
        llvm::ConstantDataVector::get(getLLVMContext(), Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
                                    Base.getBaseInfo(), TBAAAccessInfo());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
                                  Base.getBaseInfo(), TBAAAccessInfo());
}
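// NOTE (illustrative commentary, not from the upstream source): with the
// ext_vector_type extension,
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 v;
//   v.yx = ...;        // ExtVectorElementExpr; encoded indices are {1, 0}
//
// A swizzle of a swizzle, such as v.zyxw.xy, composes the two index vectors,
// which is what the BaseElts->getAggregateElement(Indices[i]) loop above
// computes for the non-simple (already ext-vector-element) base case.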
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
    EmitIgnoredExpr(E->getBase());
    return EmitDeclRefLValue(DRE);
  }

  Expr *BaseExpr = E->getBase();
  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  LValue BaseLV;
  if (E->isArrow()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
    QualType PtrTy = BaseExpr->getType()->getPointeeType();
    SanitizerSet SkippedChecks;
    bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
    if (IsBaseCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
      SkippedChecks.set(SanitizerKind::Null, true);
    EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
    BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
  } else
    BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);

  NamedDecl *ND = E->getMemberDecl();
  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseLV, Field);
    setObjCGCLValueClass(getContext(), E, LV);
    if (getLangOpts().OpenMP) {
      // If the member was explicitly marked as nontemporal, mark it as
      // nontemporal. If the base lvalue is marked as nontemporal, mark access
      // to children as nontemporal too.
      if ((IsWrappedCXXThis(BaseExpr) &&
           CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
          BaseLV.isNontemporal())
        LV.setNontemporal(/*Value=*/true);
    }
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}
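// NOTE (illustrative commentary, not from the upstream source): for
//
//   struct S { int x; };
//   int load(S *s) { return s->x; }
//
// the '->' path above emits 's' as a scalar pointer, runs the
// TCK_MemberAccess type check on it (skipping the null check when the base
// is 'this' or a plain DeclRefExpr, which were already checked when formed),
// and then lets EmitLValueForField compute the field address.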
/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
                                                 llvm::Value *ThisValue) {
  bool HasExplicitObjectParameter = false;
  const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
  if (MD) {
    HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
    assert(MD->getParent()->isLambda());
    assert(MD->getParent() == Field->getParent());
  }

  LValue LambdaLV;
  if (HasExplicitObjectParameter) {
    const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
    auto It = LocalDeclMap.find(D);
    assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
    Address AddrOfExplicitObject = It->getSecond();
    if (D->getType()->isReferenceType())
      LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
                                           AlignmentSource::Decl);
    else
      LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
                                D->getType().getNonReferenceType());

    // Make sure we have an lvalue to the lambda itself and not a derived class.
    auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
    auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
    if (ThisTy != LambdaTy) {
      const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
      Address Base = GetAddressOfBaseClass(
          LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
          BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
      LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
    }
  } else {
    QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
    LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
  }
  return EmitLValueForField(LambdaLV, Field);
}

LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
  return EmitLValueForLambdaField(Field, CXXABIThisValue);
}
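// NOTE (illustrative commentary, not from the upstream source): the
// explicit-object branch covers C++23 deducing-this lambdas, e.g.
//
//   int n = 0;
//   auto l = [n](this auto &&self) { return n; };
//
// Here the capture 'n' is reached through the first (explicit object)
// parameter rather than through CXXABIThisValue, and if 'self' deduces to a
// class derived from the closure type, the address is first adjusted back
// to the lambda's own class via the recorded LambdaCastPaths base path.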
/// Get the field index in the debug info. The debug info structure/union
/// will ignore the unnamed bitfields.
unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
                                             unsigned FieldIndex) {
  unsigned I = 0, Skipped = 0;

  for (auto *F : Rec->getDefinition()->fields()) {
    if (I == FieldIndex)
      break;
    if (F->isUnnamedBitField())
      Skipped++;
    I++;
  }

  return FieldIndex - Skipped;
}
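// NOTE (illustrative commentary, not from the upstream source): for
//
//   struct S { int a; int : 3; int b; };
//
// 'b' has AST field index 2, but the unnamed bit-field is not described in
// the debug info, so getDebugInfoFIndex(S, 2) returns 1.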
/// Get the address of a zero-sized field within a record. The resulting
/// address doesn't necessarily have the right type.
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
                                       const FieldDecl *Field) {
  CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
      CGF.getContext().getFieldOffset(Field));
  if (Offset.isZero())
    return Base;
  Base = Base.withElementType(CGF.Int8Ty);
  return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
}

/// Drill down to the storage of a field without walking into
/// reference types.
///
/// The resulting address doesn't necessarily have the right type.
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
                                      const FieldDecl *field) {
  if (isEmptyFieldForLayout(CGF.getContext(), field))
    return emitAddrOfZeroSizeField(CGF, base, field);

  const RecordDecl *rec = field->getParent();

  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreateStructGEP(base, idx, field->getName());
}

static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
                                        Address addr, const FieldDecl *field) {
  const RecordDecl *rec = field->getParent();
  llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
      base.getType(), rec->getLocation());

  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreatePreserveStructAccessIndex(
      addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
}

static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
  const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  if (RD->isDynamicClass())
    return true;

  for (const auto &Base : RD->bases())
    if (hasAnyVptr(Base.getType(), Context))
      return true;

  for (const FieldDecl *Field : RD->fields())
    if (hasAnyVptr(Field->getType(), Context))
      return true;

  return false;
}
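// NOTE (illustrative commentary, not from the upstream source): hasAnyVptr
// answers questions like
//
//   struct Poly { virtual ~Poly(); };   // dynamic class       -> true
//   struct Holder { Poly p; };          // field carries vptr  -> true
//   union U { Holder h; int i; };       // queried for U::h below
//
// EmitLValueForField uses it to decide whether a union member access needs a
// launder.invariant.group barrier under -fstrict-vtable-pointers.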
LValue CodeGenFunction::EmitLValueForField(LValue base,
                                           const FieldDecl *field) {
  LValueBaseInfo BaseInfo = base.getBaseInfo();

  if (field->isBitField()) {
    const CGRecordLayout &RL =
        CGM.getTypes().getCGRecordLayout(field->getParent());
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
    const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
                             CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
                             Info.VolatileStorageSize != 0 &&
                             field->getType()
                                 .withCVRQualifiers(base.getVRQualifiers())
                                 .isVolatileQualified();
    Address Addr = base.getAddress();
    unsigned Idx = RL.getLLVMFieldNo(field);
    const RecordDecl *rec = field->getParent();
    if (hasBPFPreserveStaticOffset(rec))
      Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
    if (!UseVolatile) {
      if (!IsInPreservedAIRegion &&
          (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
        if (Idx != 0)
          // For structs, we GEP to the field that the record layout suggests.
          Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
      } else {
        llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
            getContext().getRecordType(rec), rec->getLocation());
        Addr = Builder.CreatePreserveStructAccessIndex(
            Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
            DbgInfo);
      }
    }
    const unsigned SS =
        UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
    // Get the access type.
    llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
    Addr = Addr.withElementType(FieldIntTy);
    if (UseVolatile) {
      const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
      if (VolatileOffset)
        Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
    }

    QualType fieldType =
        field->getType().withCVRQualifiers(base.getVRQualifiers());
    // TODO: Support TBAA for bit fields.
    LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
    return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
                                TBAAAccessInfo());
  }

  // Fields of may-alias structures are may-alias themselves.
  // FIXME: this should get propagated down through anonymous structs
  // and unions.
  QualType FieldType = field->getType();
  const RecordDecl *rec = field->getParent();
  AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
  TBAAAccessInfo FieldTBAAInfo;
  if (base.getTBAAInfo().isMayAlias() ||
      rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else if (rec->isUnion()) {
    // TODO: Support TBAA for unions.
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else {
    // If no base type has been assigned for the base access, then try to
    // generate one for this base lvalue.
    FieldTBAAInfo = base.getTBAAInfo();
    if (!FieldTBAAInfo.BaseType) {
      FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
      assert(!FieldTBAAInfo.Offset &&
             "Nonzero offset for an access with no base type!");
    }

    // Adjust offset to be relative to the base type.
    const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(field->getParent());
    unsigned CharWidth = getContext().getCharWidth();
    if (FieldTBAAInfo.BaseType)
      FieldTBAAInfo.Offset +=
          Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;

    // Update the final access type and size.
    FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
    FieldTBAAInfo.Size =
        getContext().getTypeSizeInChars(FieldType).getQuantity();
  }

  Address addr = base.getAddress();
  if (hasBPFPreserveStaticOffset(rec))
    addr = wrapWithBPFPreserveStaticOffset(*this, addr);
  if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        ClassDef->isDynamicClass()) {
      // Getting to any field of dynamic object requires stripping dynamic
      // information provided by invariant.group.  This is because accessing
      // fields may leak the real address of dynamic object, which could result
      // in miscompilation when leaked pointer would be compared.
      auto *stripped =
          Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
      addr = Address(stripped, addr.getElementType(), addr.getAlignment());
    }
  }

  unsigned RecordCVR = base.getVRQualifiers();
  if (rec->isUnion()) {
    // For unions, there is no pointer adjustment.
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        hasAnyVptr(FieldType, getContext()))
      // Because unions can easily skip invariant.barriers, we need to add
      // a barrier every time CXXRecord field with vptr is referenced.
      addr = Builder.CreateLaunderInvariantGroup(addr);

    if (IsInPreservedAIRegion ||
        (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
      // Remember the original union field index
      llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
          base.getType(), rec->getLocation());
      addr =
          Address(Builder.CreatePreserveUnionAccessIndex(
                      addr.emitRawPointer(*this),
                      getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
                  addr.getElementType(), addr.getAlignment());
    }

    if (FieldType->isReferenceType())
      addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
  } else {
    if (!IsInPreservedAIRegion &&
        (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
      // For structs, we GEP to the field that the record layout suggests.
      addr = emitAddrOfFieldStorage(*this, addr, field);
    else
      // Remember the original struct field index
      addr = emitPreserveStructAccess(*this, base, addr, field);
  }

  // If this is a reference field, load the reference right now.
  if (FieldType->isReferenceType()) {
    LValue RefLVal =
        MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
    if (RecordCVR & Qualifiers::Volatile)
      RefLVal.getQuals().addVolatile();
    addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);

    // Qualifiers on the struct don't apply to the referencee.
    RecordCVR = 0;
    FieldType = FieldType->getPointeeType();
  }

  // Make sure that the address is pointing to the right type.  This is critical
  // for both unions and structs.
  addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));

  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
  LV.getQuals().addCVRQualifiers(RecordCVR);

  // __weak attribute on a field is ignored.
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
    LV.getQuals().removeObjCGCAttr();

  return LV;
}
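// NOTE (illustrative commentary, not from the upstream source): for a
// bit-field access such as
//
//   struct S { unsigned a : 3; unsigned b : 5; };
//   unsigned get_b(S *s) { return s->b; }
//
// the bit-field branch above addresses the whole storage unit (an i8 in the
// usual record layout for S) and returns an LValue::MakeBitfield; the actual
// shift-and-mask to extract 'b' happens later, when the lvalue is loaded.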
LValue
CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
                                                  const FieldDecl *Field) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(Base, Field);

  Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);

  // Make sure that the address is pointing to the right type.
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
  V = V.withElementType(llvmType);

  // TODO: Generate TBAA information that describes this access as a structure
  // member access and not just an access to an object of the field's type.
  // This should be similar to what we do in EmitLValueForField().
  LValueBaseInfo BaseInfo = Base.getBaseInfo();
  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
  return MakeAddrLValue(V, FieldType, FieldBaseInfo,
                        CGM.getTBAAInfoForSubobject(Base, FieldType));
}
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E) {
  if (E->isFileScope()) {
    ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
    return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
  }
  if (E->getType()->isVariablyModifiedType())
    // make sure to emit the VLA size.
    EmitVariablyModifiedType(E->getType());

  Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr *InitExpr = E->getInitializer();
  LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);

  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
                   /*Init*/ true);

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  if (!getLangOpts().CPlusPlus)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
                                  E->getType(), getDestroyer(DtorKind),
                                  DtorKind & EHCleanup);

  return Result;
}

LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
  if (!E->isGLValue())
    // Initializing an aggregate temporary in C++11: T{...}.
    return EmitAggExprToLValue(E);

  // An lvalue initializer list must be initializing a reference.
  assert(E->isTransparent() && "non-transparent glvalue init list");
  return EmitLValue(E->getInit(0));
}

/// Emit the operand of a glvalue conditional operator. This is either a glvalue
/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
/// LValue is returned and the current block has been terminated.
static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
                                                         const Expr *Operand) {
  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
    CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
    return std::nullopt;
  }

  return CGF.EmitLValue(Operand);
}
// Handle the case where the condition is a constant evaluatable simple integer,
// which means we don't have to separately handle the true/false blocks.
std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
    CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
  const Expr *condExpr = E->getCond();
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
    if (!CondExprBool)
      std::swap(Live, Dead);

    if (!CGF.ContainsLabel(Dead)) {
      // If the true case is live, we need to track its region.
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      // If a throw expression we emit it and return an undefined lvalue
      // because it can't be used.
      if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
        CGF.EmitCXXThrowExpr(ThrowExpr);
        llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
        llvm::Type *Ty = CGF.UnqualPtrTy;
        return CGF.MakeAddrLValue(
            Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
            Dead->getType());
      }
      return CGF.EmitLValue(Live);
    }
  }
  return std::nullopt;
}
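// NOTE (illustrative commentary, not from the upstream source): this fast
// path fires when the condition folds to a constant, e.g.
//
//   int &r = true ? x : y;   // no branch emitted; just EmitLValue(x)
//
// The dead arm is skipped entirely (unless it contains a label that could be
// jumped to), and a live throw-expression yields an undef address because
// control never returns to use it.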
struct ConditionalInfo {
  llvm::BasicBlock *lhsBlock, *rhsBlock;
  std::optional<LValue> LHS, RHS;
};

// Create and generate the 3 blocks for a conditional operator.
// Leaves the 'current block' in the continuation basic block.
template<typename FuncTy>
ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
                                      const AbstractConditionalOperator *E,
                                      const FuncTy &BranchGenFunc) {
  ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
                       CGF.createBasicBlock("cond.false"), std::nullopt,
                       std::nullopt};
  llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
                           CGF.getProfileCount(E));

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.lhsBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
  eval.end(CGF);
  Info.lhsBlock = CGF.Builder.GetInsertBlock();

  if (Info.LHS)
    CGF.Builder.CreateBr(endBlock);

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.rhsBlock);
  eval.begin(CGF);
  Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
  eval.end(CGF);
  Info.rhsBlock = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(endBlock);

  return Info;
}

void CodeGenFunction::EmitIgnoredConditionalOperator(
    const AbstractConditionalOperator *E) {
  if (!E->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(E->getType()) &&
           "Unexpected conditional operator!");
    return (void)EmitAggExprToLValue(E);
  }

  OpaqueValueMapping binding(*this, E);
  if (HandleConditionalOperatorLValueSimpleCase(*this, E))
    return;

  EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
    CGF.EmitIgnoredExpr(E);
    return LValue{};
  });
}
LValue CodeGenFunction::EmitConditionalOperatorLValue(
    const AbstractConditionalOperator *expr) {
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(expr->getType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  OpaqueValueMapping binding(*this, expr);
  if (std::optional<LValue> Res =
          HandleConditionalOperatorLValueSimpleCase(*this, expr))
    return *Res;

  ConditionalInfo Info = EmitConditionalBlocks(
      *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
        return EmitLValueOrThrowExpression(CGF, E);
      });

  if ((Info.LHS && !Info.LHS->isSimple()) ||
      (Info.RHS && !Info.RHS->isSimple()))
    return EmitUnsupportedLValue(expr, "conditional operator");

  if (Info.LHS && Info.RHS) {
    Address lhsAddr = Info.LHS->getAddress();
    Address rhsAddr = Info.RHS->getAddress();
    Address result = mergeAddressesInConditionalExpr(
        lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
        Builder.GetInsertBlock(), expr->getType());
    AlignmentSource alignSource =
        std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
                 Info.RHS->getBaseInfo().getAlignmentSource());
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
        Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
    return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
                          TBAAInfo);
  }
  assert((Info.LHS || Info.RHS) &&
         "both operands of glvalue conditional are throw-expressions?");
  return Info.LHS ? *Info.LHS : *Info.RHS;
}
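// NOTE (illustrative commentary, not from the upstream source): a glvalue
// conditional with a throw operand exercises the single-arm path, e.g.
//
//   int &pick(bool b, int &x) { return b ? x : throw 0; }
//
// Only the non-throwing arm produces an LValue, so the address merge above
// is skipped and Info.LHS (or Info.RHS) is returned directly.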
/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
/// type. If the cast is to a reference, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
/// access one of its members.  This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_ToVoid:
  case CK_BitCast:
  case CK_LValueToRValueBitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
  case CK_HLSLVectorTruncation:
  case CK_HLSLArrayRValue:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  // These are never l-values; just use the aggregate emission code.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
    return EmitAggExprToLValue(E);

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress();
    const auto *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE), E->getType());
  }

  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_LValueToRValue:
    return EmitLValue(E->getSubExpr());

  case CK_NoOp: {
    // CK_NoOp can model a qualification conversion, which can remove an array
    // bound and change the IR type.
    // FIXME: Once pointee types are removed from IR, remove this.
    LValue LV = EmitLValue(E->getSubExpr());
    // Propagate the volatile qualifier to LValue, if it exists in E.
    if (E->changesVolatileQualification())
      LV.getQuals() = E->getType().getQualifiers();
    if (LV.isSimple()) {
      Address V = LV.getAddress();
      if (V.isValid()) {
        llvm::Type *T = ConvertTypeForMem(E->getType());
        if (V.getElementType() != T)
          LV.setAddress(V.withElementType(T));
      }
    }
    return LV;
  }

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const auto *DerivedClassTy =
        E->getSubExpr()->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    Address This = LV.getAddress();

    // Perform the derived-to-base conversion
    Address Base = GetAddressOfBaseClass(
        This, DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false, E->getExprLoc());

    // TODO: Support accesses to members of base classes in TBAA. For now, we
    // conservatively pretend that the complete object is of the base class
    // type.
    return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
    Address Derived = GetAddressOfDerivedClass(
        LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false);

    // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (sanitizePerformTypeCheck())
      EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived,
                    E->getType());

    if (SanOpts.has(SanitizerKind::CFIDerivedCast))
      EmitVTablePtrCheckForCast(E->getType(), Derived,
                                /*MayBeNull=*/false, CFITCK_DerivedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const auto *CE = cast<ExplicitCastExpr>(E);

    CGM.EmitExplicitCastExprType(CE, this);
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress().withElementType(
        ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));

    if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
      EmitVTablePtrCheckForCast(E->getType(), V,
                                /*MayBeNull=*/false, CFITCK_UnrelatedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_AddressSpaceConversion: {
    LValue LV = EmitLValue(E->getSubExpr());
    QualType DestTy = getContext().getPointerType(E->getType());
    llvm::Value *V = getTargetHooks().performAddrSpaceCast(
        *this, LV.getPointer(*this),
        E->getSubExpr()->getType().getAddressSpace(),
        E->getType().getAddressSpace(), ConvertType(DestTy));
    return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
                                  LV.getAddress().getAlignment()),
                          E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress().withElementType(ConvertType(E->getType()));
    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ZeroToOCLOpaqueType:
    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");

  case CK_VectorSplat: {
    // LValue results of vector splats are only supported in HLSL.
    if (!getLangOpts().HLSL)
      return EmitUnsupportedLValue(E, "unexpected cast lvalue");
    return EmitLValue(E->getSubExpr());
  }
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}
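// NOTE (illustrative commentary, not from the upstream source): the
// derived-to-base case above is what an lvalue upcast lowers to, e.g.
//
//   struct B { int b; }; struct D : B { int d; };
//   B &up(D &d) { return d; }   // CK_DerivedToBase on an lvalue
//
// For a non-virtual base at offset 0 this is a no-op on the address; with a
// non-zero offset it becomes a constant GEP produced by
// GetAddressOfBaseClass.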
LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOrCreateOpaqueLValueMapping(e);
}

LValue
CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
  assert(OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
    it = OpaqueLValues.find(e);
  if (it != OpaqueLValues.end())
    return it->second;

  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
  return EmitLValue(e->getSourceExpr());
}

RValue
CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
  assert(!OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
    it = OpaqueRValues.find(e);
  if (it != OpaqueRValues.end())
    return it->second;

  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
  return EmitAnyExpr(e->getSourceExpr());
}

RValue CodeGenFunction::EmitRValueForField(LValue LV,
                                           const FieldDecl *FD,
                                           SourceLocation Loc) {
  QualType FT = FD->getType();
  LValue FieldLV = EmitLValueForField(LV, FD);
  switch (getEvaluationKind(FT)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
  case TEK_Aggregate:
    return FieldLV.asAggregateRValue();
  case TEK_Scalar:
    // This routine is used to load fields one-by-one to perform a copy, so
    // don't load reference fields.
    if (FD->getType()->isReferenceType())
      return RValue::get(FieldLV.getPointer(*this));
    // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
    // primitive load.
    if (FieldLV.isBitField())
      return EmitLoadOfLValue(FieldLV, Loc);
    return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
  }
  llvm_unreachable("bad evaluation kind");
}
//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  // A CXXOperatorCallExpr is created even for explicit object methods, but
  // these should be treated like static function calls.
  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const auto *MD =
            dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
        MD && MD->isImplicitObjectMemberFunction())
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  CGCallee callee = EmitCallee(E->getCallee());

  if (callee.isBuiltin()) {
    return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
                           E, ReturnValue);
  }

  if (callee.isPseudoDestructor()) {
    return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
  }

  return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
}

/// Emit a CallExpr without considering whether it might be a subclass.
RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
                                           ReturnValueSlot ReturnValue) {
  CGCallee Callee = EmitCallee(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
}

// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
  for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
    if (!PD->isInlineBuiltinDeclaration())
      return false;
  return true;
}
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (auto builtinID = FD->getBuiltinID()) {
    std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
    std::string NoBuiltins = "no-builtins";

    StringRef Ident = CGF.CGM.getMangledName(GD);
    std::string FDInlineName = (Ident + ".inline").str();

    bool IsPredefinedLibFunction =
        CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
    bool HasAttributeNoBuiltin =
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);

    // When directly calling an inline builtin, call it through its mangled
    // name to make it clear it's not the actual builtin.
    if (CGF.CurFn->getName() != FDInlineName &&
        OnlyHasInlineBuiltinDeclaration(FD)) {
      llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
      llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
      llvm::Module *M = Fn->getParent();
      llvm::Function *Clone = M->getFunction(FDInlineName);
      if (!Clone) {
        Clone = llvm::Function::Create(Fn->getFunctionType(),
                                       llvm::GlobalValue::InternalLinkage,
                                       Fn->getAddressSpace(), FDInlineName, M);
        Clone->addFnAttr(llvm::Attribute::AlwaysInline);
      }
      return CGCallee::forDirect(Clone, GD);
    }

    // Replaceable builtins provide their own implementation of a builtin. If we
    // are in an inline builtin implementation, avoid trivial infinite
    // recursion. Honor __attribute__((no_builtin("foo"))) or
    // __attribute__((no_builtin)) on the current function unless foo is
    // not a predefined library function which means we must generate the
    // builtin no matter what.
    else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
      return CGCallee::forBuiltin(builtinID, FD);
  }

  llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
  if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
      FD->hasAttr<CUDAGlobalAttr>())
    CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
        cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));

  return CGCallee::forDirect(CalleePtr, GD);
}
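// NOTE (illustrative commentary, not from the upstream source): the
// ".inline" clone above supports inline builtin definitions such as
//
//   extern inline __attribute__((gnu_inline, always_inline))
//   void *memcpy(void *d, const void *s, size_t n) { /* custom body */ }
//
// Calls to memcpy are then directed at an internal "memcpy.inline" function,
// so the custom definition can itself fall back to the real builtin without
// recursing into itself.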
CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
  E = E->IgnoreParens();

  // Look through function-to-pointer decay.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
        ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
      return EmitCallee(ICE->getSubExpr());
    }

  // Resolve direct calls.
  } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
      return EmitDirectCallee(*this, FD);
    }
  } else if (auto ME = dyn_cast<MemberExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
      EmitIgnoredExpr(ME->getBase());
      return EmitDirectCallee(*this, FD);
    }

  // Look through template substitutions.
  } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
    return EmitCallee(NTTP->getReplacement());

  // Treat pseudo-destructor calls differently.
  } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
    return CGCallee::forPseudoDestructor(PDE);
  }

  // Otherwise, we have an indirect reference.
  llvm::Value *calleePtr;
  QualType functionType;
  if (auto ptrType = E->getType()->getAs<PointerType>()) {
    calleePtr = EmitScalarExpr(E);
    functionType = ptrType->getPointeeType();
  } else {
    functionType = E->getType();
    calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
  }
  assert(functionType->isFunctionType());

  GlobalDecl GD;
  if (const auto *VD =
          dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
    GD = GlobalDecl(VD);

  CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
  CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType);
  CGCallee callee(calleeInfo, calleePtr, pointerAuth);
  return callee;
}
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar: {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    // TODO: Can we de-duplicate this code with the corresponding code in
    // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
    RValue RV;
    llvm::Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField()) {
      llvm::Value *RHS =
          EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
      RV = RValue::get(RHS);
    } else
      RV = EmitAnyExpr(E->getRHS());

    LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);

    if (RV.isScalar())
      EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());

    if (LV.isBitField()) {
      llvm::Value *Result = nullptr;
      // If bitfield sanitizers are enabled we want to use the result
      // to check whether a truncation or sign change has occurred.
      if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
        EmitStoreThroughBitfieldLValue(RV, LV, &Result);
      else
        EmitStoreThroughBitfieldLValue(RV, LV);

      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
      QualType DstType = E->getLHS()->getType();
      EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
                                  LV.getBitFieldInfo(), E->getExprLoc());
    } else
      EmitStoreThroughLValue(RV, LV);

    if (getLangOpts().OpenMP)
      CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                                E->getLHS());
    return LV;
  }

  case TEK_Complex:
    return EmitComplexAssignmentLValue(E);

  case TEK_Aggregate:
    return EmitAggExprToLValue(E);
  }
  llvm_unreachable("bad evaluation kind");
}
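// NOTE (illustrative commentary, not from the upstream source): for a
// bit-field store such as
//
//   struct S { int x : 4; };
//   void set(S *s, int v) { s->x = v; }
//
// the bit-field branch above goes through EmitStoreThroughBitfieldLValue,
// and with -fsanitize=implicit-bitfield-conversion the value actually stored
// is compared against the original RHS to detect truncation or sign change.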
LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}
LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
         && "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeNaturalAlignRawAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
  return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
      .withElementType(ConvertType(E->getType()));
}

LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
  return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
                        AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  Address V =
      CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
  return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

llvm::Value *
CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
  QualType PointerDiffType = getContext().getPointerDiffType();
  return Builder.CreateZExtOrTrunc(OffsetValue,
                                   getTypes().ConvertType(PointerDiffType));
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}
LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = nullptr;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    BaseValue = BaseLV.getPointer(*this);
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
      EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                        BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                        AlignmentSource::Decl);
}
RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
                                 const CallExpr *E, ReturnValueSlot ReturnValue,
                                 llvm::Value *Chain) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  const Decl *TargetDecl =
      OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();

  assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
          !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
         "trying to emit a call to an immediate function");

  CalleeType = getContext().getCanonicalType(CalleeType);

  auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();

  CGCallee Callee = OrigCallee;

  if (SanOpts.has(SanitizerKind::Function) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
      !isa<FunctionNoProtoType>(PointeeType)) {
    if (llvm::Constant *PrefixSig =
            CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
      SanitizerScope SanScope(this);
      auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);

      llvm::Type *PrefixSigType = PrefixSig->getType();
      llvm::StructType *PrefixStructTy = llvm::StructType::get(
          CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);

      llvm::Value *CalleePtr = Callee.getFunctionPointer();
      if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
        // Use raw pointer since we are using the callee pointer as data here.
        Address Addr =
            Address(CalleePtr, CalleePtr->getType(),
                    CharUnits::fromQuantity(
                        CalleePtr->getPointerAlignment(CGM.getDataLayout())),
                    Callee.getPointerAuthInfo(), nullptr);
        CalleePtr = Addr.emitRawPointer(*this);
      }

      // On 32-bit Arm, the low bit of a function pointer indicates whether
      // it's using the Arm or Thumb instruction set. The actual first
      // instruction lives at the same address either way, so we must clear
      // that low bit before using the function address to find the prefix
      // structure.
      //
      // This applies to both Arm and Thumb target triples, because
      // either one could be used in an interworking context where it
      // might be passed function pointers of both types.
      llvm::Value *AlignedCalleePtr;
      if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
        llvm::Value *CalleeAddress =
            Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
        llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
        llvm::Value *AlignedCalleeAddress =
            Builder.CreateAnd(CalleeAddress, Mask);
        AlignedCalleePtr =
            Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
      } else {
        AlignedCalleePtr = CalleePtr;
      }

      llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
      llvm::Value *CalleeSigPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
      llvm::Value *CalleeSig =
          Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);

      llvm::BasicBlock *Cont = createBasicBlock("cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);

      EmitBlock(TypeCheck);
      llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
          Int32Ty,
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
          getPointerAlign());
      llvm::Value *CalleeTypeHashMatch =
          Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
      llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
                                      EmitCheckTypeDescriptor(CalleeType)};
      EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::Function),
                SanitizerHandler::FunctionTypeMismatch, StaticData,
                {CalleePtr});

      Builder.CreateBr(Cont);
      EmitBlock(Cont);
    }
  }

  const auto *FnType = cast<FunctionType>(PointeeType);

  // If we are checking indirect calls and this call is indirect, check that the
  // function pointer is a member of the bit set for the function type.
  if (SanOpts.has(SanitizerKind::CFIICall) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    SanitizerScope SanScope(this);
    EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);

    llvm::Metadata *MD;
    if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
      MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
    else
      MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));

    llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    llvm::Value *TypeTest = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});

    auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
    llvm::Constant *StaticData[] = {
        llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
        EmitCheckSourceLocation(E->getBeginLoc()),
        EmitCheckTypeDescriptor(QualType(FnType, 0)),
    };
    if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
      EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
                           CalleePtr, StaticData);
    } else {
      EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
                SanitizerHandler::CFICheckFail, StaticData,
                {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
    }
  }

  CallArgList Args;
  if (Chain)
    Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);

  // C++17 requires that we evaluate arguments to a call using assignment syntax
  // right-to-left, and that we evaluate arguments to certain other operators
  // left-to-right. Note that we allow this to override the order dictated by
  // the calling convention on the MS ABI, which means that parameter
  // destruction order is not necessarily reverse construction order.
  // FIXME: Revisit this based on C++ committee response to unimplementability.
  EvaluationOrder Order = EvaluationOrder::Default;
  bool StaticOperator = false;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
    if (OCE->isAssignmentOp())
      Order = EvaluationOrder::ForceRightToLeft;
    else {
      switch (OCE->getOperator()) {
      case OO_LessLess:
      case OO_GreaterGreater:
      case OO_AmpAmp:
      case OO_PipePipe:
      case OO_Comma:
      case OO_ArrowStar:
        Order = EvaluationOrder::ForceLeftToRight;
        break;
      default:
        break;
      }
    }

    if (const auto *MD =
            dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
        MD && MD->isStatic())
      StaticOperator = true;
  }

  auto Arguments = E->arguments();
  if (StaticOperator) {
    // If we're calling a static operator, we need to emit the object argument
    // and ignore it.
    EmitIgnoredExpr(E->getArg(0));
    Arguments = drop_begin(Arguments, 1);
  }
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
               E->getDirectCallee(), /*ParamsToSkip=*/0, Order);

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
      Args, FnType, /*ChainCall=*/Chain);

  // C99 6.5.2.2p6:
  // If the expression that denotes the called function has a type
  // that does not include a prototype, [the default argument
  // promotions are performed]. If the number of arguments does not
  // equal the number of parameters, the behavior is undefined. If
  // the function is defined with a type that includes a prototype,
  // and either the prototype ends with an ellipsis (, ...) or the
  // types of the arguments after promotion are not compatible with
  // the types of the parameters, the behavior is undefined. If the
  // function is defined with a type that does not include a
  // prototype, and the types of the arguments after promotion are
  // not compatible with those of the parameters after promotion,
  // the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call. The way we make this work is to cast to the exact type
  // of the promoted arguments.
  //
  // Chain calls use this same code path to add the invisible chain parameter
  // to the function type.
  if (isa<FunctionNoProtoType>(FnType) || Chain) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
    CalleeTy = CalleeTy->getPointerTo(AS);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
    Callee.setFunctionPointer(CalleePtr);
  }

  // HIP function pointer contains kernel handle when it is used in triple
  // chevron. The kernel stub needs to be loaded from kernel handle and used
  // as callee.
  if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
      isa<CUDAKernelCallExpr>(E) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    llvm::Value *Handle = Callee.getFunctionPointer();
    auto *Stub = Builder.CreateLoad(
        Address(Handle, Handle->getType(), CGM.getPointerAlign()));
    Callee.setFunctionPointer(Stub);
  }
  llvm::CallBase *CallOrInvoke = nullptr;
  RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
                         E == MustTailCall, E->getExprLoc());

  // Generate function declaration DISubprogram in order to be used
  // in debug info about call sites.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      FunctionArgList Args;
      QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
      DI->EmitFuncDeclForCallSite(CallOrInvoke,
                                  DI->getFunctionType(CalleeDecl, ResTy, Args),
                                  CalleeDecl);
    }
  }

  return Call;
}
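// NOTE (illustrative commentary, not from the upstream source): the
// unprototyped-call path above is the C K&R case, e.g.
//
//   void f();                       // no prototype (pre-C23 C)
//   void g(void) { f(1, 2.0); }
//
// The arguments receive the default promotions and the callee is cast to the
// exact promoted signature, so the call is emitted as if f were a
// non-variadic function with that signature.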
LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  Address BaseAddr = Address::invalid();
  if (E->getOpcode() == BO_PtrMemI) {
    BaseAddr = EmitPointerWithAlignment(E->getLHS());
  } else {
    BaseAddr = EmitLValue(E->getLHS()).getAddress();
  }

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
  const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();

  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address MemberAddr =
      EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
                                      &TBAAInfo);

  return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
}

/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(Address addr,
                                            QualType type,
                                            SourceLocation loc) {
  LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
  case TEK_Aggregate:
    return lvalue.asAggregateRValue();
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}
void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 3ulp
    //
    // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-sqrt flag
    SetFPAccuracy(Val, 3.0f);
  }
}
void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
    //
    // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-div flag
    SetFPAccuracy(Val, 2.5f);
  }
}
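// NOTE (illustrative commentary, not from the upstream source): the effect
// of SetFPAccuracy is !fpmath metadata on the instruction, e.g. for an
// OpenCL single-precision divide compiled without
// -cl-fp32-correctly-rounded-divide-sqrt:
//
//   %div = fdiv float %x, %y, !fpmath !0
//   !0 = !{float 2.500000e+00}   ; permitted error in ULPs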
struct LValueOrRValue {
  LValue LV;
  RValue RV;
};

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      // Skip unique OVEs.
      if (ov->isUnique()) {
        assert(ov != resultExpr &&
               "A unique OVE cannot be used as the result expression");
        continue;
      }

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isPRValue() && !forLValue &&
          CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);
        LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
                                       AlignmentSource::Decl);
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}