1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This contains code to emit Expr nodes as LLVM code.
11 //===----------------------------------------------------------------------===//
13 #include "CGCUDARuntime.h"
16 #include "CGCleanup.h"
17 #include "CGDebugInfo.h"
18 #include "CGObjCRuntime.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CGRecordLayout.h"
21 #include "CodeGenFunction.h"
22 #include "CodeGenModule.h"
23 #include "ConstantEmitter.h"
24 #include "TargetInfo.h"
25 #include "clang/AST/ASTContext.h"
26 #include "clang/AST/Attr.h"
27 #include "clang/AST/DeclObjC.h"
28 #include "clang/AST/NSAPI.h"
29 #include "clang/Basic/Builtins.h"
30 #include "clang/Basic/CodeGenOptions.h"
31 #include "clang/Basic/SourceManager.h"
32 #include "llvm/ADT/Hashing.h"
33 #include "llvm/ADT/STLExtras.h"
34 #include "llvm/ADT/StringExtras.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/IntrinsicsWebAssembly.h"
38 #include "llvm/IR/LLVMContext.h"
39 #include "llvm/IR/MDBuilder.h"
40 #include "llvm/IR/MatrixBuilder.h"
41 #include "llvm/Passes/OptimizationLevel.h"
42 #include "llvm/Support/ConvertUTF.h"
43 #include "llvm/Support/MathExtras.h"
44 #include "llvm/Support/Path.h"
45 #include "llvm/Support/SaveAndRestore.h"
46 #include "llvm/Support/xxhash.h"
47 #include "llvm/Transforms/Utils/SanitizerStats.h"
52 using namespace clang
;
53 using namespace CodeGen
;
55 // Experiment to make sanitizers easier to debug
56 static llvm::cl::opt
<bool> ClSanitizeDebugDeoptimization(
57 "ubsan-unique-traps", llvm::cl::Optional
,
58 llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check"),
59 llvm::cl::init(false));
61 //===--------------------------------------------------------------------===//
62 // Miscellaneous Helper Methods
63 //===--------------------------------------------------------------------===//
65 /// CreateTempAlloca - This creates a alloca and inserts it into the entry
67 Address
CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type
*Ty
,
70 llvm::Value
*ArraySize
) {
71 auto Alloca
= CreateTempAlloca(Ty
, Name
, ArraySize
);
72 Alloca
->setAlignment(Align
.getAsAlign());
73 return Address(Alloca
, Ty
, Align
, KnownNonNull
);
76 /// CreateTempAlloca - This creates a alloca and inserts it into the entry
77 /// block. The alloca is casted to default address space if necessary.
78 Address
CodeGenFunction::CreateTempAlloca(llvm::Type
*Ty
, CharUnits Align
,
80 llvm::Value
*ArraySize
,
81 Address
*AllocaAddr
) {
82 auto Alloca
= CreateTempAllocaWithoutCast(Ty
, Align
, Name
, ArraySize
);
85 llvm::Value
*V
= Alloca
.getPointer();
86 // Alloca always returns a pointer in alloca address space, which may
87 // be different from the type defined by the language. For example,
88 // in C++ the auto variables are in the default address space. Therefore
89 // cast alloca to the default address space when necessary.
90 if (getASTAllocaAddressSpace() != LangAS::Default
) {
91 auto DestAddrSpace
= getContext().getTargetAddressSpace(LangAS::Default
);
92 llvm::IRBuilderBase::InsertPointGuard
IPG(Builder
);
93 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
94 // otherwise alloca is inserted at the current insertion point of the
97 Builder
.SetInsertPoint(getPostAllocaInsertPoint());
98 V
= getTargetHooks().performAddrSpaceCast(
99 *this, V
, getASTAllocaAddressSpace(), LangAS::Default
,
100 Ty
->getPointerTo(DestAddrSpace
), /*non-null*/ true);
103 return Address(V
, Ty
, Align
, KnownNonNull
);
106 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
107 /// block if \p ArraySize is nullptr, otherwise inserts it at the current
108 /// insertion point of the builder.
109 llvm::AllocaInst
*CodeGenFunction::CreateTempAlloca(llvm::Type
*Ty
,
111 llvm::Value
*ArraySize
) {
113 return Builder
.CreateAlloca(Ty
, ArraySize
, Name
);
114 return new llvm::AllocaInst(Ty
, CGM
.getDataLayout().getAllocaAddrSpace(),
115 ArraySize
, Name
, AllocaInsertPt
);
118 /// CreateDefaultAlignTempAlloca - This creates an alloca with the
119 /// default alignment of the corresponding LLVM type, which is *not*
120 /// guaranteed to be related in any way to the expected alignment of
121 /// an AST type that might have been lowered to Ty.
122 Address
CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type
*Ty
,
125 CharUnits::fromQuantity(CGM
.getDataLayout().getPrefTypeAlign(Ty
));
126 return CreateTempAlloca(Ty
, Align
, Name
);
129 Address
CodeGenFunction::CreateIRTemp(QualType Ty
, const Twine
&Name
) {
130 CharUnits Align
= getContext().getTypeAlignInChars(Ty
);
131 return CreateTempAlloca(ConvertType(Ty
), Align
, Name
);
134 Address
CodeGenFunction::CreateMemTemp(QualType Ty
, const Twine
&Name
,
136 // FIXME: Should we prefer the preferred type alignment here?
137 return CreateMemTemp(Ty
, getContext().getTypeAlignInChars(Ty
), Name
, Alloca
);
140 Address
CodeGenFunction::CreateMemTemp(QualType Ty
, CharUnits Align
,
141 const Twine
&Name
, Address
*Alloca
) {
142 Address Result
= CreateTempAlloca(ConvertTypeForMem(Ty
), Align
, Name
,
143 /*ArraySize=*/nullptr, Alloca
);
145 if (Ty
->isConstantMatrixType()) {
146 auto *ArrayTy
= cast
<llvm::ArrayType
>(Result
.getElementType());
147 auto *VectorTy
= llvm::FixedVectorType::get(ArrayTy
->getElementType(),
148 ArrayTy
->getNumElements());
150 Result
= Address(Result
.getPointer(), VectorTy
, Result
.getAlignment(),
156 Address
CodeGenFunction::CreateMemTempWithoutCast(QualType Ty
, CharUnits Align
,
158 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty
), Align
, Name
);
161 Address
CodeGenFunction::CreateMemTempWithoutCast(QualType Ty
,
163 return CreateMemTempWithoutCast(Ty
, getContext().getTypeAlignInChars(Ty
),
167 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
168 /// expression and compare the result against zero, returning an Int1Ty value.
169 llvm::Value
*CodeGenFunction::EvaluateExprAsBool(const Expr
*E
) {
170 PGO
.setCurrentStmt(E
);
171 if (const MemberPointerType
*MPT
= E
->getType()->getAs
<MemberPointerType
>()) {
172 llvm::Value
*MemPtr
= EmitScalarExpr(E
);
173 return CGM
.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr
, MPT
);
176 QualType BoolTy
= getContext().BoolTy
;
177 SourceLocation Loc
= E
->getExprLoc();
178 CGFPOptionsRAII
FPOptsRAII(*this, E
);
179 if (!E
->getType()->isAnyComplexType())
180 return EmitScalarConversion(EmitScalarExpr(E
), E
->getType(), BoolTy
, Loc
);
182 return EmitComplexToScalarConversion(EmitComplexExpr(E
), E
->getType(), BoolTy
,
186 /// EmitIgnoredExpr - Emit code to compute the specified expression,
187 /// ignoring the result.
188 void CodeGenFunction::EmitIgnoredExpr(const Expr
*E
) {
190 return (void)EmitAnyExpr(E
, AggValueSlot::ignored(), true);
192 // if this is a bitfield-resulting conditional operator, we can special case
193 // emit this. The normal 'EmitLValue' version of this is particularly
194 // difficult to codegen for, since creating a single "LValue" for two
195 // different sized arguments here is not particularly doable.
196 if (const auto *CondOp
= dyn_cast
<AbstractConditionalOperator
>(
197 E
->IgnoreParenNoopCasts(getContext()))) {
198 if (CondOp
->getObjectKind() == OK_BitField
)
199 return EmitIgnoredConditionalOperator(CondOp
);
202 // Just emit it as an l-value and drop the result.
206 /// EmitAnyExpr - Emit code to compute the specified expression which
207 /// can have any type. The result is returned as an RValue struct.
208 /// If this is an aggregate expression, AggSlot indicates where the
209 /// result should be returned.
210 RValue
CodeGenFunction::EmitAnyExpr(const Expr
*E
,
211 AggValueSlot aggSlot
,
213 switch (getEvaluationKind(E
->getType())) {
215 return RValue::get(EmitScalarExpr(E
, ignoreResult
));
217 return RValue::getComplex(EmitComplexExpr(E
, ignoreResult
, ignoreResult
));
219 if (!ignoreResult
&& aggSlot
.isIgnored())
220 aggSlot
= CreateAggTemp(E
->getType(), "agg-temp");
221 EmitAggExpr(E
, aggSlot
);
222 return aggSlot
.asRValue();
224 llvm_unreachable("bad evaluation kind");
227 /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
228 /// always be accessible even if no aggregate location is provided.
229 RValue
CodeGenFunction::EmitAnyExprToTemp(const Expr
*E
) {
230 AggValueSlot AggSlot
= AggValueSlot::ignored();
232 if (hasAggregateEvaluationKind(E
->getType()))
233 AggSlot
= CreateAggTemp(E
->getType(), "agg.tmp");
234 return EmitAnyExpr(E
, AggSlot
);
237 /// EmitAnyExprToMem - Evaluate an expression into a given memory
239 void CodeGenFunction::EmitAnyExprToMem(const Expr
*E
,
243 // FIXME: This function should take an LValue as an argument.
244 switch (getEvaluationKind(E
->getType())) {
246 EmitComplexExprIntoLValue(E
, MakeAddrLValue(Location
, E
->getType()),
250 case TEK_Aggregate
: {
251 EmitAggExpr(E
, AggValueSlot::forAddr(Location
, Quals
,
252 AggValueSlot::IsDestructed_t(IsInit
),
253 AggValueSlot::DoesNotNeedGCBarriers
,
254 AggValueSlot::IsAliased_t(!IsInit
),
255 AggValueSlot::MayOverlap
));
260 RValue RV
= RValue::get(EmitScalarExpr(E
, /*Ignore*/ false));
261 LValue LV
= MakeAddrLValue(Location
, E
->getType());
262 EmitStoreThroughLValue(RV
, LV
);
266 llvm_unreachable("bad evaluation kind");
270 pushTemporaryCleanup(CodeGenFunction
&CGF
, const MaterializeTemporaryExpr
*M
,
271 const Expr
*E
, Address ReferenceTemporary
) {
272 // Objective-C++ ARC:
273 // If we are binding a reference to a temporary that has ownership, we
274 // need to perform retain/release operations on the temporary.
276 // FIXME: This should be looking at E, not M.
277 if (auto Lifetime
= M
->getType().getObjCLifetime()) {
279 case Qualifiers::OCL_None
:
280 case Qualifiers::OCL_ExplicitNone
:
281 // Carry on to normal cleanup handling.
284 case Qualifiers::OCL_Autoreleasing
:
285 // Nothing to do; cleaned up by an autorelease pool.
288 case Qualifiers::OCL_Strong
:
289 case Qualifiers::OCL_Weak
:
290 switch (StorageDuration Duration
= M
->getStorageDuration()) {
292 // Note: we intentionally do not register a cleanup to release
293 // the object on program termination.
297 // FIXME: We should probably register a cleanup in this case.
301 case SD_FullExpression
:
302 CodeGenFunction::Destroyer
*Destroy
;
303 CleanupKind CleanupKind
;
304 if (Lifetime
== Qualifiers::OCL_Strong
) {
305 const ValueDecl
*VD
= M
->getExtendingDecl();
307 VD
&& isa
<VarDecl
>(VD
) && VD
->hasAttr
<ObjCPreciseLifetimeAttr
>();
308 CleanupKind
= CGF
.getARCCleanupKind();
309 Destroy
= Precise
? &CodeGenFunction::destroyARCStrongPrecise
310 : &CodeGenFunction::destroyARCStrongImprecise
;
312 // __weak objects always get EH cleanups; otherwise, exceptions
313 // could cause really nasty crashes instead of mere leaks.
314 CleanupKind
= NormalAndEHCleanup
;
315 Destroy
= &CodeGenFunction::destroyARCWeak
;
317 if (Duration
== SD_FullExpression
)
318 CGF
.pushDestroy(CleanupKind
, ReferenceTemporary
,
319 M
->getType(), *Destroy
,
320 CleanupKind
& EHCleanup
);
322 CGF
.pushLifetimeExtendedDestroy(CleanupKind
, ReferenceTemporary
,
324 *Destroy
, CleanupKind
& EHCleanup
);
328 llvm_unreachable("temporary cannot have dynamic storage duration");
330 llvm_unreachable("unknown storage duration");
334 CXXDestructorDecl
*ReferenceTemporaryDtor
= nullptr;
335 if (const RecordType
*RT
=
336 E
->getType()->getBaseElementTypeUnsafe()->getAs
<RecordType
>()) {
337 // Get the destructor for the reference temporary.
338 auto *ClassDecl
= cast
<CXXRecordDecl
>(RT
->getDecl());
339 if (!ClassDecl
->hasTrivialDestructor())
340 ReferenceTemporaryDtor
= ClassDecl
->getDestructor();
343 if (!ReferenceTemporaryDtor
)
346 // Call the destructor for the temporary.
347 switch (M
->getStorageDuration()) {
350 llvm::FunctionCallee CleanupFn
;
351 llvm::Constant
*CleanupArg
;
352 if (E
->getType()->isArrayType()) {
353 CleanupFn
= CodeGenFunction(CGF
.CGM
).generateDestroyHelper(
354 ReferenceTemporary
, E
->getType(),
355 CodeGenFunction::destroyCXXObject
, CGF
.getLangOpts().Exceptions
,
356 dyn_cast_or_null
<VarDecl
>(M
->getExtendingDecl()));
357 CleanupArg
= llvm::Constant::getNullValue(CGF
.Int8PtrTy
);
359 CleanupFn
= CGF
.CGM
.getAddrAndTypeOfCXXStructor(
360 GlobalDecl(ReferenceTemporaryDtor
, Dtor_Complete
));
361 CleanupArg
= cast
<llvm::Constant
>(ReferenceTemporary
.getPointer());
363 CGF
.CGM
.getCXXABI().registerGlobalDtor(
364 CGF
, *cast
<VarDecl
>(M
->getExtendingDecl()), CleanupFn
, CleanupArg
);
368 case SD_FullExpression
:
369 CGF
.pushDestroy(NormalAndEHCleanup
, ReferenceTemporary
, E
->getType(),
370 CodeGenFunction::destroyCXXObject
,
371 CGF
.getLangOpts().Exceptions
);
375 CGF
.pushLifetimeExtendedDestroy(NormalAndEHCleanup
,
376 ReferenceTemporary
, E
->getType(),
377 CodeGenFunction::destroyCXXObject
,
378 CGF
.getLangOpts().Exceptions
);
382 llvm_unreachable("temporary cannot have dynamic storage duration");
386 static Address
createReferenceTemporary(CodeGenFunction
&CGF
,
387 const MaterializeTemporaryExpr
*M
,
389 Address
*Alloca
= nullptr) {
390 auto &TCG
= CGF
.getTargetHooks();
391 switch (M
->getStorageDuration()) {
392 case SD_FullExpression
:
394 // If we have a constant temporary array or record try to promote it into a
395 // constant global under the same rules a normal constant would've been
396 // promoted. This is easier on the optimizer and generally emits fewer
398 QualType Ty
= Inner
->getType();
399 if (CGF
.CGM
.getCodeGenOpts().MergeAllConstants
&&
400 (Ty
->isArrayType() || Ty
->isRecordType()) &&
401 Ty
.isConstantStorage(CGF
.getContext(), true, false))
402 if (auto Init
= ConstantEmitter(CGF
).tryEmitAbstract(Inner
, Ty
)) {
403 auto AS
= CGF
.CGM
.GetGlobalConstantAddressSpace();
404 auto *GV
= new llvm::GlobalVariable(
405 CGF
.CGM
.getModule(), Init
->getType(), /*isConstant=*/true,
406 llvm::GlobalValue::PrivateLinkage
, Init
, ".ref.tmp", nullptr,
407 llvm::GlobalValue::NotThreadLocal
,
408 CGF
.getContext().getTargetAddressSpace(AS
));
409 CharUnits alignment
= CGF
.getContext().getTypeAlignInChars(Ty
);
410 GV
->setAlignment(alignment
.getAsAlign());
411 llvm::Constant
*C
= GV
;
412 if (AS
!= LangAS::Default
)
413 C
= TCG
.performAddrSpaceCast(
414 CGF
.CGM
, GV
, AS
, LangAS::Default
,
415 GV
->getValueType()->getPointerTo(
416 CGF
.getContext().getTargetAddressSpace(LangAS::Default
)));
417 // FIXME: Should we put the new global into a COMDAT?
418 return Address(C
, GV
->getValueType(), alignment
);
420 return CGF
.CreateMemTemp(Ty
, "ref.tmp", Alloca
);
424 return CGF
.CGM
.GetAddrOfGlobalTemporary(M
, Inner
);
427 llvm_unreachable("temporary can't have dynamic storage duration");
429 llvm_unreachable("unknown storage duration");
432 /// Helper method to check if the underlying ABI is AAPCS
433 static bool isAAPCS(const TargetInfo
&TargetInfo
) {
434 return TargetInfo
.getABI().startswith("aapcs");
437 LValue
CodeGenFunction::
438 EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr
*M
) {
439 const Expr
*E
= M
->getSubExpr();
441 assert((!M
->getExtendingDecl() || !isa
<VarDecl
>(M
->getExtendingDecl()) ||
442 !cast
<VarDecl
>(M
->getExtendingDecl())->isARCPseudoStrong()) &&
443 "Reference should never be pseudo-strong!");
445 // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
446 // as that will cause the lifetime adjustment to be lost for ARC
447 auto ownership
= M
->getType().getObjCLifetime();
448 if (ownership
!= Qualifiers::OCL_None
&&
449 ownership
!= Qualifiers::OCL_ExplicitNone
) {
450 Address Object
= createReferenceTemporary(*this, M
, E
);
451 if (auto *Var
= dyn_cast
<llvm::GlobalVariable
>(Object
.getPointer())) {
452 llvm::Type
*Ty
= ConvertTypeForMem(E
->getType());
453 Object
= Object
.withElementType(Ty
);
455 // createReferenceTemporary will promote the temporary to a global with a
456 // constant initializer if it can. It can only do this to a value of
457 // ARC-manageable type if the value is global and therefore "immune" to
458 // ref-counting operations. Therefore we have no need to emit either a
459 // dynamic initialization or a cleanup and we can just return the address
461 if (Var
->hasInitializer())
462 return MakeAddrLValue(Object
, M
->getType(), AlignmentSource::Decl
);
464 Var
->setInitializer(CGM
.EmitNullConstant(E
->getType()));
466 LValue RefTempDst
= MakeAddrLValue(Object
, M
->getType(),
467 AlignmentSource::Decl
);
469 switch (getEvaluationKind(E
->getType())) {
470 default: llvm_unreachable("expected scalar or aggregate expression");
472 EmitScalarInit(E
, M
->getExtendingDecl(), RefTempDst
, false);
474 case TEK_Aggregate
: {
475 EmitAggExpr(E
, AggValueSlot::forAddr(Object
,
476 E
->getType().getQualifiers(),
477 AggValueSlot::IsDestructed
,
478 AggValueSlot::DoesNotNeedGCBarriers
,
479 AggValueSlot::IsNotAliased
,
480 AggValueSlot::DoesNotOverlap
));
485 pushTemporaryCleanup(*this, M
, E
, Object
);
489 SmallVector
<const Expr
*, 2> CommaLHSs
;
490 SmallVector
<SubobjectAdjustment
, 2> Adjustments
;
491 E
= E
->skipRValueSubobjectAdjustments(CommaLHSs
, Adjustments
);
493 for (const auto &Ignored
: CommaLHSs
)
494 EmitIgnoredExpr(Ignored
);
496 if (const auto *opaque
= dyn_cast
<OpaqueValueExpr
>(E
)) {
497 if (opaque
->getType()->isRecordType()) {
498 assert(Adjustments
.empty());
499 return EmitOpaqueValueLValue(opaque
);
503 // Create and initialize the reference temporary.
504 Address Alloca
= Address::invalid();
505 Address Object
= createReferenceTemporary(*this, M
, E
, &Alloca
);
506 if (auto *Var
= dyn_cast
<llvm::GlobalVariable
>(
507 Object
.getPointer()->stripPointerCasts())) {
508 llvm::Type
*TemporaryType
= ConvertTypeForMem(E
->getType());
509 Object
= Object
.withElementType(TemporaryType
);
510 // If the temporary is a global and has a constant initializer or is a
511 // constant temporary that we promoted to a global, we may have already
513 if (!Var
->hasInitializer()) {
514 Var
->setInitializer(CGM
.EmitNullConstant(E
->getType()));
515 EmitAnyExprToMem(E
, Object
, Qualifiers(), /*IsInit*/true);
518 switch (M
->getStorageDuration()) {
520 if (auto *Size
= EmitLifetimeStart(
521 CGM
.getDataLayout().getTypeAllocSize(Alloca
.getElementType()),
522 Alloca
.getPointer())) {
523 pushCleanupAfterFullExpr
<CallLifetimeEnd
>(NormalEHLifetimeMarker
,
528 case SD_FullExpression
: {
529 if (!ShouldEmitLifetimeMarkers
)
532 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
533 // marker. Instead, start the lifetime of a conditional temporary earlier
534 // so that it's unconditional. Don't do this with sanitizers which need
535 // more precise lifetime marks. However when inside an "await.suspend"
536 // block, we should always avoid conditional cleanup because it creates
537 // boolean marker that lives across await_suspend, which can destroy coro
539 ConditionalEvaluation
*OldConditional
= nullptr;
540 CGBuilderTy::InsertPoint OldIP
;
541 if (isInConditionalBranch() && !E
->getType().isDestructedType() &&
542 ((!SanOpts
.has(SanitizerKind::HWAddress
) &&
543 !SanOpts
.has(SanitizerKind::Memory
) &&
544 !CGM
.getCodeGenOpts().SanitizeAddressUseAfterScope
) ||
546 OldConditional
= OutermostConditional
;
547 OutermostConditional
= nullptr;
549 OldIP
= Builder
.saveIP();
550 llvm::BasicBlock
*Block
= OldConditional
->getStartingBlock();
551 Builder
.restoreIP(CGBuilderTy::InsertPoint(
552 Block
, llvm::BasicBlock::iterator(Block
->back())));
555 if (auto *Size
= EmitLifetimeStart(
556 CGM
.getDataLayout().getTypeAllocSize(Alloca
.getElementType()),
557 Alloca
.getPointer())) {
558 pushFullExprCleanup
<CallLifetimeEnd
>(NormalEHLifetimeMarker
, Alloca
,
562 if (OldConditional
) {
563 OutermostConditional
= OldConditional
;
564 Builder
.restoreIP(OldIP
);
572 EmitAnyExprToMem(E
, Object
, Qualifiers(), /*IsInit*/true);
574 pushTemporaryCleanup(*this, M
, E
, Object
);
576 // Perform derived-to-base casts and/or field accesses, to get from the
577 // temporary object we created (and, potentially, for which we extended
578 // the lifetime) to the subobject we're binding the reference to.
579 for (SubobjectAdjustment
&Adjustment
: llvm::reverse(Adjustments
)) {
580 switch (Adjustment
.Kind
) {
581 case SubobjectAdjustment::DerivedToBaseAdjustment
:
583 GetAddressOfBaseClass(Object
, Adjustment
.DerivedToBase
.DerivedClass
,
584 Adjustment
.DerivedToBase
.BasePath
->path_begin(),
585 Adjustment
.DerivedToBase
.BasePath
->path_end(),
586 /*NullCheckValue=*/ false, E
->getExprLoc());
589 case SubobjectAdjustment::FieldAdjustment
: {
590 LValue LV
= MakeAddrLValue(Object
, E
->getType(), AlignmentSource::Decl
);
591 LV
= EmitLValueForField(LV
, Adjustment
.Field
);
592 assert(LV
.isSimple() &&
593 "materialized temporary field is not a simple lvalue");
594 Object
= LV
.getAddress(*this);
598 case SubobjectAdjustment::MemberPointerAdjustment
: {
599 llvm::Value
*Ptr
= EmitScalarExpr(Adjustment
.Ptr
.RHS
);
600 Object
= EmitCXXMemberDataPointerAddress(E
, Object
, Ptr
,
607 return MakeAddrLValue(Object
, M
->getType(), AlignmentSource::Decl
);
611 CodeGenFunction::EmitReferenceBindingToExpr(const Expr
*E
) {
612 // Emit the expression as an lvalue.
613 LValue LV
= EmitLValue(E
);
614 assert(LV
.isSimple());
615 llvm::Value
*Value
= LV
.getPointer(*this);
617 if (sanitizePerformTypeCheck() && !E
->getType()->isFunctionType()) {
618 // C++11 [dcl.ref]p5 (as amended by core issue 453):
619 // If a glvalue to which a reference is directly bound designates neither
620 // an existing object or function of an appropriate type nor a region of
621 // storage of suitable size and alignment to contain an object of the
622 // reference's type, the behavior is undefined.
623 QualType Ty
= E
->getType();
624 EmitTypeCheck(TCK_ReferenceBinding
, E
->getExprLoc(), Value
, Ty
);
627 return RValue::get(Value
);
631 /// getAccessedFieldNo - Given an encoded value and a result number, return the
632 /// input field number being accessed.
633 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx
,
634 const llvm::Constant
*Elts
) {
635 return cast
<llvm::ConstantInt
>(Elts
->getAggregateElement(Idx
))
639 /// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
640 static llvm::Value
*emitHash16Bytes(CGBuilderTy
&Builder
, llvm::Value
*Low
,
642 llvm::Value
*KMul
= Builder
.getInt64(0x9ddfea08eb382d69ULL
);
643 llvm::Value
*K47
= Builder
.getInt64(47);
644 llvm::Value
*A0
= Builder
.CreateMul(Builder
.CreateXor(Low
, High
), KMul
);
645 llvm::Value
*A1
= Builder
.CreateXor(Builder
.CreateLShr(A0
, K47
), A0
);
646 llvm::Value
*B0
= Builder
.CreateMul(Builder
.CreateXor(High
, A1
), KMul
);
647 llvm::Value
*B1
= Builder
.CreateXor(Builder
.CreateLShr(B0
, K47
), B0
);
648 return Builder
.CreateMul(B1
, KMul
);
651 bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK
) {
652 return TCK
== TCK_DowncastPointer
|| TCK
== TCK_Upcast
||
653 TCK
== TCK_UpcastToVirtualBase
|| TCK
== TCK_DynamicOperation
;
656 bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK
, QualType Ty
) {
657 CXXRecordDecl
*RD
= Ty
->getAsCXXRecordDecl();
658 return (RD
&& RD
->hasDefinition() && RD
->isDynamicClass()) &&
659 (TCK
== TCK_MemberAccess
|| TCK
== TCK_MemberCall
||
660 TCK
== TCK_DowncastPointer
|| TCK
== TCK_DowncastReference
||
661 TCK
== TCK_UpcastToVirtualBase
|| TCK
== TCK_DynamicOperation
);
664 bool CodeGenFunction::sanitizePerformTypeCheck() const {
665 return SanOpts
.has(SanitizerKind::Null
) ||
666 SanOpts
.has(SanitizerKind::Alignment
) ||
667 SanOpts
.has(SanitizerKind::ObjectSize
) ||
668 SanOpts
.has(SanitizerKind::Vptr
);
671 void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK
, SourceLocation Loc
,
672 llvm::Value
*Ptr
, QualType Ty
,
674 SanitizerSet SkippedChecks
,
675 llvm::Value
*ArraySize
) {
676 if (!sanitizePerformTypeCheck())
679 // Don't check pointers outside the default address space. The null check
680 // isn't correct, the object-size check isn't supported by LLVM, and we can't
681 // communicate the addresses to the runtime handler for the vptr check.
682 if (Ptr
->getType()->getPointerAddressSpace())
685 // Don't check pointers to volatile data. The behavior here is implementation-
687 if (Ty
.isVolatileQualified())
690 SanitizerScope
SanScope(this);
692 SmallVector
<std::pair
<llvm::Value
*, SanitizerMask
>, 3> Checks
;
693 llvm::BasicBlock
*Done
= nullptr;
695 // Quickly determine whether we have a pointer to an alloca. It's possible
696 // to skip null checks, and some alignment checks, for these pointers. This
697 // can reduce compile-time significantly.
698 auto PtrToAlloca
= dyn_cast
<llvm::AllocaInst
>(Ptr
->stripPointerCasts());
700 llvm::Value
*True
= llvm::ConstantInt::getTrue(getLLVMContext());
701 llvm::Value
*IsNonNull
= nullptr;
702 bool IsGuaranteedNonNull
=
703 SkippedChecks
.has(SanitizerKind::Null
) || PtrToAlloca
;
704 bool AllowNullPointers
= isNullPointerAllowed(TCK
);
705 if ((SanOpts
.has(SanitizerKind::Null
) || AllowNullPointers
) &&
706 !IsGuaranteedNonNull
) {
707 // The glvalue must not be an empty glvalue.
708 IsNonNull
= Builder
.CreateIsNotNull(Ptr
);
710 // The IR builder can constant-fold the null check if the pointer points to
712 IsGuaranteedNonNull
= IsNonNull
== True
;
714 // Skip the null check if the pointer is known to be non-null.
715 if (!IsGuaranteedNonNull
) {
716 if (AllowNullPointers
) {
717 // When performing pointer casts, it's OK if the value is null.
718 // Skip the remaining checks in that case.
719 Done
= createBasicBlock("null");
720 llvm::BasicBlock
*Rest
= createBasicBlock("not.null");
721 Builder
.CreateCondBr(IsNonNull
, Rest
, Done
);
724 Checks
.push_back(std::make_pair(IsNonNull
, SanitizerKind::Null
));
729 if (SanOpts
.has(SanitizerKind::ObjectSize
) &&
730 !SkippedChecks
.has(SanitizerKind::ObjectSize
) &&
731 !Ty
->isIncompleteType()) {
732 uint64_t TySize
= CGM
.getMinimumObjectSize(Ty
).getQuantity();
733 llvm::Value
*Size
= llvm::ConstantInt::get(IntPtrTy
, TySize
);
735 Size
= Builder
.CreateMul(Size
, ArraySize
);
737 // Degenerate case: new X[0] does not need an objectsize check.
738 llvm::Constant
*ConstantSize
= dyn_cast
<llvm::Constant
>(Size
);
739 if (!ConstantSize
|| !ConstantSize
->isNullValue()) {
740 // The glvalue must refer to a large enough storage region.
741 // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
743 // FIXME: Get object address space
744 llvm::Type
*Tys
[2] = { IntPtrTy
, Int8PtrTy
};
745 llvm::Function
*F
= CGM
.getIntrinsic(llvm::Intrinsic::objectsize
, Tys
);
746 llvm::Value
*Min
= Builder
.getFalse();
747 llvm::Value
*NullIsUnknown
= Builder
.getFalse();
748 llvm::Value
*Dynamic
= Builder
.getFalse();
749 llvm::Value
*LargeEnough
= Builder
.CreateICmpUGE(
750 Builder
.CreateCall(F
, {Ptr
, Min
, NullIsUnknown
, Dynamic
}), Size
);
751 Checks
.push_back(std::make_pair(LargeEnough
, SanitizerKind::ObjectSize
));
755 llvm::MaybeAlign AlignVal
;
756 llvm::Value
*PtrAsInt
= nullptr;
758 if (SanOpts
.has(SanitizerKind::Alignment
) &&
759 !SkippedChecks
.has(SanitizerKind::Alignment
)) {
760 AlignVal
= Alignment
.getAsMaybeAlign();
761 if (!Ty
->isIncompleteType() && !AlignVal
)
762 AlignVal
= CGM
.getNaturalTypeAlignment(Ty
, nullptr, nullptr,
763 /*ForPointeeType=*/true)
766 // The glvalue must be suitably aligned.
767 if (AlignVal
&& *AlignVal
> llvm::Align(1) &&
768 (!PtrToAlloca
|| PtrToAlloca
->getAlign() < *AlignVal
)) {
769 PtrAsInt
= Builder
.CreatePtrToInt(Ptr
, IntPtrTy
);
770 llvm::Value
*Align
= Builder
.CreateAnd(
771 PtrAsInt
, llvm::ConstantInt::get(IntPtrTy
, AlignVal
->value() - 1));
772 llvm::Value
*Aligned
=
773 Builder
.CreateICmpEQ(Align
, llvm::ConstantInt::get(IntPtrTy
, 0));
775 Checks
.push_back(std::make_pair(Aligned
, SanitizerKind::Alignment
));
779 if (Checks
.size() > 0) {
780 llvm::Constant
*StaticData
[] = {
781 EmitCheckSourceLocation(Loc
), EmitCheckTypeDescriptor(Ty
),
782 llvm::ConstantInt::get(Int8Ty
, AlignVal
? llvm::Log2(*AlignVal
) : 1),
783 llvm::ConstantInt::get(Int8Ty
, TCK
)};
784 EmitCheck(Checks
, SanitizerHandler::TypeMismatch
, StaticData
,
785 PtrAsInt
? PtrAsInt
: Ptr
);
788 // If possible, check that the vptr indicates that there is a subobject of
789 // type Ty at offset zero within this object.
791 // C++11 [basic.life]p5,6:
792 // [For storage which does not refer to an object within its lifetime]
793 // The program has undefined behavior if:
794 // -- the [pointer or glvalue] is used to access a non-static data member
795 // or call a non-static member function
796 if (SanOpts
.has(SanitizerKind::Vptr
) &&
797 !SkippedChecks
.has(SanitizerKind::Vptr
) && isVptrCheckRequired(TCK
, Ty
)) {
798 // Ensure that the pointer is non-null before loading it. If there is no
799 // compile-time guarantee, reuse the run-time null check or emit a new one.
800 if (!IsGuaranteedNonNull
) {
802 IsNonNull
= Builder
.CreateIsNotNull(Ptr
);
804 Done
= createBasicBlock("vptr.null");
805 llvm::BasicBlock
*VptrNotNull
= createBasicBlock("vptr.not.null");
806 Builder
.CreateCondBr(IsNonNull
, VptrNotNull
, Done
);
807 EmitBlock(VptrNotNull
);
810 // Compute a hash of the mangled name of the type.
812 // FIXME: This is not guaranteed to be deterministic! Move to a
813 // fingerprinting mechanism once LLVM provides one. For the time
814 // being the implementation happens to be deterministic.
815 SmallString
<64> MangledName
;
816 llvm::raw_svector_ostream
Out(MangledName
);
817 CGM
.getCXXABI().getMangleContext().mangleCXXRTTI(Ty
.getUnqualifiedType(),
820 // Contained in NoSanitizeList based on the mangled type.
821 if (!CGM
.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr
,
823 llvm::hash_code TypeHash
= hash_value(Out
.str());
825 // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
826 llvm::Value
*Low
= llvm::ConstantInt::get(Int64Ty
, TypeHash
);
827 Address
VPtrAddr(Ptr
, IntPtrTy
, getPointerAlign());
828 llvm::Value
*VPtrVal
= Builder
.CreateLoad(VPtrAddr
);
829 llvm::Value
*High
= Builder
.CreateZExt(VPtrVal
, Int64Ty
);
831 llvm::Value
*Hash
= emitHash16Bytes(Builder
, Low
, High
);
832 Hash
= Builder
.CreateTrunc(Hash
, IntPtrTy
);
834 // Look the hash up in our cache.
835 const int CacheSize
= 128;
836 llvm::Type
*HashTable
= llvm::ArrayType::get(IntPtrTy
, CacheSize
);
837 llvm::Value
*Cache
= CGM
.CreateRuntimeVariable(HashTable
,
838 "__ubsan_vptr_type_cache");
839 llvm::Value
*Slot
= Builder
.CreateAnd(Hash
,
840 llvm::ConstantInt::get(IntPtrTy
,
842 llvm::Value
*Indices
[] = { Builder
.getInt32(0), Slot
};
843 llvm::Value
*CacheVal
= Builder
.CreateAlignedLoad(
844 IntPtrTy
, Builder
.CreateInBoundsGEP(HashTable
, Cache
, Indices
),
847 // If the hash isn't in the cache, call a runtime handler to perform the
848 // hard work of checking whether the vptr is for an object of the right
849 // type. This will either fill in the cache and return, or produce a
851 llvm::Value
*EqualHash
= Builder
.CreateICmpEQ(CacheVal
, Hash
);
852 llvm::Constant
*StaticData
[] = {
853 EmitCheckSourceLocation(Loc
),
854 EmitCheckTypeDescriptor(Ty
),
855 CGM
.GetAddrOfRTTIDescriptor(Ty
.getUnqualifiedType()),
856 llvm::ConstantInt::get(Int8Ty
, TCK
)
858 llvm::Value
*DynamicData
[] = { Ptr
, Hash
};
859 EmitCheck(std::make_pair(EqualHash
, SanitizerKind::Vptr
),
860 SanitizerHandler::DynamicTypeCacheMiss
, StaticData
,
866 Builder
.CreateBr(Done
);
871 llvm::Value
*CodeGenFunction::LoadPassedObjectSize(const Expr
*E
,
873 ASTContext
&C
= getContext();
874 uint64_t EltSize
= C
.getTypeSizeInChars(EltTy
).getQuantity();
878 auto *ArrayDeclRef
= dyn_cast
<DeclRefExpr
>(E
->IgnoreParenImpCasts());
882 auto *ParamDecl
= dyn_cast
<ParmVarDecl
>(ArrayDeclRef
->getDecl());
886 auto *POSAttr
= ParamDecl
->getAttr
<PassObjectSizeAttr
>();
890 // Don't load the size if it's a lower bound.
891 int POSType
= POSAttr
->getType();
892 if (POSType
!= 0 && POSType
!= 1)
895 // Find the implicit size parameter.
896 auto PassedSizeIt
= SizeArguments
.find(ParamDecl
);
897 if (PassedSizeIt
== SizeArguments
.end())
900 const ImplicitParamDecl
*PassedSizeDecl
= PassedSizeIt
->second
;
901 assert(LocalDeclMap
.count(PassedSizeDecl
) && "Passed size not loadable");
902 Address AddrOfSize
= LocalDeclMap
.find(PassedSizeDecl
)->second
;
903 llvm::Value
*SizeInBytes
= EmitLoadOfScalar(AddrOfSize
, /*Volatile=*/false,
904 C
.getSizeType(), E
->getExprLoc());
905 llvm::Value
*SizeOfElement
=
906 llvm::ConstantInt::get(SizeInBytes
->getType(), EltSize
);
907 return Builder
.CreateUDiv(SizeInBytes
, SizeOfElement
);
910 /// If Base is known to point to the start of an array, return the length of
911 /// that array. Return 0 if the length cannot be determined.
912 static llvm::Value
*getArrayIndexingBound(CodeGenFunction
&CGF
,
914 QualType
&IndexedType
,
915 LangOptions::StrictFlexArraysLevelKind
916 StrictFlexArraysLevel
) {
917 // For the vector indexing extension, the bound is the number of elements.
918 if (const VectorType
*VT
= Base
->getType()->getAs
<VectorType
>()) {
919 IndexedType
= Base
->getType();
920 return CGF
.Builder
.getInt32(VT
->getNumElements());
923 Base
= Base
->IgnoreParens();
925 if (const auto *CE
= dyn_cast
<CastExpr
>(Base
)) {
926 if (CE
->getCastKind() == CK_ArrayToPointerDecay
&&
927 !CE
->getSubExpr()->isFlexibleArrayMemberLike(CGF
.getContext(),
928 StrictFlexArraysLevel
)) {
929 CodeGenFunction::SanitizerScope
SanScope(&CGF
);
931 IndexedType
= CE
->getSubExpr()->getType();
932 const ArrayType
*AT
= IndexedType
->castAsArrayTypeUnsafe();
933 if (const auto *CAT
= dyn_cast
<ConstantArrayType
>(AT
))
934 return CGF
.Builder
.getInt(CAT
->getSize());
936 if (const auto *VAT
= dyn_cast
<VariableArrayType
>(AT
))
937 return CGF
.getVLASize(VAT
).NumElts
;
938 // Ignore pass_object_size here. It's not applicable on decayed pointers.
941 if (FieldDecl
*FD
= CGF
.FindCountedByField(Base
, StrictFlexArraysLevel
)) {
942 const auto *ME
= dyn_cast
<MemberExpr
>(CE
->getSubExpr());
943 IndexedType
= Base
->getType();
945 .EmitAnyExprToTemp(MemberExpr::CreateImplicit(
946 CGF
.getContext(), const_cast<Expr
*>(ME
->getBase()),
947 ME
->isArrow(), FD
, FD
->getType(), VK_LValue
, OK_Ordinary
))
952 CodeGenFunction::SanitizerScope
SanScope(&CGF
);
954 QualType EltTy
{Base
->getType()->getPointeeOrArrayElementType(), 0};
955 if (llvm::Value
*POS
= CGF
.LoadPassedObjectSize(Base
, EltTy
)) {
956 IndexedType
= Base
->getType();
963 FieldDecl
*CodeGenFunction::FindCountedByField(
965 LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel
) {
966 const ValueDecl
*VD
= nullptr;
968 Base
= Base
->IgnoreParenImpCasts();
970 if (const auto *ME
= dyn_cast
<MemberExpr
>(Base
)) {
971 VD
= dyn_cast
<ValueDecl
>(ME
->getMemberDecl());
972 } else if (const auto *DRE
= dyn_cast
<DeclRefExpr
>(Base
)) {
973 // Pointing to the full structure.
974 VD
= dyn_cast
<ValueDecl
>(DRE
->getDecl());
976 QualType Ty
= VD
->getType();
977 if (Ty
->isPointerType())
978 Ty
= Ty
->getPointeeType();
980 if (const auto *RD
= Ty
->getAsRecordDecl())
981 VD
= RD
->getLastField();
982 } else if (const auto *CE
= dyn_cast
<CastExpr
>(Base
)) {
983 if (const auto *ME
= dyn_cast
<MemberExpr
>(CE
->getSubExpr()))
984 VD
= dyn_cast
<ValueDecl
>(ME
->getMemberDecl());
987 const auto *FD
= dyn_cast_if_present
<FieldDecl
>(VD
);
988 if (!FD
|| !FD
->getParent() ||
989 !Decl::isFlexibleArrayMemberLike(getContext(), FD
, FD
->getType(),
990 StrictFlexArraysLevel
, true))
993 const auto *CBA
= FD
->getAttr
<CountedByAttr
>();
997 StringRef FieldName
= CBA
->getCountedByField()->getName();
999 llvm::find_if(FD
->getParent()->fields(), [&](const FieldDecl
*Field
) {
1000 return FieldName
== Field
->getName();
1002 return It
!= FD
->getParent()->field_end() ? *It
: nullptr;
1005 void CodeGenFunction::EmitBoundsCheck(const Expr
*E
, const Expr
*Base
,
1006 llvm::Value
*Index
, QualType IndexType
,
1008 assert(SanOpts
.has(SanitizerKind::ArrayBounds
) &&
1009 "should not be called unless adding bounds checks");
1010 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel
=
1011 getLangOpts().getStrictFlexArraysLevel();
1013 QualType IndexedType
;
1014 llvm::Value
*Bound
=
1015 getArrayIndexingBound(*this, Base
, IndexedType
, StrictFlexArraysLevel
);
1019 SanitizerScope
SanScope(this);
1021 bool IndexSigned
= IndexType
->isSignedIntegerOrEnumerationType();
1022 llvm::Value
*IndexVal
= Builder
.CreateIntCast(Index
, SizeTy
, IndexSigned
);
1023 llvm::Value
*BoundVal
= Builder
.CreateIntCast(Bound
, SizeTy
, false);
1025 llvm::Constant
*StaticData
[] = {
1026 EmitCheckSourceLocation(E
->getExprLoc()),
1027 EmitCheckTypeDescriptor(IndexedType
),
1028 EmitCheckTypeDescriptor(IndexType
)
1030 llvm::Value
*Check
= Accessed
? Builder
.CreateICmpULT(IndexVal
, BoundVal
)
1031 : Builder
.CreateICmpULE(IndexVal
, BoundVal
);
1032 EmitCheck(std::make_pair(Check
, SanitizerKind::ArrayBounds
),
1033 SanitizerHandler::OutOfBounds
, StaticData
, Index
);
1037 CodeGenFunction::ComplexPairTy
CodeGenFunction::
1038 EmitComplexPrePostIncDec(const UnaryOperator
*E
, LValue LV
,
1039 bool isInc
, bool isPre
) {
1040 ComplexPairTy InVal
= EmitLoadOfComplex(LV
, E
->getExprLoc());
1042 llvm::Value
*NextVal
;
1043 if (isa
<llvm::IntegerType
>(InVal
.first
->getType())) {
1044 uint64_t AmountVal
= isInc
? 1 : -1;
1045 NextVal
= llvm::ConstantInt::get(InVal
.first
->getType(), AmountVal
, true);
1047 // Add the inc/dec to the real part.
1048 NextVal
= Builder
.CreateAdd(InVal
.first
, NextVal
, isInc
? "inc" : "dec");
1050 QualType ElemTy
= E
->getType()->castAs
<ComplexType
>()->getElementType();
1051 llvm::APFloat
FVal(getContext().getFloatTypeSemantics(ElemTy
), 1);
1054 NextVal
= llvm::ConstantFP::get(getLLVMContext(), FVal
);
1056 // Add the inc/dec to the real part.
1057 NextVal
= Builder
.CreateFAdd(InVal
.first
, NextVal
, isInc
? "inc" : "dec");
1060 ComplexPairTy
IncVal(NextVal
, InVal
.second
);
1062 // Store the updated result through the lvalue.
1063 EmitStoreOfComplex(IncVal
, LV
, /*init*/ false);
1064 if (getLangOpts().OpenMP
)
1065 CGM
.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
1068 // If this is a postinc, return the value read from memory, otherwise use the
1070 return isPre
? IncVal
: InVal
;
1073 void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr
*E
,
1074 CodeGenFunction
*CGF
) {
1075 // Bind VLAs in the cast type.
1076 if (CGF
&& E
->getType()->isVariablyModifiedType())
1077 CGF
->EmitVariablyModifiedType(E
->getType());
1079 if (CGDebugInfo
*DI
= getModuleDebugInfo())
1080 DI
->EmitExplicitCastType(E
->getType());
1083 //===----------------------------------------------------------------------===//
1084 // LValue Expression Emission
1085 //===----------------------------------------------------------------------===//
1087 static Address
EmitPointerWithAlignment(const Expr
*E
, LValueBaseInfo
*BaseInfo
,
1088 TBAAAccessInfo
*TBAAInfo
,
1089 KnownNonNull_t IsKnownNonNull
,
1090 CodeGenFunction
&CGF
) {
1091 // We allow this with ObjC object pointers because of fragile ABIs.
1092 assert(E
->getType()->isPointerType() ||
1093 E
->getType()->isObjCObjectPointerType());
1094 E
= E
->IgnoreParens();
1097 if (const CastExpr
*CE
= dyn_cast
<CastExpr
>(E
)) {
1098 if (const auto *ECE
= dyn_cast
<ExplicitCastExpr
>(CE
))
1099 CGF
.CGM
.EmitExplicitCastExprType(ECE
, &CGF
);
1101 switch (CE
->getCastKind()) {
1102 // Non-converting casts (but not C's implicit conversion from void*).
1105 case CK_AddressSpaceConversion
:
1106 if (auto PtrTy
= CE
->getSubExpr()->getType()->getAs
<PointerType
>()) {
1107 if (PtrTy
->getPointeeType()->isVoidType())
1110 LValueBaseInfo InnerBaseInfo
;
1111 TBAAAccessInfo InnerTBAAInfo
;
1112 Address Addr
= CGF
.EmitPointerWithAlignment(
1113 CE
->getSubExpr(), &InnerBaseInfo
, &InnerTBAAInfo
, IsKnownNonNull
);
1114 if (BaseInfo
) *BaseInfo
= InnerBaseInfo
;
1115 if (TBAAInfo
) *TBAAInfo
= InnerTBAAInfo
;
1117 if (isa
<ExplicitCastExpr
>(CE
)) {
1118 LValueBaseInfo TargetTypeBaseInfo
;
1119 TBAAAccessInfo TargetTypeTBAAInfo
;
1120 CharUnits Align
= CGF
.CGM
.getNaturalPointeeTypeAlignment(
1121 E
->getType(), &TargetTypeBaseInfo
, &TargetTypeTBAAInfo
);
1124 CGF
.CGM
.mergeTBAAInfoForCast(*TBAAInfo
, TargetTypeTBAAInfo
);
1125 // If the source l-value is opaque, honor the alignment of the
1127 if (InnerBaseInfo
.getAlignmentSource() != AlignmentSource::Decl
) {
1129 BaseInfo
->mergeForCast(TargetTypeBaseInfo
);
1130 Addr
= Address(Addr
.getPointer(), Addr
.getElementType(), Align
,
1135 if (CGF
.SanOpts
.has(SanitizerKind::CFIUnrelatedCast
) &&
1136 CE
->getCastKind() == CK_BitCast
) {
1137 if (auto PT
= E
->getType()->getAs
<PointerType
>())
1138 CGF
.EmitVTablePtrCheckForCast(PT
->getPointeeType(), Addr
,
1140 CodeGenFunction::CFITCK_UnrelatedCast
,
1144 llvm::Type
*ElemTy
=
1145 CGF
.ConvertTypeForMem(E
->getType()->getPointeeType());
1146 Addr
= Addr
.withElementType(ElemTy
);
1147 if (CE
->getCastKind() == CK_AddressSpaceConversion
)
1148 Addr
= CGF
.Builder
.CreateAddrSpaceCast(Addr
,
1149 CGF
.ConvertType(E
->getType()));
1154 // Array-to-pointer decay.
1155 case CK_ArrayToPointerDecay
:
1156 return CGF
.EmitArrayToPointerDecay(CE
->getSubExpr(), BaseInfo
, TBAAInfo
);
1158 // Derived-to-base conversions.
1159 case CK_UncheckedDerivedToBase
:
1160 case CK_DerivedToBase
: {
1161 // TODO: Support accesses to members of base classes in TBAA. For now, we
1162 // conservatively pretend that the complete object is of the base class
1165 *TBAAInfo
= CGF
.CGM
.getTBAAAccessInfo(E
->getType());
1166 Address Addr
= CGF
.EmitPointerWithAlignment(
1167 CE
->getSubExpr(), BaseInfo
, nullptr,
1168 (KnownNonNull_t
)(IsKnownNonNull
||
1169 CE
->getCastKind() == CK_UncheckedDerivedToBase
));
1170 auto Derived
= CE
->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1171 return CGF
.GetAddressOfBaseClass(
1172 Addr
, Derived
, CE
->path_begin(), CE
->path_end(),
1173 CGF
.ShouldNullCheckClassCastValue(CE
), CE
->getExprLoc());
1176 // TODO: Is there any reason to treat base-to-derived conversions
1184 if (const UnaryOperator
*UO
= dyn_cast
<UnaryOperator
>(E
)) {
1185 if (UO
->getOpcode() == UO_AddrOf
) {
1186 LValue LV
= CGF
.EmitLValue(UO
->getSubExpr(), IsKnownNonNull
);
1187 if (BaseInfo
) *BaseInfo
= LV
.getBaseInfo();
1188 if (TBAAInfo
) *TBAAInfo
= LV
.getTBAAInfo();
1189 return LV
.getAddress(CGF
);
1193 // std::addressof and variants.
1194 if (auto *Call
= dyn_cast
<CallExpr
>(E
)) {
1195 switch (Call
->getBuiltinCallee()) {
1198 case Builtin::BIaddressof
:
1199 case Builtin::BI__addressof
:
1200 case Builtin::BI__builtin_addressof
: {
1201 LValue LV
= CGF
.EmitLValue(Call
->getArg(0), IsKnownNonNull
);
1202 if (BaseInfo
) *BaseInfo
= LV
.getBaseInfo();
1203 if (TBAAInfo
) *TBAAInfo
= LV
.getTBAAInfo();
1204 return LV
.getAddress(CGF
);
1209 // TODO: conditional operators, comma.
1211 // Otherwise, use the alignment of the type.
1213 CGF
.CGM
.getNaturalPointeeTypeAlignment(E
->getType(), BaseInfo
, TBAAInfo
);
1214 llvm::Type
*ElemTy
= CGF
.ConvertTypeForMem(E
->getType()->getPointeeType());
1215 return Address(CGF
.EmitScalarExpr(E
), ElemTy
, Align
, IsKnownNonNull
);
1218 /// EmitPointerWithAlignment - Given an expression of pointer type, try to
1219 /// derive a more accurate bound on the alignment of the pointer.
1220 Address
CodeGenFunction::EmitPointerWithAlignment(
1221 const Expr
*E
, LValueBaseInfo
*BaseInfo
, TBAAAccessInfo
*TBAAInfo
,
1222 KnownNonNull_t IsKnownNonNull
) {
1224 ::EmitPointerWithAlignment(E
, BaseInfo
, TBAAInfo
, IsKnownNonNull
, *this);
1225 if (IsKnownNonNull
&& !Addr
.isKnownNonNull())
1226 Addr
.setKnownNonNull();
1230 llvm::Value
*CodeGenFunction::EmitNonNullRValueCheck(RValue RV
, QualType T
) {
1231 llvm::Value
*V
= RV
.getScalarVal();
1232 if (auto MPT
= T
->getAs
<MemberPointerType
>())
1233 return CGM
.getCXXABI().EmitMemberPointerIsNotNull(*this, V
, MPT
);
1234 return Builder
.CreateICmpNE(V
, llvm::Constant::getNullValue(V
->getType()));
1237 RValue
CodeGenFunction::GetUndefRValue(QualType Ty
) {
1238 if (Ty
->isVoidType())
1239 return RValue::get(nullptr);
1241 switch (getEvaluationKind(Ty
)) {
1244 ConvertType(Ty
->castAs
<ComplexType
>()->getElementType());
1245 llvm::Value
*U
= llvm::UndefValue::get(EltTy
);
1246 return RValue::getComplex(std::make_pair(U
, U
));
1249 // If this is a use of an undefined aggregate type, the aggregate must have an
1250 // identifiable address. Just because the contents of the value are undefined
1251 // doesn't mean that the address can't be taken and compared.
1252 case TEK_Aggregate
: {
1253 Address DestPtr
= CreateMemTemp(Ty
, "undef.agg.tmp");
1254 return RValue::getAggregate(DestPtr
);
1258 return RValue::get(llvm::UndefValue::get(ConvertType(Ty
)));
1260 llvm_unreachable("bad evaluation kind");
1263 RValue
CodeGenFunction::EmitUnsupportedRValue(const Expr
*E
,
1265 ErrorUnsupported(E
, Name
);
1266 return GetUndefRValue(E
->getType());
1269 LValue
CodeGenFunction::EmitUnsupportedLValue(const Expr
*E
,
1271 ErrorUnsupported(E
, Name
);
1272 llvm::Type
*ElTy
= ConvertType(E
->getType());
1273 llvm::Type
*Ty
= UnqualPtrTy
;
1274 return MakeAddrLValue(
1275 Address(llvm::UndefValue::get(Ty
), ElTy
, CharUnits::One()), E
->getType());
1278 bool CodeGenFunction::IsWrappedCXXThis(const Expr
*Obj
) {
1279 const Expr
*Base
= Obj
;
1280 while (!isa
<CXXThisExpr
>(Base
)) {
1281 // The result of a dynamic_cast can be null.
1282 if (isa
<CXXDynamicCastExpr
>(Base
))
1285 if (const auto *CE
= dyn_cast
<CastExpr
>(Base
)) {
1286 Base
= CE
->getSubExpr();
1287 } else if (const auto *PE
= dyn_cast
<ParenExpr
>(Base
)) {
1288 Base
= PE
->getSubExpr();
1289 } else if (const auto *UO
= dyn_cast
<UnaryOperator
>(Base
)) {
1290 if (UO
->getOpcode() == UO_Extension
)
1291 Base
= UO
->getSubExpr();
1301 LValue
CodeGenFunction::EmitCheckedLValue(const Expr
*E
, TypeCheckKind TCK
) {
1303 if (SanOpts
.has(SanitizerKind::ArrayBounds
) && isa
<ArraySubscriptExpr
>(E
))
1304 LV
= EmitArraySubscriptExpr(cast
<ArraySubscriptExpr
>(E
), /*Accessed*/true);
1307 if (!isa
<DeclRefExpr
>(E
) && !LV
.isBitField() && LV
.isSimple()) {
1308 SanitizerSet SkippedChecks
;
1309 if (const auto *ME
= dyn_cast
<MemberExpr
>(E
)) {
1310 bool IsBaseCXXThis
= IsWrappedCXXThis(ME
->getBase());
1312 SkippedChecks
.set(SanitizerKind::Alignment
, true);
1313 if (IsBaseCXXThis
|| isa
<DeclRefExpr
>(ME
->getBase()))
1314 SkippedChecks
.set(SanitizerKind::Null
, true);
1316 EmitTypeCheck(TCK
, E
->getExprLoc(), LV
.getPointer(*this), E
->getType(),
1317 LV
.getAlignment(), SkippedChecks
);
1322 /// EmitLValue - Emit code to compute a designator that specifies the location
1323 /// of the expression.
1325 /// This can return one of two things: a simple address or a bitfield reference.
1326 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1327 /// an LLVM pointer type.
1329 /// If this returns a bitfield reference, nothing about the pointee type of the
1330 /// LLVM value is known: For example, it may not be a pointer to an integer.
1332 /// If this returns a normal address, and if the lvalue's C type is fixed size,
1333 /// this method guarantees that the returned pointer type will point to an LLVM
1334 /// type of the same size of the lvalue's type. If the lvalue has a variable
1335 /// length type, this is not possible.
1337 LValue
CodeGenFunction::EmitLValue(const Expr
*E
,
1338 KnownNonNull_t IsKnownNonNull
) {
1339 LValue LV
= EmitLValueHelper(E
, IsKnownNonNull
);
1340 if (IsKnownNonNull
&& !LV
.isKnownNonNull())
1341 LV
.setKnownNonNull();
1345 LValue
CodeGenFunction::EmitLValueHelper(const Expr
*E
,
1346 KnownNonNull_t IsKnownNonNull
) {
1347 ApplyDebugLocation
DL(*this, E
);
1348 switch (E
->getStmtClass()) {
1349 default: return EmitUnsupportedLValue(E
, "l-value expression");
1351 case Expr::ObjCPropertyRefExprClass
:
1352 llvm_unreachable("cannot emit a property reference directly");
1354 case Expr::ObjCSelectorExprClass
:
1355 return EmitObjCSelectorLValue(cast
<ObjCSelectorExpr
>(E
));
1356 case Expr::ObjCIsaExprClass
:
1357 return EmitObjCIsaExpr(cast
<ObjCIsaExpr
>(E
));
1358 case Expr::BinaryOperatorClass
:
1359 return EmitBinaryOperatorLValue(cast
<BinaryOperator
>(E
));
1360 case Expr::CompoundAssignOperatorClass
: {
1361 QualType Ty
= E
->getType();
1362 if (const AtomicType
*AT
= Ty
->getAs
<AtomicType
>())
1363 Ty
= AT
->getValueType();
1364 if (!Ty
->isAnyComplexType())
1365 return EmitCompoundAssignmentLValue(cast
<CompoundAssignOperator
>(E
));
1366 return EmitComplexCompoundAssignmentLValue(cast
<CompoundAssignOperator
>(E
));
1368 case Expr::CallExprClass
:
1369 case Expr::CXXMemberCallExprClass
:
1370 case Expr::CXXOperatorCallExprClass
:
1371 case Expr::UserDefinedLiteralClass
:
1372 return EmitCallExprLValue(cast
<CallExpr
>(E
));
1373 case Expr::CXXRewrittenBinaryOperatorClass
:
1374 return EmitLValue(cast
<CXXRewrittenBinaryOperator
>(E
)->getSemanticForm(),
1376 case Expr::VAArgExprClass
:
1377 return EmitVAArgExprLValue(cast
<VAArgExpr
>(E
));
1378 case Expr::DeclRefExprClass
:
1379 return EmitDeclRefLValue(cast
<DeclRefExpr
>(E
));
1380 case Expr::ConstantExprClass
: {
1381 const ConstantExpr
*CE
= cast
<ConstantExpr
>(E
);
1382 if (llvm::Value
*Result
= ConstantEmitter(*this).tryEmitConstantExpr(CE
)) {
1383 QualType RetType
= cast
<CallExpr
>(CE
->getSubExpr()->IgnoreImplicit())
1384 ->getCallReturnType(getContext())
1386 return MakeNaturalAlignAddrLValue(Result
, RetType
);
1388 return EmitLValue(cast
<ConstantExpr
>(E
)->getSubExpr(), IsKnownNonNull
);
1390 case Expr::ParenExprClass
:
1391 return EmitLValue(cast
<ParenExpr
>(E
)->getSubExpr(), IsKnownNonNull
);
1392 case Expr::GenericSelectionExprClass
:
1393 return EmitLValue(cast
<GenericSelectionExpr
>(E
)->getResultExpr(),
1395 case Expr::PredefinedExprClass
:
1396 return EmitPredefinedLValue(cast
<PredefinedExpr
>(E
));
1397 case Expr::StringLiteralClass
:
1398 return EmitStringLiteralLValue(cast
<StringLiteral
>(E
));
1399 case Expr::ObjCEncodeExprClass
:
1400 return EmitObjCEncodeExprLValue(cast
<ObjCEncodeExpr
>(E
));
1401 case Expr::PseudoObjectExprClass
:
1402 return EmitPseudoObjectLValue(cast
<PseudoObjectExpr
>(E
));
1403 case Expr::InitListExprClass
:
1404 return EmitInitListLValue(cast
<InitListExpr
>(E
));
1405 case Expr::CXXTemporaryObjectExprClass
:
1406 case Expr::CXXConstructExprClass
:
1407 return EmitCXXConstructLValue(cast
<CXXConstructExpr
>(E
));
1408 case Expr::CXXBindTemporaryExprClass
:
1409 return EmitCXXBindTemporaryLValue(cast
<CXXBindTemporaryExpr
>(E
));
1410 case Expr::CXXUuidofExprClass
:
1411 return EmitCXXUuidofLValue(cast
<CXXUuidofExpr
>(E
));
1412 case Expr::LambdaExprClass
:
1413 return EmitAggExprToLValue(E
);
1415 case Expr::ExprWithCleanupsClass
: {
1416 const auto *cleanups
= cast
<ExprWithCleanups
>(E
);
1417 RunCleanupsScope
Scope(*this);
1418 LValue LV
= EmitLValue(cleanups
->getSubExpr(), IsKnownNonNull
);
1419 if (LV
.isSimple()) {
1420 // Defend against branches out of gnu statement expressions surrounded by
1422 Address Addr
= LV
.getAddress(*this);
1423 llvm::Value
*V
= Addr
.getPointer();
1424 Scope
.ForceCleanup({&V
});
1425 return LValue::MakeAddr(Addr
.withPointer(V
, Addr
.isKnownNonNull()),
1426 LV
.getType(), getContext(), LV
.getBaseInfo(),
1429 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1430 // bitfield lvalue or some other non-simple lvalue?
1434 case Expr::CXXDefaultArgExprClass
: {
1435 auto *DAE
= cast
<CXXDefaultArgExpr
>(E
);
1436 CXXDefaultArgExprScope
Scope(*this, DAE
);
1437 return EmitLValue(DAE
->getExpr(), IsKnownNonNull
);
1439 case Expr::CXXDefaultInitExprClass
: {
1440 auto *DIE
= cast
<CXXDefaultInitExpr
>(E
);
1441 CXXDefaultInitExprScope
Scope(*this, DIE
);
1442 return EmitLValue(DIE
->getExpr(), IsKnownNonNull
);
1444 case Expr::CXXTypeidExprClass
:
1445 return EmitCXXTypeidLValue(cast
<CXXTypeidExpr
>(E
));
1447 case Expr::ObjCMessageExprClass
:
1448 return EmitObjCMessageExprLValue(cast
<ObjCMessageExpr
>(E
));
1449 case Expr::ObjCIvarRefExprClass
:
1450 return EmitObjCIvarRefLValue(cast
<ObjCIvarRefExpr
>(E
));
1451 case Expr::StmtExprClass
:
1452 return EmitStmtExprLValue(cast
<StmtExpr
>(E
));
1453 case Expr::UnaryOperatorClass
:
1454 return EmitUnaryOpLValue(cast
<UnaryOperator
>(E
));
1455 case Expr::ArraySubscriptExprClass
:
1456 return EmitArraySubscriptExpr(cast
<ArraySubscriptExpr
>(E
));
1457 case Expr::MatrixSubscriptExprClass
:
1458 return EmitMatrixSubscriptExpr(cast
<MatrixSubscriptExpr
>(E
));
1459 case Expr::OMPArraySectionExprClass
:
1460 return EmitOMPArraySectionExpr(cast
<OMPArraySectionExpr
>(E
));
1461 case Expr::ExtVectorElementExprClass
:
1462 return EmitExtVectorElementExpr(cast
<ExtVectorElementExpr
>(E
));
1463 case Expr::CXXThisExprClass
:
1464 return MakeAddrLValue(LoadCXXThisAddress(), E
->getType());
1465 case Expr::MemberExprClass
:
1466 return EmitMemberExpr(cast
<MemberExpr
>(E
));
1467 case Expr::CompoundLiteralExprClass
:
1468 return EmitCompoundLiteralLValue(cast
<CompoundLiteralExpr
>(E
));
1469 case Expr::ConditionalOperatorClass
:
1470 return EmitConditionalOperatorLValue(cast
<ConditionalOperator
>(E
));
1471 case Expr::BinaryConditionalOperatorClass
:
1472 return EmitConditionalOperatorLValue(cast
<BinaryConditionalOperator
>(E
));
1473 case Expr::ChooseExprClass
:
1474 return EmitLValue(cast
<ChooseExpr
>(E
)->getChosenSubExpr(), IsKnownNonNull
);
1475 case Expr::OpaqueValueExprClass
:
1476 return EmitOpaqueValueLValue(cast
<OpaqueValueExpr
>(E
));
1477 case Expr::SubstNonTypeTemplateParmExprClass
:
1478 return EmitLValue(cast
<SubstNonTypeTemplateParmExpr
>(E
)->getReplacement(),
1480 case Expr::ImplicitCastExprClass
:
1481 case Expr::CStyleCastExprClass
:
1482 case Expr::CXXFunctionalCastExprClass
:
1483 case Expr::CXXStaticCastExprClass
:
1484 case Expr::CXXDynamicCastExprClass
:
1485 case Expr::CXXReinterpretCastExprClass
:
1486 case Expr::CXXConstCastExprClass
:
1487 case Expr::CXXAddrspaceCastExprClass
:
1488 case Expr::ObjCBridgedCastExprClass
:
1489 return EmitCastLValue(cast
<CastExpr
>(E
));
1491 case Expr::MaterializeTemporaryExprClass
:
1492 return EmitMaterializeTemporaryExpr(cast
<MaterializeTemporaryExpr
>(E
));
1494 case Expr::CoawaitExprClass
:
1495 return EmitCoawaitLValue(cast
<CoawaitExpr
>(E
));
1496 case Expr::CoyieldExprClass
:
1497 return EmitCoyieldLValue(cast
<CoyieldExpr
>(E
));
1501 /// Given an object of the given canonical type, can we safely copy a
1502 /// value out of it based on its initializer?
1503 static bool isConstantEmittableObjectType(QualType type
) {
1504 assert(type
.isCanonical());
1505 assert(!type
->isReferenceType());
1507 // Must be const-qualified but non-volatile.
1508 Qualifiers qs
= type
.getLocalQualifiers();
1509 if (!qs
.hasConst() || qs
.hasVolatile()) return false;
1511 // Otherwise, all object types satisfy this except C++ classes with
1512 // mutable subobjects or non-trivial copy/destroy behavior.
1513 if (const auto *RT
= dyn_cast
<RecordType
>(type
))
1514 if (const auto *RD
= dyn_cast
<CXXRecordDecl
>(RT
->getDecl()))
1515 if (RD
->hasMutableFields() || !RD
->isTrivial())
1521 /// Can we constant-emit a load of a reference to a variable of the
1522 /// given type? This is different from predicates like
1523 /// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1524 /// in situations that don't necessarily satisfy the language's rules
1525 /// for this (e.g. C++'s ODR-use rules). For example, we want to able
1526 /// to do this with const float variables even if those variables
1527 /// aren't marked 'constexpr'.
1528 enum ConstantEmissionKind
{
1530 CEK_AsReferenceOnly
,
1531 CEK_AsValueOrReference
,
1534 static ConstantEmissionKind
checkVarTypeForConstantEmission(QualType type
) {
1535 type
= type
.getCanonicalType();
1536 if (const auto *ref
= dyn_cast
<ReferenceType
>(type
)) {
1537 if (isConstantEmittableObjectType(ref
->getPointeeType()))
1538 return CEK_AsValueOrReference
;
1539 return CEK_AsReferenceOnly
;
1541 if (isConstantEmittableObjectType(type
))
1542 return CEK_AsValueOnly
;
/// Try to emit a reference to the given value without producing it as
/// an l-value.  This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Otherwise, fail.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // In CUDA/HIP device compilation, a lambda may capture a reference variable
  // referencing a global host variable by copy. In this case the lambda should
  // make a copy of the value of the global host variable. The DRE of the
  // captured reference variable cannot be emitted as load from the host
  // global variable as compile time constant, since the host variable is not
  // accessible on device. The DRE of the captured reference variable has to be
  // loaded from captures.
  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
      refExpr->refersToEnclosingVariableOrCapture()) {
    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
    if (MD && MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      const APValue::LValueBase &base = result.Val.getLValueBase();
      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
        if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
          if (!VD->hasAttr<CUDADeviceAttr>()) {
            return ConstantEmission();
          }
        }
      }
    }
  }

  // Emit as a constant.
  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
                                               result.Val, resultType);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, result.Val);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, result.Val);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}
static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
                                                        const MemberExpr *ME) {
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
    return DeclRefExpr::Create(
        CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
  }
  return nullptr;
}

CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
    return tryEmitAsConstant(DRE);
  return ConstantEmission();
}
llvm::Value *CodeGenFunction::emitScalarConstant(
    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
  assert(Constant && "not a constant");
  if (Constant.isReference())
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
                            E->getExprLoc())
        .getScalarVal();
  return Constant.getValue();
}
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
  return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
}
static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums, bool IsBool) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    ED->getValueRange(End, Min);
  }
  return true;
}
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
                       hasBooleanRepresentation(Ty)))
    return nullptr;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}
bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
                                           SourceLocation Loc) {
  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
  if (!HasBoolCheck && !HasEnumCheck)
    return false;

  bool IsBool = hasBooleanRepresentation(Ty) ||
                NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
  bool NeedsBoolCheck = HasBoolCheck && IsBool;
  bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
  if (!NeedsBoolCheck && !NeedsEnumCheck)
    return false;

  // Single-bit booleans don't need to be checked. Special-case this to avoid
  // a bit width mismatch when handling bitfield values. This is handled by
  // EmitFromMemory for the non-bitfield case.
  if (IsBool &&
      cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
    return false;

  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
    return true;

  auto &Ctx = getLLVMContext();
  SanitizerScope SanScope(this);
  llvm::Value *Check;
  --End;
  if (!Min) {
    Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
  } else {
    llvm::Value *Upper =
        Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
    llvm::Value *Lower =
        Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
    Check = Builder.CreateAnd(Upper, Lower);
  }
  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
                                  EmitCheckTypeDescriptor(Ty)};
  SanitizerMask Kind =
      NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
  EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
            StaticArgs, EmitCheckValue(Value));
  return true;
}
llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty,
                                               SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               TBAAAccessInfo TBAAInfo,
                                               bool isNontemporal) {
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
                              NotKnownNonNull);

  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    // Boolean vectors use `iN` as storage type.
    if (ClangVecTy->isExtVectorBoolType()) {
      llvm::Type *ValTy = ConvertType(Ty);
      unsigned ValNumElems =
          cast<llvm::FixedVectorType>(ValTy)->getNumElements();
      // Load the `iP` storage object (P is the padded vector size).
      auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
      const auto *RawIntTy = RawIntV->getType();
      assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
      // Bitcast iP --> <P x i1>.
      auto *PaddedVecTy = llvm::FixedVectorType::get(
          Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
      llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
      // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
      V = emitBoolVecConversion(V, ValNumElems, "extractvec");

      return EmitFromMemory(V, Ty);
    }

    // Handle vectors of size 3 like size 4 for better performance.
    const llvm::Type *EltTy = Addr.getElementType();
    const auto *VTy = cast<llvm::FixedVectorType>(EltTy);

    if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {

      llvm::VectorType *vec4Ty =
          llvm::FixedVectorType::get(VTy->getElementType(), 4);
      Address Cast = Addr.withElementType(vec4Ty);
      // Now load value.
      llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
      V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  // Atomic operations have to be done on integral types.
  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
    return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
  }

  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node = llvm::MDNode::get(
        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);

  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
    // In order to prevent the optimizer from throwing away the check, don't
    // attach range metadata to the load.
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
      Load->setMetadata(llvm::LLVMContext::MD_noundef,
                        llvm::MDNode::get(getLLVMContext(), std::nullopt));
    }

  return EmitFromMemory(Load, Ty);
}
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
  }

  return Value;
}
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }
  if (Ty->isExtVectorBoolType()) {
    const auto *RawIntTy = Value->getType();
    // Bitcast iP --> <P x i1>.
    auto *PaddedVecTy = llvm::FixedVectorType::get(
        Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
    auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
    // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
    llvm::Type *ValTy = ConvertType(Ty);
    unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
    return emitBoolVecConversion(V, ValNumElems, "extractvec");
  }

  return Value;
}
// Convert the pointer of \p Addr to a pointer to a vector (the value type of
// MatrixType), if it points to an array (the memory type of MatrixType).
static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
                                         bool IsVector = true) {
  auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
  if (ArrayTy && IsVector) {
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    return Addr.withElementType(VectorTy);
  }
  auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
  if (VectorTy && !IsVector) {
    auto *ArrayTy = llvm::ArrayType::get(
        VectorTy->getElementType(),
        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());

    return Addr.withElementType(ArrayTy);
  }

  return Addr;
}
// Emit a store of a matrix LValue. This may require casting the original
// pointer to memory address (ArrayType) to a pointer to the value type
// (VectorType).
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
                                    bool isInit, CodeGenFunction &CGF) {
  Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
                                           value->getType()->isVectorTy());
  CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
                        lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
                        lvalue.isNontemporal());
}
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
                                        bool Volatile, QualType Ty,
                                        LValueBaseInfo BaseInfo,
                                        TBAAAccessInfo TBAAInfo,
                                        bool isInit, bool isNontemporal) {
  if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
    if (GV->isThreadLocal())
      Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
                              NotKnownNonNull);

  llvm::Type *SrcTy = Value->getType();
  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
    auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
    if (VecTy && ClangVecTy->isExtVectorBoolType()) {
      auto *MemIntTy = cast<llvm::IntegerType>(Addr.getElementType());
      // Expand to the memory bit width.
      unsigned MemNumElems = MemIntTy->getPrimitiveSizeInBits();
      // <N x i1> --> <P x i1>.
      Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
      // <P x i1> --> iP.
      Value = Builder.CreateBitCast(Value, MemIntTy);
    } else if (!CGM.getCodeGenOpts().PreserveVec3Type) {
      // Handle vec3 special.
      if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
        // Our source is a vec3, do a shuffle vector to make it a vec4.
        Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
                                            "extractVec");
        SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
      }
      if (Addr.getElementType() != SrcTy) {
        Addr = Addr.withElementType(SrcTy);
      }
    }
  }

  Value = EmitToMemory(Value, Ty);

  LValue AtomicLValue =
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
  if (Ty->isAtomicType() ||
      (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
    EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
    return;
  }

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node =
        llvm::MDNode::get(Store->getContext(),
                          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
  }

  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
}
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  if (lvalue.getType()->isConstantMatrixType()) {
    EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
    return;
  }

  EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
                    lvalue.getType(), lvalue.getBaseInfo(),
                    lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}
// Emit a load of a LValue of matrix type. This may require casting the pointer
// to memory address (ArrayType) to a pointer to the value type (VectorType).
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
                                     CodeGenFunction &CGF) {
  assert(LV.getType()->isConstantMatrixType());
  Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
  LV.setAddress(Addr);
  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
}
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    Address AddrWeakObj = LV.getAddress(*this);
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    // In MRC mode, we do a load+autorelease.
    if (!getLangOpts().ObjCAutoRefCount) {
      return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
    }

    // In ARC mode, we load retained and then consume the value.
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    if (LV.getType()->isConstantMatrixType())
      return EmitLoadOfMatrixLValue(LV, Loc, *this);

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV, Loc));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
                                              LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt()) {
    return EmitLoadOfExtVectorElementLValue(LV);
  }

  // Global Register variables always invoke intrinsics
  if (LV.isGlobalReg())
    return EmitLoadOfGlobalRegLValue(LV);

  if (LV.isMatrixElt()) {
    llvm::Value *Idx = LV.getMatrixIdx();
    if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
      const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
      llvm::MatrixBuilder MB(Builder);
      MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
    }
    llvm::LoadInst *Load =
        Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
  }

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV, Loc);
}
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 SourceLocation Loc) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  Address Ptr = LV.getBitFieldAddress();
  llvm::Value *Val =
      Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");

  bool UseVolatile = LV.isVolatileQualified() &&
                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
    unsigned HighBits = StorageSize - Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Offset + HighBits)
      Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
  } else {
    if (Offset)
      Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
      Val = Builder.CreateAnd(
          Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
  EmitScalarRangeCheck(Val, LV.getType(), Loc);
  return RValue::get(Val);
}
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
                                        LV.isVolatileQualified());

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<int, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(getAccessedFieldNo(i, Elts));

  Vec = Builder.CreateShuffleVector(Vec, Mask);
  return RValue::get(Vec);
}
/// Generates lvalue for partial ext_vector access.
Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
  Address VectorAddress = LV.getExtVectorAddress();
  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);

  Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);

  const llvm::Constant *Elts = LV.getExtVectorElts();
  unsigned ix = getAccessedFieldNo(0, Elts);

  Address VectorBasePtrPlusIx =
      Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
                                     "vector.elt");

  return VectorBasePtrPlusIx;
}
/// Load of global named registers are always calls to intrinsics.
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
  llvm::Value *Call = Builder.CreateCall(
      F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
  if (OrigTy->isPointerTy())
    Call = Builder.CreateIntToPtr(Call, OrigTy);
  return RValue::get(Call);
}
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
                                            Dst.isVolatileQualified());
      auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
      if (IRStoreTy) {
        auto *IRVecTy = llvm::FixedVectorType::get(
            Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
        Vec = Builder.CreateBitCast(Vec, IRVecTy);
        // iN --> <N x i1>.
      }
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      if (IRStoreTy) {
        // <N x i1> --> <iN>.
        Vec = Builder.CreateBitCast(Vec, IRStoreTy);
      }
      Builder.CreateStore(Vec, Dst.getVectorAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    if (Dst.isGlobalReg())
      return EmitStoreThroughGlobalRegLValue(Src, Dst);

    if (Dst.isMatrixElt()) {
      llvm::Value *Idx = Dst.getMatrixIdx();
      if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
        const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
        llvm::MatrixBuilder MB(Builder);
        MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
      }
      llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
      llvm::Value *Vec =
          Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
      Builder.CreateStore(Vec, Dst.getMatrixAddress(),
                          Dst.isVolatileQualified());
      return;
    }

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      if (isInit) {
        Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
        break;
      }
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      if (isInit)
        // Initialize and then skip the primitive store.
        EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal());
      else
        EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(),
                         /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    Address LvalueDst = Dst.getAddress(*this);
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
    Address LvalueDst = Dst.getAddress(*this);
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = IntPtrTy;
      Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
      llvm::Value *RHS = dst.getPointer();
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
          Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
                                 "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    }
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  Address Ptr = Dst.getBitFieldAddress();

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  // Cast the source to the storage type and shift it into place.
  SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
                                 /*isSigned=*/false);
  llvm::Value *MaskedVal = SrcVal;

  const bool UseVolatile =
      CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
  const unsigned StorageSize =
      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
  // See if there are other bits in the bitfield's storage we'll need to load
  // and mask together with source before storing.
  if (StorageSize != Info.Size) {
    assert(StorageSize > Info.Size && "Invalid bitfield size.");
    llvm::Value *Val =
        Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");

    // Mask the source value as needed.
    if (!hasBooleanRepresentation(Dst.getType()))
      SrcVal = Builder.CreateAnd(
          SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
          "bf.value");
    MaskedVal = SrcVal;
    if (Offset)
      SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");

    // Mask out the original value.
    Val = Builder.CreateAnd(
        Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
        "bf.clear");

    // Or together the unchanged values and the source value.
    SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
  } else {
    assert(Offset == 0);
    // According to the AAPCS:
    // When a volatile bit-field is written, and its container does not overlap
    // with any non-bit-field member, its container must be read exactly once
    // and written exactly once using the access width appropriate to the type
    // of the container. The two accesses are not atomic.
    if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
        CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
      Builder.CreateLoad(Ptr, true, "bf.load");
  }

  // Write the new value back out.
  Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());

  // Return the new value of the bit-field, if requested.
  if (Result) {
    llvm::Value *ResultVal = MaskedVal;

    // Sign extend the value if needed.
    if (Info.IsSigned) {
      assert(Info.Size <= StorageSize);
      unsigned HighBits = StorageSize - Info.Size;
      if (HighBits) {
        ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
        ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
      }
    }

    ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
                                      "bf.result.cast");
    *Result = EmitFromMemory(ResultVal, Dst.getType());
  }
}
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
                                        Dst.isVolatileQualified());
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
        cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use a shuffle vector if the src and destination have the same number
      // of elements, and restore the vector mask since it is on the side it
      // will be stored.
      SmallVector<int, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i;

      Vec = Builder.CreateShuffleVector(SrcVal, Mask);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length, then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that?  This could be simpler.
      SmallVector<int, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(i);
      ExtMask.resize(NumDstElts, -1);
      llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
      // Build the identity mask.
      SmallVector<int, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(i);

      // When the vector size is odd and .odd or .hi is used, the last element
      // of the Elts constant array will be one past the size of the vector.
      // Ignore the last element here, if it is greater than the mask size.
      if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
        NumSrcElts--;

      // Modify the mask where source elements get shuffled in.
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
    } else {
      // We should never shorten the vector
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
                      Dst.isVolatileQualified());
}
/// Store of global named registers are always calls to intrinsics.
void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
  assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
  assert(RegName && "Register LValue is not metadata");

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
  llvm::Value *Value = Src.getScalarVal();
  if (OrigTy->isPointerTy())
    Value = Builder.CreatePtrToInt(Value, Ty);
  Builder.CreateCall(
      F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
}
// setObjCGCLValueClass - sets class of the lvalue for the purpose of
// generating the write-barrier API. It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If ivar is a structure pointer, assigning to field of
      // this struct follows gcc's behavior and makes it a non-ivar
      // write-barrier conservatively.
      ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}
static LValue EmitThreadPrivateVarDeclLValue(
    CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
    llvm::Type *RealVarTy, SourceLocation Loc) {
  if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
    Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
        CGF, VD, Addr, Loc);
  else
    Addr =
        CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);

  Addr = Addr.withElementType(RealVarTy);
  return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
                                           const VarDecl *VD, QualType T) {
  std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
  // Return an invalid address if variable is MT_To (or MT_Enter starting with
  // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
  // and MT_To (or MT_Enter) with unified memory, return a valid address.
  if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
                *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
               !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
    return Address::invalid();
  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
          ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
            *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
           CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
         "Expected link clause OR to clause with unified memory enabled.");
  QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
  Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
  return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
}
Address
CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
                                     LValueBaseInfo *PointeeBaseInfo,
                                     TBAAAccessInfo *PointeeTBAAInfo) {
  llvm::LoadInst *Load =
      Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
  CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());

  QualType PointeeType = RefLVal.getType()->getPointeeType();
  CharUnits Align = CGM.getNaturalTypeAlignment(
      PointeeType, PointeeBaseInfo, PointeeTBAAInfo,
      /* forPointeeType= */ true);
  return Address(Load, ConvertTypeForMem(PointeeType), Align);
}
LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
  LValueBaseInfo PointeeBaseInfo;
  TBAAAccessInfo PointeeTBAAInfo;
  Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
                                            &PointeeTBAAInfo);
  return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
                        PointeeBaseInfo, PointeeTBAAInfo);
}
Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
                                           const PointerType *PtrTy,
                                           LValueBaseInfo *BaseInfo,
                                           TBAAAccessInfo *TBAAInfo) {
  llvm::Value *Addr = Builder.CreateLoad(Ptr);
  return Address(Addr, ConvertTypeForMem(PtrTy->getPointeeType()),
                 CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), BaseInfo,
                                             TBAAInfo,
                                             /*forPointeeType=*/true));
}
LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
                                                const PointerType *PtrTy) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
  return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
}
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  QualType T = E->getType();

  // If it's thread_local, emit a call to its wrapper function instead.
  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
      CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
    return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
  // Check if the variable is marked as declare target with link clause in
  // device codegen.
  if (CGF.getLangOpts().OpenMPIsTargetDevice) {
    Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
    if (Addr.isValid())
      return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  }

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);

  if (VD->getTLSKind() != VarDecl::TLS_None)
    V = CGF.Builder.CreateThreadLocalAddress(V);

  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
  Address Addr(V, RealVarTy, Alignment);
  // Emit reference to the private copy of the variable if it is an OpenMP
  // threadprivate variable.
  if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
      VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
    return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
                                          E->getExprLoc());
  }
  LValue LV = VD->getType()->isReferenceType() ?
      CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
                                    AlignmentSource::Decl) :
      CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}
static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
                                               GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (FD->hasAttr<WeakRefAttr>()) {
    ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
    return aliasee.getPointer();
  }

  llvm::Constant *V = CGM.GetAddrOfFunction(GD);

  return V;
}
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
                                     GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD);
  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
  return CGF.MakeAddrLValue(V, E->getType(), Alignment,
                            AlignmentSource::Decl);
}
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
                                      llvm::Value *ThisValue) {
  return CGF.EmitLValueForLambdaField(FD, ThisValue);
}
/// Named Registers are named metadata pointing to the register name
/// which will be read from/written to as an argument to the intrinsic
/// @llvm.read/write_register.
/// So far, only the name is being passed down, but other options such as
/// register type, allocation type or even optimization options could be
/// passed down via the metadata node.
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
  SmallString<64> Name("llvm.named.register.");
  AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
  assert(Asm->getLabel().size() < 64 - Name.size() &&
         "Register name too big");
  Name.append(Asm->getLabel());
  llvm::NamedMDNode *M =
      CGM.getModule().getOrInsertNamedMetadata(Name);
  if (M->getNumOperands() == 0) {
    llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
                                              Asm->getLabel());
    llvm::Metadata *Ops[] = {Str};
    M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
  }

  CharUnits Alignment = CGM.getContext().getDeclAlign(VD);

  llvm::Value *Ptr =
      llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
  return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
}
/// Determine whether we can emit a reference to \p VD from the current
/// context, despite not necessarily having seen an odr-use of the variable in
/// this context.
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
                                               const DeclRefExpr *E,
                                               const VarDecl *VD) {
  // For a variable declared in an enclosing scope, do not emit a spurious
  // reference even if we have a capture, as that will emit an unwarranted
  // reference to our capture state, and will likely generate worse code than
  // emitting a local copy.
  if (E->refersToEnclosingVariableOrCapture())
    return false;

  // For a local declaration declared in this function, we can always reference
  // it even if we don't have an odr-use.
  if (VD->hasLocalStorage()) {
    return VD->getDeclContext() ==
           dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
  }

  // For a global declaration, we can emit a reference to it if we know
  // for sure that we are able to emit a definition of it.
  VD = VD->getDefinition(CGF.getContext());
  if (!VD)
    return false;

  // Don't emit a spurious reference if it might be to a variable that only
  // exists on a different device / target.
  // FIXME: This is unnecessarily broad. Check whether this would actually be a
  // cross-target reference.
  if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
      CGF.getLangOpts().OpenCL) {
    return false;
  }

  // We can emit a spurious reference only if the linkage implies that we'll
  // be emitting a non-interposable symbol that will be retained until link
  // time.
  switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
  case llvm::GlobalValue::ExternalLinkage:
  case llvm::GlobalValue::LinkOnceODRLinkage:
  case llvm::GlobalValue::WeakODRLinkage:
  case llvm::GlobalValue::InternalLinkage:
  case llvm::GlobalValue::PrivateLinkage:
    return true;
  default:
    return false;
  }
}
CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr
*E
) {
2772 const NamedDecl
*ND
= E
->getDecl();
2773 QualType T
= E
->getType();
2775 assert(E
->isNonOdrUse() != NOUR_Unevaluated
&&
2776 "should not emit an unevaluated operand");
2778 if (const auto *VD
= dyn_cast
<VarDecl
>(ND
)) {
2779 // Global Named registers access via intrinsics only
2780 if (VD
->getStorageClass() == SC_Register
&&
2781 VD
->hasAttr
<AsmLabelAttr
>() && !VD
->isLocalVarDecl())
2782 return EmitGlobalNamedRegister(VD
, CGM
);
2784 // If this DeclRefExpr does not constitute an odr-use of the variable,
2785 // we're not permitted to emit a reference to it in general, and it might
2786 // not be captured if capture would be necessary for a use. Emit the
2787 // constant value directly instead.
2788 if (E
->isNonOdrUse() == NOUR_Constant
&&
2789 (VD
->getType()->isReferenceType() ||
2790 !canEmitSpuriousReferenceToVariable(*this, E
, VD
))) {
2791 VD
->getAnyInitializer(VD
);
2792 llvm::Constant
*Val
= ConstantEmitter(*this).emitAbstract(
2793 E
->getLocation(), *VD
->evaluateValue(), VD
->getType());
2794 assert(Val
&& "failed to emit constant expression");
2796 Address Addr
= Address::invalid();
2797 if (!VD
->getType()->isReferenceType()) {
2798 // Spill the constant value to a global.
2799 Addr
= CGM
.createUnnamedGlobalFrom(*VD
, Val
,
2800 getContext().getDeclAlign(VD
));
2801 llvm::Type
*VarTy
= getTypes().ConvertTypeForMem(VD
->getType());
2802 auto *PTy
= llvm::PointerType::get(
2803 VarTy
, getTypes().getTargetAddressSpace(VD
->getType()));
2804 Addr
= Builder
.CreatePointerBitCastOrAddrSpaceCast(Addr
, PTy
, VarTy
);
2806 // Should we be using the alignment of the constant pointer we emitted?
2807 CharUnits Alignment
=
2808 CGM
.getNaturalTypeAlignment(E
->getType(),
2809 /* BaseInfo= */ nullptr,
2810 /* TBAAInfo= */ nullptr,
2811 /* forPointeeType= */ true);
2812 Addr
= Address(Val
, ConvertTypeForMem(E
->getType()), Alignment
);
2814 return MakeAddrLValue(Addr
, T
, AlignmentSource::Decl
);
2817 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
2819 // Check for captured variables.
2820 if (E
->refersToEnclosingVariableOrCapture()) {
2821 VD
= VD
->getCanonicalDecl();
2822 if (auto *FD
= LambdaCaptureFields
.lookup(VD
))
2823 return EmitCapturedFieldLValue(*this, FD
, CXXABIThisValue
);
2824 if (CapturedStmtInfo
) {
2825 auto I
= LocalDeclMap
.find(VD
);
2826 if (I
!= LocalDeclMap
.end()) {
2828 if (VD
->getType()->isReferenceType())
2829 CapLVal
= EmitLoadOfReferenceLValue(I
->second
, VD
->getType(),
2830 AlignmentSource::Decl
);
2832 CapLVal
= MakeAddrLValue(I
->second
, T
);
2833 // Mark lvalue as nontemporal if the variable is marked as nontemporal
2835 if (getLangOpts().OpenMP
&&
2836 CGM
.getOpenMPRuntime().isNontemporalDecl(VD
))
2837 CapLVal
.setNontemporal(/*Value=*/true);
2841 EmitCapturedFieldLValue(*this, CapturedStmtInfo
->lookup(VD
),
2842 CapturedStmtInfo
->getContextValue());
2843 Address LValueAddress
= CapLVal
.getAddress(*this);
2844 CapLVal
= MakeAddrLValue(
2845 Address(LValueAddress
.getPointer(), LValueAddress
.getElementType(),
2846 getContext().getDeclAlign(VD
)),
2847 CapLVal
.getType(), LValueBaseInfo(AlignmentSource::Decl
),
2848 CapLVal
.getTBAAInfo());
2849 // Mark lvalue as nontemporal if the variable is marked as nontemporal
2851 if (getLangOpts().OpenMP
&&
2852 CGM
.getOpenMPRuntime().isNontemporalDecl(VD
))
2853 CapLVal
.setNontemporal(/*Value=*/true);
2857 assert(isa
<BlockDecl
>(CurCodeDecl
));
2858 Address addr
= GetAddrOfBlockDecl(VD
);
2859 return MakeAddrLValue(addr
, T
, AlignmentSource::Decl
);
2863 // FIXME: We should be able to assert this for FunctionDecls as well!
2864 // FIXME: We should be able to assert this for all DeclRefExprs, not just
2865 // those with a valid source location.
2866 assert((ND
->isUsed(false) || !isa
<VarDecl
>(ND
) || E
->isNonOdrUse() ||
2867 !E
->getLocation().isValid()) &&
2868 "Should not use decl without marking it used!");
2870 if (ND
->hasAttr
<WeakRefAttr
>()) {
2871 const auto *VD
= cast
<ValueDecl
>(ND
);
2872 ConstantAddress Aliasee
= CGM
.GetWeakRefReference(VD
);
2873 return MakeAddrLValue(Aliasee
, T
, AlignmentSource::Decl
);
2876 if (const auto *VD
= dyn_cast
<VarDecl
>(ND
)) {
2877 // Check if this is a global variable.
2878 if (VD
->hasLinkage() || VD
->isStaticDataMember())
2879 return EmitGlobalVarDeclLValue(*this, E
, VD
);
2881 Address addr
= Address::invalid();
2883 // The variable should generally be present in the local decl map.
2884 auto iter
= LocalDeclMap
.find(VD
);
2885 if (iter
!= LocalDeclMap
.end()) {
2886 addr
= iter
->second
;
2888 // Otherwise, it might be static local we haven't emitted yet for
2889 // some reason; most likely, because it's in an outer function.
2890 } else if (VD
->isStaticLocal()) {
2891 llvm::Constant
*var
= CGM
.getOrCreateStaticVarDecl(
2892 *VD
, CGM
.getLLVMLinkageVarDefinition(VD
));
2894 var
, ConvertTypeForMem(VD
->getType()), getContext().getDeclAlign(VD
));
2896 // No other cases for now.
2898 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
2901 // Handle threadlocal function locals.
2902 if (VD
->getTLSKind() != VarDecl::TLS_None
)
2903 addr
= addr
.withPointer(
2904 Builder
.CreateThreadLocalAddress(addr
.getPointer()), NotKnownNonNull
);
2906 // Check for OpenMP threadprivate variables.
2907 if (getLangOpts().OpenMP
&& !getLangOpts().OpenMPSimd
&&
2908 VD
->hasAttr
<OMPThreadPrivateDeclAttr
>()) {
2909 return EmitThreadPrivateVarDeclLValue(
2910 *this, VD
, T
, addr
, getTypes().ConvertTypeForMem(VD
->getType()),
2914 // Drill into block byref variables.
2915 bool isBlockByref
= VD
->isEscapingByref();
2917 addr
= emitBlockByrefAddress(addr
, VD
);
2920 // Drill into reference types.
2921 LValue LV
= VD
->getType()->isReferenceType() ?
2922 EmitLoadOfReferenceLValue(addr
, VD
->getType(), AlignmentSource::Decl
) :
2923 MakeAddrLValue(addr
, T
, AlignmentSource::Decl
);
2925 bool isLocalStorage
= VD
->hasLocalStorage();
2927 bool NonGCable
= isLocalStorage
&&
2928 !VD
->getType()->isReferenceType() &&
2931 LV
.getQuals().removeObjCGCAttr();
2935 bool isImpreciseLifetime
=
2936 (isLocalStorage
&& !VD
->hasAttr
<ObjCPreciseLifetimeAttr
>());
2937 if (isImpreciseLifetime
)
2938 LV
.setARCPreciseLifetime(ARCImpreciseLifetime
);
2939 setObjCGCLValueClass(getContext(), E
, LV
);
2943 if (const auto *FD
= dyn_cast
<FunctionDecl
>(ND
)) {
2944 LValue LV
= EmitFunctionDeclLValue(*this, E
, FD
);
2946 // Emit debuginfo for the function declaration if the target wants to.
2947 if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) {
2948 if (CGDebugInfo
*DI
= CGM
.getModuleDebugInfo()) {
2950 cast
<llvm::Function
>(LV
.getPointer(*this)->stripPointerCasts());
2951 if (!Fn
->getSubprogram())
2952 DI
->EmitFunctionDecl(FD
, FD
->getLocation(), T
, Fn
);
2959 // FIXME: While we're emitting a binding from an enclosing scope, all other
2960 // DeclRefExprs we see should be implicitly treated as if they also refer to
2961 // an enclosing scope.
2962 if (const auto *BD
= dyn_cast
<BindingDecl
>(ND
)) {
2963 if (E
->refersToEnclosingVariableOrCapture()) {
2964 auto *FD
= LambdaCaptureFields
.lookup(BD
);
2965 return EmitCapturedFieldLValue(*this, FD
, CXXABIThisValue
);
2967 return EmitLValue(BD
->getBinding());
2970 // We can form DeclRefExprs naming GUID declarations when reconstituting
2971 // non-type template parameters into expressions.
2972 if (const auto *GD
= dyn_cast
<MSGuidDecl
>(ND
))
2973 return MakeAddrLValue(CGM
.GetAddrOfMSGuidDecl(GD
), T
,
2974 AlignmentSource::Decl
);
2976 if (const auto *TPO
= dyn_cast
<TemplateParamObjectDecl
>(ND
))
2977 return MakeAddrLValue(CGM
.GetAddrOfTemplateParamObject(TPO
), T
,
2978 AlignmentSource::Decl
);
2980 llvm_unreachable("Unhandled DeclRefExpr");
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
                                            &TBAAInfo);
    LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getLangOpts().ObjC &&
        getLangOpts().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");

    // __real is valid on scalars.  This is a faster way of testing that.
    // __imag can only produce an rvalue on scalars.
    if (E->getOpcode() == UO_Real &&
        !LV.getAddress(*this).getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    QualType T = ExprTy->castAs<ComplexType>()->getElementType();

    Address Component =
        (E->getOpcode() == UO_Real
             ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType())
             : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType()));
    LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
                                   CGM.getTBAAInfoForSubobject(LV, T));
    ElemLV.getQuals().addQualifiers(LV.getQuals());
    return ElemLV;
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}
LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType(), AlignmentSource::Decl);
}
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  auto SL = E->getFunctionName();
  assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
  StringRef FnName = CurFn->getName();
  if (FnName.startswith("\01"))
    FnName = FnName.substr(1);
  StringRef NameItems[] = {
      PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
  std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
  if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
    std::string Name = std::string(SL->getString());
    if (!Name.empty()) {
      unsigned Discriminator =
          CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
      if (Discriminator)
        Name += "_" + Twine(Discriminator + 1).str();
      auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
    }
    auto C =
        CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
    return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
  }

  auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
}
/// Emit a type description suitable for use by a runtime sanitizer library.
/// The format of a type descriptor is
///
/// \code
///   { i16 TypeKind, i16 TypeInfo }
/// \endcode
///
/// followed by an array of i8 containing the type name. TypeKind is 0 for an
/// integer, 1 for a floating point value, and -1 for anything else.
llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
  // Only emit each type's descriptor once.
  if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
    return C;

  uint16_t TypeKind = -1;
  uint16_t TypeInfo = 0;

  if (T->isIntegerType()) {
    TypeKind = 0;
    TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
               (T->isSignedIntegerType() ? 1 : 0);
  } else if (T->isFloatingType()) {
    TypeKind = 1;
    TypeInfo = getContext().getTypeSize(T);
  }

  // Format the type name as if for a diagnostic, including quotes and
  // optionally an 'aka'.
  SmallString<32> Buffer;
  CGM.getDiags().ConvertArgToString(
      DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(),
      StringRef(), std::nullopt, Buffer, std::nullopt);

  llvm::Constant *Components[] = {
    Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
    llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
  };
  llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);

  auto *GV = new llvm::GlobalVariable(
      CGM.getModule(), Descriptor->getType(),
      /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);

  // Remember the descriptor for this type.
  CGM.setTypeDescriptorInMap(T, GV);

  return GV;
}
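
// A worked example of the encoding above (a sketch, not verbatim compiler
// output; the quoted name string comes from the diagnostics engine): for a
// signed 32-bit 'int', TypeKind is 0 and TypeInfo is
// (Log2_32(32) << 1) | 1 == 11, so the emitted descriptor is roughly
//
//   @desc = private unnamed_addr constant { i16, i16, [6 x i8] }
//               { i16 0, i16 11, [6 x i8] c"'int'\00" }
//
// A 'double' would instead get TypeKind 1 with TypeInfo holding the bit
// width (64).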
llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
  llvm::Type *TargetTy = IntPtrTy;

  if (V->getType() == TargetTy)
    return V;

  // Floating-point types which fit into intptr_t are bitcast to integers
  // and then passed directly (after zero-extension, if necessary).
  if (V->getType()->isFloatingPointTy()) {
    unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
    if (Bits <= TargetTy->getIntegerBitWidth())
      V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
                                                         Bits));
  }

  // Integers which fit in intptr_t are zero-extended and passed directly.
  if (V->getType()->isIntegerTy() &&
      V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
    return Builder.CreateZExt(V, TargetTy);

  // Pointers are passed directly, everything else is passed by address.
  if (!V->getType()->isPointerTy()) {
    Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
    Builder.CreateStore(V, Ptr);
    V = Ptr.getPointer();
  }
  return Builder.CreatePtrToInt(V, TargetTy);
}
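
// Illustrative lowering on a 64-bit target (assuming an i64 intptr_t): an
// i32 check operand becomes 'zext i32 %v to i64'; a float is first 'bitcast
// float %v to i32' and then zero-extended; an i128, which does not fit in
// intptr_t, is stored to a stack temporary and its address is passed via
// 'ptrtoint ptr %tmp to i64'.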
/// Emit a representation of a SourceLocation for passing to a handler
/// in a sanitizer runtime library. The format for this data is:
/// \code
///   struct SourceLocation {
///     const char *Filename;
///     int32_t Line, Column;
///   };
/// \endcode
/// For an invalid SourceLocation, the Filename pointer is null.
llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
  llvm::Constant *Filename;
  int Line, Column;

  PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
  if (PLoc.isValid()) {
    StringRef FilenameString = PLoc.getFilename();

    int PathComponentsToStrip =
        CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
    if (PathComponentsToStrip < 0) {
      assert(PathComponentsToStrip != INT_MIN);
      int PathComponentsToKeep = -PathComponentsToStrip;
      auto I = llvm::sys::path::rbegin(FilenameString);
      auto E = llvm::sys::path::rend(FilenameString);
      while (I != E && --PathComponentsToKeep)
        ++I;

      FilenameString = FilenameString.substr(I - E);
    } else if (PathComponentsToStrip > 0) {
      auto I = llvm::sys::path::begin(FilenameString);
      auto E = llvm::sys::path::end(FilenameString);
      while (I != E && PathComponentsToStrip--)
        ++I;

      if (I != E)
        FilenameString =
            FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
      else
        FilenameString = llvm::sys::path::filename(FilenameString);
    }

    auto FilenameGV =
        CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
        cast<llvm::GlobalVariable>(
            FilenameGV.getPointer()->stripPointerCasts()));
    Filename = FilenameGV.getPointer();
    Line = PLoc.getLine();
    Column = PLoc.getColumn();
  } else {
    Filename = llvm::Constant::getNullValue(Int8PtrTy);
    Line = Column = 0;
  }

  llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
                            Builder.getInt32(Column)};

  return llvm::ConstantStruct::getAnon(Data);
}
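
// For example (illustrative): a check at line 42, column 7 of "src/a/b.c"
// built with -fsanitize-undefined-strip-path-components=1 drops the leading
// "src" component and yields the anonymous constant
//   { ptr @.src, i32 42, i32 7 }   ; where @.src holds "a/b.c\00"
// The global's exact name and representation are left to LLVM.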
/// Specify under what conditions this check can be recovered
enum class CheckRecoverableKind {
  /// Always terminate program execution if this check fails.
  Unrecoverable,
  /// Check supports recovering, runtime has both fatal (noreturn) and
  /// non-fatal handlers for this check.
  Recoverable,
  /// Runtime conditionally aborts, always need to support recovery.
  AlwaysRecoverable
};

static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
  assert(Kind.countPopulation() == 1);
  if (Kind == SanitizerKind::Vptr)
    return CheckRecoverableKind::AlwaysRecoverable;
  else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
    return CheckRecoverableKind::Unrecoverable;
  else
    return CheckRecoverableKind::Recoverable;
}
namespace {
struct SanitizerHandlerInfo {
  char const *const Name;
  unsigned Version;
};
}

const SanitizerHandlerInfo SanitizerHandlers[] = {
#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
    LIST_SANITIZER_CHECKS
#undef SANITIZER_CHECK
};
static void emitCheckHandlerCall(CodeGenFunction &CGF,
                                 llvm::FunctionType *FnType,
                                 ArrayRef<llvm::Value *> FnArgs,
                                 SanitizerHandler CheckHandler,
                                 CheckRecoverableKind RecoverKind, bool IsFatal,
                                 llvm::BasicBlock *ContBB) {
  assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
  std::optional<ApplyDebugLocation> DL;
  if (!CGF.Builder.getCurrentDebugLocation()) {
    // Ensure that the call has at least an artificial debug location.
    DL.emplace(CGF, SourceLocation());
  }
  bool NeedsAbortSuffix =
      IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
  bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
  const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
  const StringRef CheckName = CheckInfo.Name;
  std::string FnName = "__ubsan_handle_" + CheckName.str();
  if (CheckInfo.Version && !MinimalRuntime)
    FnName += "_v" + llvm::utostr(CheckInfo.Version);
  if (MinimalRuntime)
    FnName += "_minimal";
  if (NeedsAbortSuffix)
    FnName += "_abort";
  bool MayReturn =
      !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;

  llvm::AttrBuilder B(CGF.getLLVMContext());
  if (!MayReturn) {
    B.addAttribute(llvm::Attribute::NoReturn)
        .addAttribute(llvm::Attribute::NoUnwind);
  }
  B.addUWTableAttr(llvm::UWTableKind::Default);

  llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
      FnType, FnName,
      llvm::AttributeList::get(CGF.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex, B),
      /*Local=*/true);
  llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
  if (!MayReturn) {
    HandlerCall->setDoesNotReturn();
    CGF.Builder.CreateUnreachable();
  } else {
    CGF.Builder.CreateBr(ContBB);
  }
}
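
// Handler-name assembly example (hedged; the authoritative set of versioned
// checks lives in the SANITIZER_CHECK table): the type_mismatch check carries
// version 1, so the full runtime is called as
// __ubsan_handle_type_mismatch_v1, a fatal configuration appends "_abort",
// and -fsanitize-minimal-runtime drops the version suffix to call
// __ubsan_handle_type_mismatch_minimal.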
void CodeGenFunction::EmitCheck(
    ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
    SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
    ArrayRef<llvm::Value *> DynamicArgs) {
  assert(IsSanitizerScope);
  assert(Checked.size() > 0);
  assert(CheckHandler >= 0 &&
         size_t(CheckHandler) < std::size(SanitizerHandlers));
  const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;

  llvm::Value *FatalCond = nullptr;
  llvm::Value *RecoverableCond = nullptr;
  llvm::Value *TrapCond = nullptr;
  for (int i = 0, n = Checked.size(); i < n; ++i) {
    llvm::Value *Check = Checked[i].first;
    // -fsanitize-trap= overrides -fsanitize-recover=.
    llvm::Value *&Cond =
        CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
            ? TrapCond
            : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
                  ? RecoverableCond
                  : FatalCond;
    Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
  }

  if (TrapCond)
    EmitTrapCheck(TrapCond, CheckHandler);
  if (!FatalCond && !RecoverableCond)
    return;

  llvm::Value *JointCond;
  if (FatalCond && RecoverableCond)
    JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
  else
    JointCond = FatalCond ? FatalCond : RecoverableCond;
  assert(JointCond);

  CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
  assert(SanOpts.has(Checked[0].second));
#ifndef NDEBUG
  for (int i = 1, n = Checked.size(); i < n; ++i) {
    assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
           "All recoverable kinds in a single check must be same!");
    assert(SanOpts.has(Checked[i].second));
  }
#endif

  llvm::BasicBlock *Cont = createBasicBlock("cont");
  llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
  llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
  // Give hint that we very much don't expect to execute the handler.
  // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
  Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
  EmitBlock(Handlers);

  // Handler functions take an i8* pointing to the (handler-specific) static
  // information block, followed by a sequence of intptr_t arguments
  // representing operand values.
  SmallVector<llvm::Value *, 4> Args;
  SmallVector<llvm::Type *, 4> ArgTypes;
  if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
    Args.reserve(DynamicArgs.size() + 1);
    ArgTypes.reserve(DynamicArgs.size() + 1);

    // Emit handler arguments and create handler function type.
    if (!StaticArgs.empty()) {
      llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
      auto *InfoPtr = new llvm::GlobalVariable(
          CGM.getModule(), Info->getType(), false,
          llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
          llvm::GlobalVariable::NotThreadLocal,
          CGM.getDataLayout().getDefaultGlobalsAddressSpace());
      InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
      CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
      Args.push_back(InfoPtr);
      ArgTypes.push_back(Args.back()->getType());
    }

    for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
      Args.push_back(EmitCheckValue(DynamicArgs[i]));
      ArgTypes.push_back(IntPtrTy);
    }
  }

  llvm::FunctionType *FnType =
      llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);

  if (!FatalCond || !RecoverableCond) {
    // Simple case: we need to generate a single handler call, either
    // fatal, or non-fatal.
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
                         (FatalCond != nullptr), Cont);
  } else {
    // Emit two handler calls: first one for set of unrecoverable checks,
    // another one for recoverable.
    llvm::BasicBlock *NonFatalHandlerBB =
        createBasicBlock("non_fatal." + CheckName);
    llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
    Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
    EmitBlock(FatalHandlerBB);
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
                         NonFatalHandlerBB);
    EmitBlock(NonFatalHandlerBB);
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
                         Cont);
  }

  EmitBlock(Cont);
}
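
// The generated control flow is roughly the following sketch (block names
// derive from the handler table; the branch weights mark the handler as very
// unlikely):
//
//       %ok = ... joint check condition ...
//       br i1 %ok, label %cont, label %handler.foo, !prof !unlikely
//   handler.foo:
//       call void @__ubsan_handle_foo(ptr @static_data, i64 %dyn0)
//       br label %cont            ; or 'unreachable' for noreturn handlers
//   cont: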
void CodeGenFunction::EmitCfiSlowPathCheck(
    SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
    llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
  llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");

  llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
  llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
  BI->setMetadata(llvm::LLVMContext::MD_prof, Node);

  EmitBlock(CheckBB);

  bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);

  llvm::CallInst *CheckCall;
  llvm::FunctionCallee SlowPathFn;
  if (WithDiag) {
    llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
    auto *InfoPtr =
        new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, Info);
    InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);

    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath_diag",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
                                false));
    CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
  } else {
    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
    CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
  }

  CGM.setDSOLocal(
      cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
  CheckCall->setDoesNotThrow();

  EmitBlock(Cont);
}
// Emit a stub for __cfi_check function so that the linker knows about this
// symbol in LTO mode.
void CodeGenFunction::EmitCfiCheckStub() {
  llvm::Module *M = &CGM.getModule();
  auto &Ctx = M->getContext();
  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
      llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
  F->setAlignment(llvm::Align(4096));
  CGM.setDSOLocal(F);
  llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
  // CrossDSOCFI pass is not executed if there is no executable code.
  SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
  llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
  llvm::ReturnInst::Create(Ctx, nullptr, BB);
}
// This function is basically a switch over the CFI failure kind, which is
// extracted from CFICheckFailData (1st function argument). Each case is either
// llvm.trap or a call to one of the two runtime handlers, based on
// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
// failure kind) traps, but this should really never happen. CFICheckFailData
// can be nullptr if the calling module has -fsanitize-trap behavior for this
// check kind; in this case __cfi_check_fail traps as well.
void CodeGenFunction::EmitCfiCheckFail() {
  SanitizerScope SanScope(this);
  FunctionArgList Args;
  ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
                            ImplicitParamDecl::Other);
  Args.push_back(&ArgData);
  Args.push_back(&ArgAddr);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy,
                                                       Args);

  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
      llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
  F->setVisibility(llvm::GlobalValue::HiddenVisibility);

  StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
                SourceLocation());

  // This function is not affected by NoSanitizeList. This function does
  // not have a source location, but "src:*" would still apply. Revert any
  // changes to SanOpts made in StartFunction.
  SanOpts = CGM.getLangOpts().Sanitize;

  llvm::Value *Data =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgData.getLocation());
  llvm::Value *Addr =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgAddr.getLocation());

  // Data == nullptr means the calling module has trap behaviour for this
  // check.
  llvm::Value *DataIsNotNullPtr =
      Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
  EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);

  llvm::StructType *SourceLocationTy =
      llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
  llvm::StructType *CfiCheckFailDataTy =
      llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);

  llvm::Value *V = Builder.CreateConstGEP2_32(
      CfiCheckFailDataTy,
      Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
      0);

  Address CheckKindAddr(V, Int8Ty, getIntAlign());
  llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);

  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
      CGM.getLLVMContext(),
      llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
  llvm::Value *ValidVtable = Builder.CreateZExt(
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                         {Addr, AllVtables}),
      IntPtrTy);

  const std::pair<int, SanitizerMask> CheckKinds[] = {
      {CFITCK_VCall, SanitizerKind::CFIVCall},
      {CFITCK_NVCall, SanitizerKind::CFINVCall},
      {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
      {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
      {CFITCK_ICall, SanitizerKind::CFIICall}};

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks;
  for (auto CheckKindMaskPair : CheckKinds) {
    int Kind = CheckKindMaskPair.first;
    SanitizerMask Mask = CheckKindMaskPair.second;
    llvm::Value *Cond =
        Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
    if (CGM.getLangOpts().Sanitize.has(Mask))
      EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
                {Data, Addr, ValidVtable});
    else
      EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
  }

  FinishFunction();
  // The only reference to this function will be created during LTO link.
  // Make sure it survives until then.
  CGM.addUsedGlobal(F);
}
void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
  if (SanOpts.has(SanitizerKind::Unreachable)) {
    SanitizerScope SanScope(this);
    EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
                             SanitizerKind::Unreachable),
              SanitizerHandler::BuiltinUnreachable,
              EmitCheckSourceLocation(Loc), std::nullopt);
  }
  Builder.CreateUnreachable();
}
void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
                                    SanitizerHandler CheckHandlerID) {
  llvm::BasicBlock *Cont = createBasicBlock("cont");

  // If we're optimizing, collapse all calls to trap down to just one per
  // check-type per function to save on code size.
  if (TrapBBs.size() <= CheckHandlerID)
    TrapBBs.resize(CheckHandlerID + 1);

  llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];

  if (!ClSanitizeDebugDeoptimization &&
      CGM.getCodeGenOpts().OptimizationLevel && TrapBB &&
      (!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) {
    auto Call = TrapBB->begin();
    assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");

    Call->applyMergedLocation(Call->getDebugLoc(),
                              Builder.getCurrentDebugLocation());
    Builder.CreateCondBr(Checked, Cont, TrapBB);
  } else {
    TrapBB = createBasicBlock("trap");
    Builder.CreateCondBr(Checked, Cont, TrapBB);
    EmitBlock(TrapBB);

    llvm::CallInst *TrapCall = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
        llvm::ConstantInt::get(CGM.Int8Ty, ClSanitizeDebugDeoptimization
                                               ? TrapBB->getParent()->size()
                                               : CheckHandlerID));

    if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
      auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                    CGM.getCodeGenOpts().TrapFuncName);
      TrapCall->addFnAttr(A);
    }
    TrapCall->setDoesNotReturn();
    TrapCall->setDoesNotThrow();
    Builder.CreateUnreachable();
  }

  EmitBlock(Cont);
}
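
// A trap block emitted here looks roughly like the following, where the i8
// immediate encodes the SanitizerHandler ID (or a per-trap counter under
// -ubsan-unique-traps):
//
//   trap:
//     call void @llvm.ubsantrap(i8 3)
//     unreachable
//
// Backends later lower this to a target trap carrying the immediate, e.g. a
// 'ud1' variant on x86 (illustrative; the lowering is target-specific).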
llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
  llvm::CallInst *TrapCall =
      Builder.CreateCall(CGM.getIntrinsic(IntrID));

  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
    auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
                                  CGM.getCodeGenOpts().TrapFuncName);
    TrapCall->addFnAttr(A);
  }

  return TrapCall;
}
Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
                                                 LValueBaseInfo *BaseInfo,
                                                 TBAAAccessInfo *TBAAInfo) {
  assert(E->getType()->isArrayType() &&
         "Array to pointer decay must have array source type!");

  // Expressions of array type can't be bitfields or vector elements.
  LValue LV = EmitLValue(E);
  Address Addr = LV.getAddress(*this);

  // If the array type was an incomplete type, we need to make sure
  // the decay ends up being the right type.
  llvm::Type *NewTy = ConvertType(E->getType());
  Addr = Addr.withElementType(NewTy);

  // Note that VLA pointers are always decayed, so we don't need to do
  // anything here.
  if (!E->getType()->isVariableArrayType()) {
    assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
           "Expected pointer to array");
    Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
  }

  // The result of this decay conversion points to an array element within the
  // base lvalue. However, since TBAA currently does not support representing
  // accesses to elements of member arrays, we conservatively represent
  // accesses to the pointee object as if it had no base lvalue specified.
  // TODO: Support TBAA for member arrays.
  QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
  if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);

  return Addr.withElementType(ConvertTypeForMem(EltType));
}
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
  // If this isn't just an array->pointer decay, bail out.
  const auto *CE = dyn_cast<CastExpr>(E);
  if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
    return nullptr;

  // If this is a decay from variable width array, bail out.
  const Expr *SubExpr = CE->getSubExpr();
  if (SubExpr->getType()->isVariableArrayType())
    return nullptr;

  return SubExpr;
}
static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
                                          llvm::Type *elemType,
                                          llvm::Value *ptr,
                                          ArrayRef<llvm::Value *> indices,
                                          bool inbounds,
                                          bool signedIndices,
                                          SourceLocation loc,
                                          const llvm::Twine &name = "arrayidx") {
  if (inbounds) {
    return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
                                      CodeGenFunction::NotSubtraction, loc,
                                      name);
  } else {
    return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
  }
}
static CharUnits getArrayElementAlign(CharUnits arrayAlign,
                                      llvm::Value *idx,
                                      CharUnits eltSize) {
  // If we have a constant index, we can use the exact offset of the
  // element we're accessing.
  if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
    CharUnits offset = constantIdx->getZExtValue() * eltSize;
    return arrayAlign.alignmentAtOffset(offset);

  // Otherwise, use the worst-case alignment for any element.
  } else {
    return arrayAlign.alignmentOfArrayElement(eltSize);
  }
}
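
// Worked example of the arithmetic above: for an array aligned to 16 bytes
// with 4-byte elements, the constant index 3 yields offset 12 and thus
// alignment 4; an unknown index falls back to alignmentOfArrayElement, which
// is likewise 4 here (the best alignment any element is guaranteed to have).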
static QualType getFixedSizeElementType(const ASTContext &ctx,
                                        const VariableArrayType *vla) {
  QualType eltType;
  do {
    eltType = vla->getElementType();
  } while ((vla = ctx.getAsVariableArrayType(eltType)));
  return eltType;
}
/// Given an array base, check whether its member access belongs to a record
/// with preserve_access_index attribute or not.
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
  if (!ArrayBase || !CGF.getDebugInfo())
    return false;

  // Only support base as either a MemberExpr or DeclRefExpr.
  // DeclRefExpr to cover cases like:
  //    struct s { int a; int b[10]; };
  //    struct s *p; p[1].a
  // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
  // p->b[5] is a MemberExpr example.
  const Expr *E = ArrayBase->IgnoreImpCasts();
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();

  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
    const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
    if (!VarDef)
      return false;

    const auto *PtrT = VarDef->getType()->getAs<PointerType>();
    if (!PtrT)
      return false;

    const auto *PointeeT = PtrT->getPointeeType()
                             ->getUnqualifiedDesugaredType();
    if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
      return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
    return false;
  }

  return false;
}
static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
                                     ArrayRef<llvm::Value *> indices,
                                     QualType eltType, bool inbounds,
                                     bool signedIndices, SourceLocation loc,
                                     QualType *arrayType = nullptr,
                                     const Expr *Base = nullptr,
                                     const llvm::Twine &name = "arrayidx") {
  // All the indices except that last must be zero.
#ifndef NDEBUG
  for (auto *idx : indices.drop_back())
    assert(isa<llvm::ConstantInt>(idx) &&
           cast<llvm::ConstantInt>(idx)->isZero());
#endif

  // Determine the element size of the statically-sized base.  This is
  // the thing that the indices are expressed in terms of.
  if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
    eltType = getFixedSizeElementType(CGF.getContext(), vla);
  }

  // We can use that to compute the best alignment of the element.
  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
  CharUnits eltAlign =
      getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);

  llvm::Value *eltPtr;
  auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
  if (!LastIndex ||
      (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
    eltPtr = emitArraySubscriptGEP(
        CGF, addr.getElementType(), addr.getPointer(), indices, inbounds,
        signedIndices, loc, name);
  } else {
    // Remember the original array subscript for bpf target
    unsigned idx = LastIndex->getZExtValue();
    llvm::DIType *DbgInfo = nullptr;
    if (arrayType)
      DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
    eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(),
                                                        addr.getPointer(),
                                                        indices.size() - 1,
                                                        idx, DbgInfo);
  }

  return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
}
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
                                               bool Accessed) {
  // The index must always be an integer, which is not an aggregate.  Emit it
  // in lexical order (this complexity is, sadly, required by C++17).
  llvm::Value *IdxPre =
      (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
  bool SignedIndices = false;
  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
    auto *Idx = IdxPre;
    if (E->getLHS() != E->getIdx()) {
      assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
      Idx = EmitScalarExpr(E->getIdx());
    }

    QualType IdxTy = E->getIdx()->getType();
    bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
    SignedIndices |= IdxSigned;

    if (SanOpts.has(SanitizerKind::ArrayBounds))
      EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);

    // Extend or truncate the index type to 32 or 64-bits.
    if (Promote && Idx->getType() != IntPtrTy)
      Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
    return Idx;
  };
  IdxPre = nullptr;

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType() &&
      !isa<ExtVectorElementExpr>(E->getBase())) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/false);
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    return LValue::MakeVectorElt(LHS.getAddress(*this), Idx,
                                 E->getBase()->getType(), LHS.getBaseInfo(),
                                 TBAAAccessInfo());
  }

  // All the other cases basically behave like simple offsetting.

  // Handle the extvector case we ignored above.
  if (isa<ExtVectorElementExpr>(E->getBase())) {
    LValue LV = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    Address Addr = EmitExtVectorElementLValue(LV);

    QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
                                 SignedIndices, E->getExprLoc());
    return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, EltType));
  }

  LValueBaseInfo EltBaseInfo;
  TBAAAccessInfo EltTBAAInfo;
  Address Addr = Address::invalid();
  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(E->getType())) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined()) {
      Idx = Builder.CreateMul(Idx, numElements);
    } else {
      Idx = Builder.CreateNSWMul(Idx, numElements);
    }

    Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc());

  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
    // Indexing over an interface, as in "NSString *P; P[4];"

    // Emit the base pointer.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
    llvm::Value *InterfaceSizeVal =
        llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());

    llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);

    // We don't necessarily build correct LLVM struct types for ObjC
    // interfaces, so we can't rely on GEP to do this scaling
    // correctly, so we need to cast to i8*.  FIXME: is this actually
    // true?  A lot of other things in the fragile ABI would break...
    llvm::Type *OrigBaseElemTy = Addr.getElementType();

    // Do the GEP.
    CharUnits EltAlign =
        getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
    llvm::Value *EltPtr =
        emitArraySubscriptGEP(*this, Int8Ty, Addr.getPointer(), ScaledIdx,
                              false, SignedIndices, E->getExprLoc());
    Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    // Propagate the alignment from the array itself to the result.
    QualType arrayType = Array->getType();
    Addr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
        E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
        E->getExprLoc(), &arrayType, E->getBase());
    EltBaseInfo = ArrayLV.getBaseInfo();
    EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
  } else {
    // The base must be a pointer; emit it with an estimate of its alignment.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    QualType ptrType = E->getBase()->getType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc(), &ptrType,
                                 E->getBase());
  }

  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);

  if (getLangOpts().ObjC &&
      getLangOpts().getGC() != LangOptions::NonGC) {
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}
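
// For the common 'A[i]' case on 'int A[10]', the decay and the subscript are
// folded into one instruction, roughly (a sketch; exact IR depends on types
// and sanitizer settings):
//
//   %arrayidx = getelementptr inbounds [10 x i32], ptr %A, i64 0, i64 %idxprom
//
// rather than an "arraydecay" GEP followed by a second "gep x, i".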
LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
  assert(
      !E->isIncomplete() &&
      "incomplete matrix subscript expressions should be rejected during Sema");
  LValue Base = EmitLValue(E->getBase());
  llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
  llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
  llvm::Value *NumRows = Builder.getIntN(
      RowIdx->getType()->getScalarSizeInBits(),
      E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
  llvm::Value *FinalIdx =
      Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
  return LValue::MakeMatrixElt(
      MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
      E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
}
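
// Matrix values are stored as a flat column-major vector, so for a matrix
// with 4 rows the element M[row][col] sits at linear index col * 4 + row;
// e.g. M[2][1] gives FinalIdx = 1 * 4 + 2 = 6 (an illustrative evaluation of
// the CreateMul/CreateAdd sequence above).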
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
                                       LValueBaseInfo &BaseInfo,
                                       TBAAAccessInfo &TBAAInfo,
                                       QualType BaseTy, QualType ElTy,
                                       bool IsLowerBound) {
  LValue BaseLVal;
  if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) {
    BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound);
    if (BaseTy->isArrayType()) {
      Address Addr = BaseLVal.getAddress(CGF);
      BaseInfo = BaseLVal.getBaseInfo();

      // If the array type was an incomplete type, we need to make sure
      // the decay ends up being the right type.
      llvm::Type *NewTy = CGF.ConvertType(BaseTy);
      Addr = Addr.withElementType(NewTy);

      // Note that VLA pointers are always decayed, so we don't need to do
      // anything here.
      if (!BaseTy->isVariableArrayType()) {
        assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
               "Expected pointer to array");
        Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
      }

      return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
    }
    LValueBaseInfo TypeBaseInfo;
    TBAAAccessInfo TypeTBAAInfo;
    CharUnits Align =
        CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
    BaseInfo.mergeForCast(TypeBaseInfo);
    TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
    return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
                   CGF.ConvertTypeForMem(ElTy), Align);
  }
  return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}
LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
                                                bool IsLowerBound) {
  QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase());
  QualType ResultExprTy;
  if (auto *AT = getContext().getAsArrayType(BaseTy))
    ResultExprTy = AT->getElementType();
  else
    ResultExprTy = BaseTy->getPointeeType();
  llvm::Value *Idx = nullptr;
  if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
    // Requesting lower bound or upper bound, but without provided length and
    // without ':' symbol for the default length -> length = 1.
    // Idx = LowerBound ?: 0;
    if (auto *LowerBound = E->getLowerBound()) {
      Idx = Builder.CreateIntCast(
          EmitScalarExpr(LowerBound), IntPtrTy,
          LowerBound->getType()->hasSignedIntegerRepresentation());
    } else
      Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
  } else {
    // Try to emit length or lower bound as constant. If this is possible, 1
    // is subtracted from constant length or lower bound. Otherwise, emit LLVM
    // IR (LB + Len) - 1.
    auto &C = CGM.getContext();
    auto *Length = E->getLength();
    llvm::APSInt ConstLength;
    if (Length) {
      // Idx = LowerBound + Length - 1;
      if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
        ConstLength = CL->zextOrTrunc(PointerWidthInBits);
        Length = nullptr;
      }
      auto *LowerBound = E->getLowerBound();
      llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
      if (LowerBound) {
        if (std::optional<llvm::APSInt> LB =
                LowerBound->getIntegerConstantExpr(C)) {
          ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
          LowerBound = nullptr;
        }
      }
      if (!Length)
        --ConstLength;
      else if (!LowerBound)
        --ConstLowerBound;

      if (Length || LowerBound) {
        auto *LowerBoundVal =
            LowerBound
                ? Builder.CreateIntCast(
                      EmitScalarExpr(LowerBound), IntPtrTy,
                      LowerBound->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
        auto *LengthVal =
            Length
                ? Builder.CreateIntCast(
                      EmitScalarExpr(Length), IntPtrTy,
                      Length->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLength);
        Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
                                /*HasNUW=*/false,
                                !getLangOpts().isSignedOverflowDefined());
        if (Length && LowerBound) {
          Idx = Builder.CreateSub(
              Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
              /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
        }
      } else
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
    } else {
      // Idx = ArraySize - 1;
      QualType ArrayTy = BaseTy->isPointerType()
                             ? E->getBase()->IgnoreParenImpCasts()->getType()
                             : BaseTy;
      if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
        Length = VAT->getSizeExpr();
        if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
          ConstLength = *L;
          Length = nullptr;
        }
      } else {
        auto *CAT = C.getAsConstantArrayType(ArrayTy);
        assert(CAT && "unexpected type for array initializer");
        ConstLength = CAT->getSize();
      }
      if (Length) {
        auto *LengthVal = Builder.CreateIntCast(
            EmitScalarExpr(Length), IntPtrTy,
            Length->getType()->hasSignedIntegerRepresentation());
        Idx = Builder.CreateSub(
            LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
            /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
      } else {
        ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
        --ConstLength;
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
      }
    }
  }
  assert(Idx);

  Address EltPtr = Address::invalid();
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
                                BaseTy, VLA->getElementType(), IsLowerBound);
    // The element count here is the total number of non-VLA elements.
    llvm::Value *NumElements = getVLASize(VLA).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined())
      Idx = Builder.CreateMul(Idx, NumElements);
    else
      Idx = Builder.CreateNSWMul(Idx, NumElements);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);

    // Propagate the alignment from the array itself to the result.
    EltPtr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
        ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
        /*signedIndices=*/false, E->getExprLoc());
    BaseInfo = ArrayLV.getBaseInfo();
    TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
  } else {
    Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
                                           TBAAInfo, BaseTy, ResultExprTy,
                                           IsLowerBound);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  }

  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
}
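
// Index selection examples (illustrative): for the section 'a[1:4]' the
// lower-bound query produces Idx = 1 and the upper-bound query folds the
// constants to Idx = 1 + 4 - 1 = 4; for 'a[:]' on 'int a[10]' the upper
// bound is ArraySize - 1 = 9 and the lower bound defaults to 0.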
LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
    const auto *PT = E->getBase()->getType()->castAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    Address VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
                          AlignmentSource::Decl);
  }

  QualType type =
      E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
  SmallVector<uint32_t, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV =
        llvm::ConstantDataVector::get(getLLVMContext(), Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type,
                                    Base.getBaseInfo(), TBAAAccessInfo());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
                                  Base.getBaseInfo(), TBAAAccessInfo());
}
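
// Swizzle encoding example (a sketch, assuming an ext_vector_type(4) float):
// 'v.yx' encodes the access list as indices {1, 0}, which become the
// constant index vector of the ext-vector lvalue; a nested access such as
// 'v.zwxy.xy' composes the lists through getAggregateElement to yield {2, 3}.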
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
    EmitIgnoredExpr(E->getBase());
    return EmitDeclRefLValue(DRE);
  }

  Expr *BaseExpr = E->getBase();
  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  LValue BaseLV;
  if (E->isArrow()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
    QualType PtrTy = BaseExpr->getType()->getPointeeType();
    SanitizerSet SkippedChecks;
    bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
    if (IsBaseCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
      SkippedChecks.set(SanitizerKind::Null, true);
    EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy,
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
    BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
  } else
    BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);

  NamedDecl *ND = E->getMemberDecl();
  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseLV, Field);
    setObjCGCLValueClass(getContext(), E, LV);
    if (getLangOpts().OpenMP) {
      // If the member was explicitly marked as nontemporal, mark it as
      // nontemporal. If the base lvalue is marked as nontemporal, mark access
      // to children as nontemporal too.
      if ((IsWrappedCXXThis(BaseExpr) &&
           CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
          BaseLV.isNontemporal())
        LV.setNontemporal(/*Value=*/true);
    }
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}
/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
///
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
                                                 llvm::Value *ThisValue) {
  bool HasExplicitObjectParameter = false;
  if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl)) {
    HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
    assert(MD->getParent()->isLambda());
    assert(MD->getParent() == Field->getParent());
  }
  LValue LambdaLV;
  if (HasExplicitObjectParameter) {
    const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
    auto It = LocalDeclMap.find(D);
    assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
    Address AddrOfExplicitObject = It->getSecond();
    if (D->getType()->isReferenceType())
      LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
                                           AlignmentSource::Decl);
    else
      LambdaLV = MakeNaturalAlignAddrLValue(AddrOfExplicitObject.getPointer(),
                                            D->getType().getNonReferenceType());
  } else {
    QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
    LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
  }
  return EmitLValueForField(LambdaLV, Field);
}

LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
  return EmitLValueForLambdaField(Field, CXXABIThisValue);
}
/// Get the field index in the debug info. The debug info structure/union
/// will ignore the unnamed bitfields.
unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
                                             unsigned FieldIndex) {
  unsigned I = 0, Skipped = 0;

  for (auto *F : Rec->getDefinition()->fields()) {
    if (I == FieldIndex)
      break;
    if (F->isUnnamedBitfield())
      Skipped++;
    I++;
  }

  return FieldIndex - Skipped;
}
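
// Example (illustrative): given 'struct S { int a; int : 4; int b; };', the
// unnamed bit-field occupies AST field index 1, so looking up 'b'
// (FieldIndex 2) skips one unnamed bit-field and returns 2 - 1 = 1 as the
// debug-info index.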
/// Get the address of a zero-sized field within a record. The resulting
/// address doesn't necessarily have the right type.
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
                                       const FieldDecl *Field) {
  CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
      CGF.getContext().getFieldOffset(Field));
  if (Offset.isZero())
    return Base;
  Base = Base.withElementType(CGF.Int8Ty);
  return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
}
/// Drill down to the storage of a field without walking into
/// reference types.
///
/// The resulting address doesn't necessarily have the right type.
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
                                      const FieldDecl *field) {
  if (field->isZeroSize(CGF.getContext()))
    return emitAddrOfZeroSizeField(CGF, base, field);

  const RecordDecl *rec = field->getParent();

  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreateStructGEP(base, idx, field->getName());
}
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
                                        Address addr, const FieldDecl *field) {
  const RecordDecl *rec = field->getParent();
  llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
      base.getType(), rec->getLocation());

  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreatePreserveStructAccessIndex(
      addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
}
static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
  const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  if (RD->isDynamicClass())
    return true;

  for (const auto &Base : RD->bases())
    if (hasAnyVptr(Base.getType(), Context))
      return true;

  for (const FieldDecl *Field : RD->fields())
    if (hasAnyVptr(Field->getType(), Context))
      return true;

  return false;
}
LValue CodeGenFunction::EmitLValueForField(LValue base,
                                           const FieldDecl *field) {
  LValueBaseInfo BaseInfo = base.getBaseInfo();

  if (field->isBitField()) {
    const CGRecordLayout &RL =
        CGM.getTypes().getCGRecordLayout(field->getParent());
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
    const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
                             CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
                             Info.VolatileStorageSize != 0 &&
                             field->getType()
                                 .withCVRQualifiers(base.getVRQualifiers())
                                 .isVolatileQualified();
    Address Addr = base.getAddress(*this);
    unsigned Idx = RL.getLLVMFieldNo(field);
    const RecordDecl *rec = field->getParent();
    if (!UseVolatile) {
      if (!IsInPreservedAIRegion &&
          (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
        if (Idx != 0)
          // For structs, we GEP to the field that the record layout suggests.
          Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
      } else {
        llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
            getContext().getRecordType(rec), rec->getLocation());
        Addr = Builder.CreatePreserveStructAccessIndex(
            Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
            DbgInfo);
      }
    }
    const unsigned SS =
        UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
    // Get the access type.
    llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
    Addr = Addr.withElementType(FieldIntTy);
    if (UseVolatile) {
      const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
      if (VolatileOffset)
        Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
    }
    QualType fieldType =
        field->getType().withCVRQualifiers(base.getVRQualifiers());
    // TODO: Support TBAA for bit fields.
    LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
    return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
                                TBAAAccessInfo());
  }

  // Fields of may-alias structures are may-alias themselves.
  // FIXME: this should get propagated down through anonymous structs
  // and unions.
  QualType FieldType = field->getType();
  const RecordDecl *rec = field->getParent();
  AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
  TBAAAccessInfo FieldTBAAInfo;
  if (base.getTBAAInfo().isMayAlias() ||
      rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else if (rec->isUnion()) {
    // TODO: Support TBAA for unions.
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else {
    // If no base type been assigned for the base access, then try to generate
    // one for this base lvalue.
    FieldTBAAInfo = base.getTBAAInfo();
    if (!FieldTBAAInfo.BaseType) {
      FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
      assert(!FieldTBAAInfo.Offset &&
             "Nonzero offset for an access with no base type!");
    }

    // Adjust offset to be relative to the base type.
    const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(field->getParent());
    unsigned CharWidth = getContext().getCharWidth();
    if (FieldTBAAInfo.BaseType)
      FieldTBAAInfo.Offset +=
          Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;

    // Update the final access type and size.
    FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
    FieldTBAAInfo.Size =
        getContext().getTypeSizeInChars(FieldType).getQuantity();
  }

  Address addr = base.getAddress(*this);
  if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        ClassDef->isDynamicClass()) {
      // Getting to any field of dynamic object requires stripping dynamic
      // information provided by invariant.group.  This is because accessing
      // fields may leak the real address of dynamic object, which could result
      // in miscompilation when leaked pointer would be compared.
      auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
      addr = Address(stripped, addr.getElementType(), addr.getAlignment());
    }
  }

  unsigned RecordCVR = base.getVRQualifiers();
  if (rec->isUnion()) {
    // For unions, there is no pointer adjustment.
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        hasAnyVptr(FieldType, getContext()))
      // Because unions can easily skip invariant.barriers, we need to add
      // a barrier every time CXXRecord field with vptr is referenced.
      addr = Builder.CreateLaunderInvariantGroup(addr);

    if (IsInPreservedAIRegion ||
        (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
      // Remember the original union field index
      llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
          base.getType(), rec->getLocation());
      addr = Address(
          Builder.CreatePreserveUnionAccessIndex(
              addr.getPointer(),
              getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
          addr.getElementType(), addr.getAlignment());
    }

    if (FieldType->isReferenceType())
      addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
  } else {
    if (!IsInPreservedAIRegion &&
        (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
      // For structs, we GEP to the field that the record layout suggests.
      addr = emitAddrOfFieldStorage(*this, addr, field);
    else
      // Remember the original struct field index
      addr = emitPreserveStructAccess(*this, base, addr, field);
  }

  // If this is a reference field, load the reference right now.
  if (FieldType->isReferenceType()) {
    LValue RefLVal =
        MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
    if (RecordCVR & Qualifiers::Volatile)
      RefLVal.getQuals().addVolatile();
    addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);

    // Qualifiers on the struct don't apply to the referencee.
    RecordCVR = 0;
    FieldType = FieldType->getPointeeType();
  }

  // Make sure that the address is pointing to the right type.  This is
  // critical for both unions and structs.
  addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));

  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
  LV.getQuals().addCVRQualifiers(RecordCVR);

  // __weak attribute on a field is ignored.
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
    LV.getQuals().removeObjCGCAttr();

  return LV;
}
LValue
CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
                                                  const FieldDecl *Field) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(Base, Field);

  Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field);

  // Make sure that the address is pointing to the right type.
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
  V = V.withElementType(llvmType);

  // TODO: Generate TBAA information that describes this access as a structure
  // member access and not just an access to an object of the field's type.
  // This should be similar to what we do in EmitLValueForField().
  LValueBaseInfo BaseInfo = Base.getBaseInfo();
  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
  return MakeAddrLValue(V, FieldType, FieldBaseInfo,
                        CGM.getTBAAInfoForSubobject(Base, FieldType));
}

LValue
CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E) {
  if (E->isFileScope()) {
    ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
    return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
  }
  if (E->getType()->isVariablyModifiedType())
    // Make sure to emit the VLA size.
    EmitVariablyModifiedType(E->getType());

  Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr *InitExpr = E->getInitializer();
  LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);

  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
                   /*Init*/ true);

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  if (!getLangOpts().CPlusPlus)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
                                  E->getType(), getDestroyer(DtorKind),
                                  DtorKind & EHCleanup);

  return Result;
}
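
// Editor's illustration (not part of the original source): in C,
//   int *p = (int[]){1, 2, 3};   // block scope: a stack temporary whose
//                                // lifetime ends with the enclosing scope
// while at file scope the same literal is given static storage as a global.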

LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
  if (!E->isGLValue())
    // Initializing an aggregate temporary in C++11: T{...}.
    return EmitAggExprToLValue(E);

  // An lvalue initializer list must be initializing a reference.
  assert(E->isTransparent() && "non-transparent glvalue init list");
  return EmitLValue(E->getInit(0));
}

/// Emit the operand of a glvalue conditional operator. This is either a
/// glvalue or a (possibly-parenthesized) throw-expression. If this is a
/// throw, no LValue is returned and the current block has been terminated.
static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
                                                         const Expr *Operand) {
  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
    CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
    return std::nullopt;
  }

  return CGF.EmitLValue(Operand);
}
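
// Editor's illustration: in
//   (ok ? value : throw std::runtime_error("no value")) = 42;
// the throw arm terminates its block, so only the other arm yields an LValue.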

// Handle the case where the condition folds to a simple integer constant,
// which means we don't have to separately emit the true/false blocks.
std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
    CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
  const Expr *condExpr = E->getCond();
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
    if (!CondExprBool)
      std::swap(Live, Dead);

    if (!CGF.ContainsLabel(Dead)) {
      // If the true case is live, we need to track its region.
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      // If the live arm is a throw expression, emit it and return an
      // undefined lvalue because the result can't be used.
      if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
        CGF.EmitCXXThrowExpr(ThrowExpr);
        llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
        llvm::Type *Ty = CGF.UnqualPtrTy;
        return CGF.MakeAddrLValue(
            Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
            Dead->getType());
      }
      return CGF.EmitLValue(Live);
    }
  }
  return std::nullopt;
}

struct ConditionalInfo {
  llvm::BasicBlock *lhsBlock, *rhsBlock;
  std::optional<LValue> LHS, RHS;
};

// Create and generate the 3 blocks for a conditional operator.
// Leaves the 'current block' in the continuation basic block.
template <typename FuncTy>
ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
                                      const AbstractConditionalOperator *E,
                                      const FuncTy &BranchGenFunc) {
  ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
                       CGF.createBasicBlock("cond.false"), std::nullopt,
                       std::nullopt};
  llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
                           CGF.getProfileCount(E));

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.lhsBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
  eval.end(CGF);
  Info.lhsBlock = CGF.Builder.GetInsertBlock();

  if (Info.LHS)
    CGF.Builder.CreateBr(endBlock);

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.rhsBlock);
  eval.begin(CGF);
  Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
  eval.end(CGF);
  Info.rhsBlock = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(endBlock);

  return Info;
}
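
// Editor's sketch of the control flow produced above:
//
//            br %cond, label %cond.true, label %cond.false
//   cond.true:   <emit true expr>   br label %cond.end  (skipped on throw)
//   cond.false:  <emit false expr>
//   cond.end:    continuation; this is the current block on return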

void CodeGenFunction::EmitIgnoredConditionalOperator(
    const AbstractConditionalOperator *E) {
  if (!E->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(E->getType()) &&
           "Unexpected conditional operator!");
    return (void)EmitAggExprToLValue(E);
  }

  OpaqueValueMapping binding(*this, E);
  if (HandleConditionalOperatorLValueSimpleCase(*this, E))
    return;

  EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
    CGF.EmitIgnoredExpr(E);
    return LValue();
  });
}

LValue CodeGenFunction::EmitConditionalOperatorLValue(
    const AbstractConditionalOperator *expr) {
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(expr->getType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  OpaqueValueMapping binding(*this, expr);
  if (std::optional<LValue> Res =
          HandleConditionalOperatorLValueSimpleCase(*this, expr))
    return *Res;

  ConditionalInfo Info = EmitConditionalBlocks(
      *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
        return EmitLValueOrThrowExpression(CGF, E);
      });

  if ((Info.LHS && !Info.LHS->isSimple()) ||
      (Info.RHS && !Info.RHS->isSimple()))
    return EmitUnsupportedLValue(expr, "conditional operator");

  if (Info.LHS && Info.RHS) {
    Address lhsAddr = Info.LHS->getAddress(*this);
    Address rhsAddr = Info.RHS->getAddress(*this);
    llvm::PHINode *phi = Builder.CreatePHI(lhsAddr.getType(), 2, "cond-lvalue");
    phi->addIncoming(lhsAddr.getPointer(), Info.lhsBlock);
    phi->addIncoming(rhsAddr.getPointer(), Info.rhsBlock);
    Address result(phi, lhsAddr.getElementType(),
                   std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
    AlignmentSource alignSource =
        std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
                 Info.RHS->getBaseInfo().getAlignmentSource());
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
        Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
    return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
                          TBAAInfo);
  }

  assert((Info.LHS || Info.RHS) &&
         "both operands of glvalue conditional are throw-expressions?");
  return Info.LHS ? *Info.LHS : *Info.RHS;
}
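
// Editor's illustration: for a glvalue conditional such as
//   (flag ? a : b) = 0;
// each arm computes an address in its own block, and the phi emitted above
// selects which address the subsequent store targets.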

/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
/// type. If the cast is to a reference, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
/// access one of its members. This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_ToVoid:
  case CK_BitCast:
  case CK_LValueToRValueBitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  // These are never l-values; just use the aggregate emission code.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
    return EmitAggExprToLValue(E);

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress(*this);
    const auto *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
  }

  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_LValueToRValue:
    return EmitLValue(E->getSubExpr());

  case CK_NoOp: {
    // CK_NoOp can model a qualification conversion, which can remove an array
    // bound and change the IR type.
    // FIXME: Once pointee types are removed from IR, remove this.
    LValue LV = EmitLValue(E->getSubExpr());
    // Propagate the volatile qualifier to the LValue, if present in E.
    if (E->changesVolatileQualification())
      LV.getQuals() = E->getType().getQualifiers();
    if (LV.isSimple()) {
      Address V = LV.getAddress(*this);
      if (V.isValid()) {
        llvm::Type *T = ConvertTypeForMem(E->getType());
        if (V.getElementType() != T)
          LV.setAddress(V.withElementType(T));
      }
    }
    return LV;
  }

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const auto *DerivedClassTy =
        E->getSubExpr()->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    Address This = LV.getAddress(*this);

    // Perform the derived-to-base conversion
    Address Base = GetAddressOfBaseClass(
        This, DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false, E->getExprLoc());

    // TODO: Support accesses to members of base classes in TBAA. For now, we
    // conservatively pretend that the complete object is of the base class
    // type.
    return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
    Address Derived = GetAddressOfDerivedClass(
        LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false);

    // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (sanitizePerformTypeCheck())
      EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
                    Derived.getPointer(), E->getType());

    if (SanOpts.has(SanitizerKind::CFIDerivedCast))
      EmitVTablePtrCheckForCast(E->getType(), Derived,
                                /*MayBeNull=*/false, CFITCK_DerivedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const auto *CE = cast<ExplicitCastExpr>(E);

    CGM.EmitExplicitCastExprType(CE, this);
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress(*this).withElementType(
        ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));

    if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
      EmitVTablePtrCheckForCast(E->getType(), V,
                                /*MayBeNull=*/false, CFITCK_UnrelatedCast,
                                E->getBeginLoc());

    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_AddressSpaceConversion: {
    LValue LV = EmitLValue(E->getSubExpr());
    QualType DestTy = getContext().getPointerType(E->getType());
    llvm::Value *V = getTargetHooks().performAddrSpaceCast(
        *this, LV.getPointer(*this),
        E->getSubExpr()->getType().getAddressSpace(),
        E->getType().getAddressSpace(), ConvertType(DestTy));
    return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
                                  LV.getAddress(*this).getAlignment()),
                          E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress(*this).withElementType(ConvertType(E->getType()));
    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
  }
  case CK_ZeroToOCLOpaqueType:
    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}
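
// Editor's illustration: lvalue casts cover cases such as
//   Derived d;
//   static_cast<Base&>(d).member = 1;   // CK_DerivedToBase on an lvalue
// where the cast only adjusts the address (plus its alignment and TBAA info)
// rather than producing a new value.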

LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOrCreateOpaqueLValueMapping(e);
}

LValue
CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
  assert(OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
    it = OpaqueLValues.find(e);
  if (it != OpaqueLValues.end())
    return it->second;

  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
  return EmitLValue(e->getSourceExpr());
}

RValue
CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
  assert(!OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
    it = OpaqueRValues.find(e);
  if (it != OpaqueRValues.end())
    return it->second;

  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
  return EmitAnyExpr(e->getSourceExpr());
}

RValue CodeGenFunction::EmitRValueForField(LValue LV,
                                           const FieldDecl *FD,
                                           SourceLocation Loc) {
  QualType FT = FD->getType();
  LValue FieldLV = EmitLValueForField(LV, FD);
  switch (getEvaluationKind(FT)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
  case TEK_Aggregate:
    return FieldLV.asAggregateRValue(*this);
  case TEK_Scalar:
    // This routine is used to load fields one-by-one to perform a copy, so
    // don't load reference fields.
    if (FD->getType()->isReferenceType())
      return RValue::get(FieldLV.getPointer(*this));
    // Use EmitLoadOfLValue when the lvalue is a bitfield; otherwise emit a
    // primitive scalar load.
    if (FieldLV.isBitField())
      return EmitLoadOfLValue(FieldLV, Loc);
    return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
  }
  llvm_unreachable("bad evaluation kind");
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  // A CXXOperatorCallExpr is created even for explicit object methods, but
  // these should be treated like static function calls.
  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const auto *MD =
            dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
        MD && MD->isImplicitObjectMemberFunction())
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  CGCallee callee = EmitCallee(E->getCallee());

  if (callee.isBuiltin()) {
    return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
                           E, ReturnValue);
  }

  if (callee.isPseudoDestructor()) {
    return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
  }

  return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
}

/// Emit a CallExpr without considering whether it might be a subclass.
RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
                                           ReturnValueSlot ReturnValue) {
  CGCallee Callee = EmitCallee(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
}

// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too.
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
  for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
    if (!PD->isInlineBuiltinDeclaration())
      return false;
  return true;
}

static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (auto builtinID = FD->getBuiltinID()) {
    std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
    std::string NoBuiltins = "no-builtins";

    StringRef Ident = CGF.CGM.getMangledName(GD);
    std::string FDInlineName = (Ident + ".inline").str();

    bool IsPredefinedLibFunction =
        CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
    bool HasAttributeNoBuiltin =
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
        CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);

    // When directly calling an inline builtin, call it through its mangled
    // name to make it clear it's not the actual builtin.
    if (CGF.CurFn->getName() != FDInlineName &&
        OnlyHasInlineBuiltinDeclaration(FD)) {
      llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
      llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
      llvm::Module *M = Fn->getParent();
      llvm::Function *Clone = M->getFunction(FDInlineName);
      if (!Clone) {
        Clone = llvm::Function::Create(Fn->getFunctionType(),
                                       llvm::GlobalValue::InternalLinkage,
                                       Fn->getAddressSpace(), FDInlineName, M);
        Clone->addFnAttr(llvm::Attribute::AlwaysInline);
      }
      return CGCallee::forDirect(Clone, GD);
    }

    // Replaceable builtins provide their own implementation of a builtin. If
    // we are in an inline builtin implementation, avoid trivial infinite
    // recursion. Honor __attribute__((no_builtin("foo"))) or
    // __attribute__((no_builtin)) on the current function unless foo is not a
    // predefined library function, in which case we must generate the builtin
    // no matter what.
    else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
      return CGCallee::forBuiltin(builtinID, FD);
  }

  llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
  if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
      FD->hasAttr<CUDAGlobalAttr>())
    CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
        cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));

  return CGCallee::forDirect(CalleePtr, GD);
}
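
// Editor's illustration of the inline-builtin path above: given
//   extern inline __attribute__((gnu_inline, always_inline))
//   void *memcpy(void *d, const void *s, size_t n) { /* custom body */ }
// calls to memcpy are emitted against an internal "memcpy.inline" clone;
// inside the clone itself, memcpy resolves to the real builtin, which avoids
// trivial infinite recursion.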

CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
  E = E->IgnoreParens();

  // Look through function-to-pointer decay.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
        ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
      return EmitCallee(ICE->getSubExpr());
    }

  // Resolve direct calls.
  } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
      return EmitDirectCallee(*this, FD);
    }
  } else if (auto ME = dyn_cast<MemberExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
      EmitIgnoredExpr(ME->getBase());
      return EmitDirectCallee(*this, FD);
    }

  // Look through template substitutions.
  } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
    return EmitCallee(NTTP->getReplacement());

  // Treat pseudo-destructor calls differently.
  } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
    return CGCallee::forPseudoDestructor(PDE);
  }

  // Otherwise, we have an indirect reference.
  llvm::Value *calleePtr;
  QualType functionType;
  if (auto ptrType = E->getType()->getAs<PointerType>()) {
    calleePtr = EmitScalarExpr(E);
    functionType = ptrType->getPointeeType();
  } else {
    functionType = E->getType();
    calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
  }
  assert(functionType->isFunctionType());

  GlobalDecl GD;
  if (const auto *VD =
          dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
    GD = GlobalDecl(VD);

  CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
  CGCallee callee(calleeInfo, calleePtr);
  return callee;
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar: {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    RValue RV = EmitAnyExpr(E->getRHS());
    LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
    if (RV.isScalar())
      EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());
    EmitStoreThroughLValue(RV, LV);
    if (getLangOpts().OpenMP)
      CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                                E->getLHS());
    return LV;
  }

  case TEK_Complex:
    return EmitComplexAssignmentLValue(E);

  case TEK_Aggregate:
    return EmitAggExprToLValue(E);
  }
  llvm_unreachable("bad evaluation kind");
}
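
// Editor's illustration: an assignment used as an l-value, e.g.
//   int x, y;
//   (x = y) = 3;   // emits the store "x = y", then yields x as the LValue
// takes the TEK_Scalar path above: evaluate the RHS, emit the checked store,
// and return the LHS lvalue.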

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
         && "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
  return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
      .withElementType(ConvertType(E->getType()));
}

LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
  return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
                        AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  Address V =
      CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
  return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

llvm::Value *
CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
  QualType PointerDiffType = getContext().getPointerDiffType();
  return Builder.CreateZExtOrTrunc(OffsetValue,
                                   getTypes().ConvertType(PointerDiffType));
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = nullptr;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    BaseValue = BaseLV.getPointer(*this);
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
      EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                        BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                        AlignmentSource::Decl);
}

RValue CodeGenFunction::EmitCall(QualType CalleeType,
                                 const CGCallee &OrigCallee, const CallExpr *E,
                                 ReturnValueSlot ReturnValue,
                                 llvm::Value *Chain) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  const Decl *TargetDecl =
      OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();

  assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
          !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
         "trying to emit a call to an immediate function");

  CalleeType = getContext().getCanonicalType(CalleeType);

  auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();

  CGCallee Callee = OrigCallee;

  if (SanOpts.has(SanitizerKind::Function) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
      !isa<FunctionNoProtoType>(PointeeType)) {
    if (llvm::Constant *PrefixSig =
            CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
      SanitizerScope SanScope(this);
      auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);

      llvm::Type *PrefixSigType = PrefixSig->getType();
      llvm::StructType *PrefixStructTy = llvm::StructType::get(
          CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);

      llvm::Value *CalleePtr = Callee.getFunctionPointer();

      // On 32-bit Arm, the low bit of a function pointer indicates whether
      // it's using the Arm or Thumb instruction set. The actual first
      // instruction lives at the same address either way, so we must clear
      // that low bit before using the function address to find the prefix
      // structure.
      //
      // This applies to both Arm and Thumb target triples, because
      // either one could be used in an interworking context where it
      // might be passed function pointers of both types.
      llvm::Value *AlignedCalleePtr;
      if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
        llvm::Value *CalleeAddress =
            Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
        llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
        llvm::Value *AlignedCalleeAddress =
            Builder.CreateAnd(CalleeAddress, Mask);
        AlignedCalleePtr =
            Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
      } else {
        AlignedCalleePtr = CalleePtr;
      }

      llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
      llvm::Value *CalleeSigPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
      llvm::Value *CalleeSig =
          Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);

      llvm::BasicBlock *Cont = createBasicBlock("cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);

      EmitBlock(TypeCheck);
      llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
          Int32Ty,
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
          getPointerAlign());
      llvm::Value *CalleeTypeHashMatch =
          Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
      llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
                                      EmitCheckTypeDescriptor(CalleeType)};
      EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::Function),
                SanitizerHandler::FunctionTypeMismatch, StaticData,
                {CalleePtr});

      Builder.CreateBr(Cont);
      EmitBlock(Cont);
    }
  }
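
  // Editor's sketch (assuming the standard -fsanitize=function scheme): an
  // instrumented function is preceded by a packed { signature, type-hash }
  // prefix, so the check emitted above amounts to:
  //
  //   sig = load i32 at (fn - 8)
  //   if (sig == expected signature) {      // "typecheck" block
  //     hash = load i32 at (fn - 4)
  //     if (hash != hash of the static callee type)
  //       report function-type-mismatch
  //   }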

  const auto *FnType = cast<FunctionType>(PointeeType);

  // If we are checking indirect calls and this call is indirect, check that
  // the function pointer is a member of the bit set for the function type.
  if (SanOpts.has(SanitizerKind::CFIICall) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    SanitizerScope SanScope(this);
    EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);

    llvm::Metadata *MD;
    if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
      MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
    else
      MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));

    llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    llvm::Value *TypeTest = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});

    auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
    llvm::Constant *StaticData[] = {
        llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
        EmitCheckSourceLocation(E->getBeginLoc()),
        EmitCheckTypeDescriptor(QualType(FnType, 0)),
    };
    if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
      EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
                           CalleePtr, StaticData);
    } else {
      EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
                SanitizerHandler::CFICheckFail, StaticData,
                {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
    }
  }
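
  // Editor's note: with -fsanitize=cfi-icall, llvm.type.test checks that the
  // callee belongs to the set of address-taken functions whose type has the
  // given identifier, e.g. for "void (*fp)(int)":
  //   %ok = call i1 @llvm.type.test(ptr %fp, metadata !"_ZTSFviE")
  // A failed test reaches the CFI failure handling emitted above.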

  CallArgList Args;
  if (Chain)
    Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);

  // C++17 requires that we evaluate arguments to a call using assignment
  // syntax right-to-left, and that we evaluate arguments to certain other
  // operators left-to-right. Note that we allow this to override the order
  // dictated by the calling convention on the MS ABI, which means that
  // parameter destruction order is not necessarily reverse construction
  // order.
  // FIXME: Revisit this based on C++ committee response to unimplementability.
  EvaluationOrder Order = EvaluationOrder::Default;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
    if (OCE->isAssignmentOp())
      Order = EvaluationOrder::ForceRightToLeft;
    else {
      switch (OCE->getOperator()) {
      case OO_LessLess:
      case OO_GreaterGreater:
      case OO_AmpAmp:
      case OO_PipePipe:
      case OO_Comma:
      case OO_ArrowStar:
        Order = EvaluationOrder::ForceLeftToRight;
        break;
      default:
        break;
      }
    }
  }
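
  // Editor's illustration: for an overloaded "a = f()" the RHS is evaluated
  // before the LHS object argument (right-to-left), while for
  // "out << f() << g()" the operands are forced left-to-right, matching
  // C++17 sequencing rules.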

  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
               E->getDirectCallee(), /*ParamsToSkip*/ 0, Order);

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
      Args, FnType, /*ChainCall=*/Chain);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  //
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call. The way we make this work is to cast to the exact type
  // of the promoted arguments.
  //
  // Chain calls use this same code path to add the invisible chain parameter
  // to the function type.
  if (isa<FunctionNoProtoType>(FnType) || Chain) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
    CalleeTy = CalleeTy->getPointerTo(AS);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
    Callee.setFunctionPointer(CalleePtr);
  }
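
  // Editor's illustration: for an unprototyped K&R declaration
  //   int f();
  //   f(1, 2.0);
  // the call is emitted as a non-variadic call to a function of the exact
  // promoted type (int, double), via the "callee.knr.cast" bitcast above.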

  // A HIP function pointer contains a kernel handle when it is used in a
  // triple-chevron launch. The kernel stub needs to be loaded from the kernel
  // handle and used as the callee.
  if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
      isa<CUDAKernelCallExpr>(E) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    llvm::Value *Handle = Callee.getFunctionPointer();
    auto *Stub = Builder.CreateLoad(
        Address(Handle, Handle->getType(), CGM.getPointerAlign()));
    Callee.setFunctionPointer(Stub);
  }
  llvm::CallBase *CallOrInvoke = nullptr;
  RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
                         E == MustTailCall, E->getExprLoc());

  // Generate a function declaration DISubprogram so it can be used in debug
  // info about call sites.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
      FunctionArgList Args;
      QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
      DI->EmitFuncDeclForCallSite(CallOrInvoke,
                                  DI->getFunctionType(CalleeDecl, ResTy, Args),
                                  CalleeDecl);
    }
  }

  return Call;
}

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  Address BaseAddr = Address::invalid();
  if (E->getOpcode() == BO_PtrMemI) {
    BaseAddr = EmitPointerWithAlignment(E->getLHS());
  } else { // E->getOpcode() == BO_PtrMemD
    BaseAddr = EmitLValue(E->getLHS()).getAddress(*this);
  }

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
  const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();

  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address MemberAddr =
      EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
                                      &TBAAInfo);

  return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
}
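
// Editor's illustration: for
//   struct S { int m; };
//   int S::*pm = &S::m;
//   s.*pm = 1;               // BO_PtrMemD
// the RHS scalar is the member offset, and the member address is computed
// from the base address plus that offset.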

/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(Address addr,
                                            QualType type,
                                            SourceLocation loc) {
  LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
  case TEK_Aggregate:
    return lvalue.asAggregateRValue(*this);
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath,
                                            Node);
}
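
// Editor's illustration: SetFPAccuracy(V, 2.5f) attaches !fpmath metadata to
// the instruction, e.g.
//   %q = fdiv float %x, %y, !fpmath !0
//   !0 = !{float 2.500000e+00}
// telling the backend the result may be off by up to 2.5 ULP.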

void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3ulp.
    //
    // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-sqrt flag
    SetFPAccuracy(Val, 3.0f);
  }
}

void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
  llvm::Type *EltTy = Val->getType()->getScalarType();
  if (!EltTy->isFloatTy())
    return;

  if ((getLangOpts().OpenCL &&
       !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
      (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
       !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
    // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp.
    //
    // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
    // build option allows an application to specify that single precision
    // floating-point divide (x/y and 1/x) and sqrt used in the program
    // source are correctly rounded.
    //
    // TODO: CUDA has a prec-div flag
    SetFPAccuracy(Val, 2.5f);
  }
}

struct LValueOrRValue {
  LValue LV;
  RValue RV;
};

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      // Skip unique OVEs.
      if (ov->isUnique()) {
        assert(ov != resultExpr &&
               "A unique OVE cannot be used as the result expression");
        continue;
      }

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isPRValue() && !forLValue &&
          CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);
        LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
                                       AlignmentSource::Decl);
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}
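
// Editor's illustration: for an Objective-C property compound assignment
//   obj.count += 1;
// the pseudo-object's semantic form binds an OVE for the receiver, then
// evaluates the getter, the addition, and the setter as semantic
// expressions; the bound OVEs are unbound at the end of the walk above.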

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}