//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "EHScopeStack.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;
  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

  void DoZeroInitPadding(uint64_t &PaddingStart, uint64_t PaddingEnd,
                         const FieldDecl *NextField);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IsResultUnused(IsResultUnused) { }
  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcIsRValue is true if source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         CodeGenFunction::ExprValueKind SrcValueKind =
                             CodeGenFunction::EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
                     Expr *ExprToVisit, ArrayRef<Expr *> Args,
                     Expr *ArrayFiller);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//
  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  void VisitConstantExpr(ConstantExpr *E) {
    EnsureDest(E->getType());

    if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      CGF.CreateCoercedStore(
          Result, Dest.getAddress(),
          llvm::TypeSize::getFixed(
              Dest.getPreferredSize(CGF.getContext(), E->getType())
                  .getQuantity()),
          E->getType().isVolatileQualified());
      return;
    }
    return Visit(E->getSubExpr());
  }
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    Visit(E->getSemanticForm());
  }

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       FieldDecl *InitializedFieldInUnion,
                                       Expr *ArrayFiller);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);
  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    AggValueSlot Slot = EnsureSlot(E->getType());
    bool NeedsDestruction =
        !Slot.isExternallyDestructed() &&
        E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
    if (NeedsDestruction)
      Slot.setExternallyDestructed();
    CGF.EmitPseudoObjectRValue(E, Slot);
    if (NeedsDestruction)
      CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Slot.getAddress(),
                      E->getType());
  }
  void VisitVAArgExpr(VAArgExpr *E);
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       FieldDecl *InitializedFieldInUnion,
                                       Expr *ArrayFiller);
  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
  void VisitPackIndexingExpr(PackIndexingExpr *E) {
    Visit(E->getSelectedExpr());
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//
/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}
/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}
void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      !Dest.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && Dest.isIgnored());

  Address RetAddr = Address::invalid();
  RawAddress RetAllocaAddr = RawAddress::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
    llvm::TypeSize Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
                               Dest.isExternallyDestructed()));

  if (!UseTemp)
    return;

  assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
                                 Src.getAggregatePointer(E->getType(), CGF));
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
  }
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, CodeGenFunction::EVK_RValue);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(
    QualType type, const LValue &src,
    CodeGenFunction::ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result. Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == CodeGenFunction::EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg = AggValueSlot::forLValue(
      src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased,
      AggValueSlot::MayOverlap);
  EmitCopy(type, Dest, srcAgg);
}
/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}
/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  assert(Field != Record->field_end() &&
         Ctx.hasSameType(Field->getType()->getPointeeType(),
                         ArrayType->getElementType()) &&
         "Expected std::initializer_list first field to be const E *");

  // Start pointer.
  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF);
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;
  assert(Field != Record->field_end() &&
         "Expected std::initializer_list to have two fields");

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    // End pointer.
    assert(Field->getType()->isPointerType() &&
           Ctx.hasSameType(Field->getType()->getPointeeType(),
                           ArrayType->getElementType()) &&
           "Expected std::initializer_list second field to be const E *");
    llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
        ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
        "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  }

  assert(++Field == Record->field_end() &&
         "Expected std::initializer_list to only have two fields");
}
/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}
/// Emit initialization of an array from an initializer list. ExprToVisit must
/// be either an InitListExpr or a CXXParenListInitExpr.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, Expr *ExprToVisit,
                                   ArrayRef<Expr *> Args, Expr *ArrayFiller) {
  uint64_t NumInitElements = Args.size();

  uint64_t NumArrayElements = AType->getNumElements();
  for (const auto *Init : Args) {
    if (const auto *Embed = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
      NumInitElements += Embed->getDataElementCount() - 1;
      if (NumInitElements > NumArrayElements) {
        NumInitElements = NumArrayElements;
        break;
      }
    }
  }

  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGF);
    QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
        CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
        CGM.GetGlobalConstantAddressSpace());
    LangAS AS = GVArrayQTy.getAddressSpace();
    if (llvm::Constant *C =
            Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
          "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
      GV->setAlignment(Align.getAsAlign());
      Address GVAddr(GV, GV->getValueType(), Align);
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
      return;
    }
  }
  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  CodeGenFunction::CleanupDeactivationScope deactivation(CGF);

  llvm::Value *begin = DestPtr.emitRawPointer(CGF);
  if (dtorKind) {
    CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    llvm::Instruction *dominatingIP =
        Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cast<EHCleanupScope>(*CGF.EHStack.find(CGF.EHStack.stable_begin()))
        .AddAuxAllocas(allocaTracker.Take());

    CGF.DeferredDeactivationCleanupStack.push_back(
        {CGF.EHStack.stable_begin(), dominatingIP});
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  auto Emit = [&](Expr *Init, uint64_t ArrayIndex) {
    llvm::Value *element = begin;
    if (ArrayIndex > 0) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, begin,
          llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex), "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid())
        Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);
    EmitInitializationToLValue(Init, elementLV);
    return true;
  };

  unsigned ArrayIndex = 0;
  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    if (ArrayIndex >= NumInitElements)
      break;
    if (auto *EmbedS = dyn_cast<EmbedExpr>(Args[i]->IgnoreParenImpCasts())) {
      EmbedS->doForEachDataElement(Emit, ArrayIndex);
    } else {
      Emit(Args[i], ArrayIndex);
      ArrayIndex++;
    }
  }
  // Check whether there's a non-trivial array-fill expression.
  bool hasTrivialFiller = isTrivialFiller(ArrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    llvm::Value *element = begin;
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element,
          llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
          "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(
        llvmElementType, begin,
        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV = CGF.MakeAddrLValue(
          Address(currentElement, llvmElementType, elementAlign), elementType);
      if (ArrayFiller)
        EmitInitializationToLValue(ArrayFiller, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement = Builder.CreateInBoundsGEP(
        llvmElementType, currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }
}
//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // If this is a unique OVE, just visit its source expression.
  if (e->isUnique())
    Visit(e->getSourceExpr());
  else
    EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}
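
// Emit a compound literal with aggregate type. If the destination may alias
// the literal and the type is POD, fall back to a load of the lvalue plus a
// copy; otherwise emit the initializer directly into the destination slot.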
void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool Destruct =
      !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
  if (Destruct)
    Slot.setExternallyDestructed();

  CGF.EmitAggExpr(E->getInitializer(), Slot);

  if (Destruct)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      CGF.pushLifetimeExtendedDestroy(
          CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
          CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
}
/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
  op = op->IgnoreParenNoopCasts(ctx);
  if (auto castE = dyn_cast<CastExpr>(op)) {
    if (castE->getCastKind() == kind)
      return castE->getSubExpr();
  }
  return nullptr;
}
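
// Emit an aggregate-typed cast. Most cast kinds either forward to the
// subexpression or are impossible for aggregates; the interesting cases are
// dynamic_cast, the GCC union extension, lvalue-to-rvalue bit-casts, and
// atomic <-> non-atomic conversions.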
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_LValueToRValueBitCast: {
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
    Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
    Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
    llvm::Value *SizeVal = llvm::ConstantInt::get(
        CGF.SizeTy,
        CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
    Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                  atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op =
            findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
             "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::DoesNotOverlap,
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
        CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }
  case CK_AddressSpaceConversion:
    return Visit(E->getSubExpr());

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      bool Destruct =
          !Dest.isExternallyDestructed() &&
          E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
      if (Destruct)
        Dest.setExternallyDestructed();
      EnsureDest(E->getType());
      Visit(E->getSubExpr());

      if (Destruct)
        CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                        E->getType());

      return;
    }

    [[fallthrough]];

  case CK_HLSLArrayRValue:
    Visit(E->getSubExpr());
    break;

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:
  case CK_MatrixCast:
  case CK_HLSLVectorTruncation:

  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}
void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}
void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}
void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

enum CompareKind {
  CK_Less,
  CK_Greater,
  CK_Equal,
};
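
// Helper for VisitBinCmp: emit a single scalar comparison of the requested
// kind, choosing the floating-point, signed, or unsigned predicate as
// appropriate and delegating member-pointer equality to the C++ ABI.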
static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                const BinaryOperator *E, llvm::Value *LHS,
                                llvm::Value *RHS, CompareKind Kind,
                                const char *NameSuffix = "") {
  QualType ArgTy = E->getLHS()->getType();
  if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
    ArgTy = CT->getElementType();

  if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
    assert(Kind == CK_Equal &&
           "member pointers may only be compared for equality");
    return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, /*IsInequality*/ false);
  }

  // Compute the comparison instructions for the specified comparison kind.
  struct CmpInstInfo {
    const char *Name;
    llvm::CmpInst::Predicate FCmp;
    llvm::CmpInst::Predicate SCmp;
    llvm::CmpInst::Predicate UCmp;
  };
  CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
    using FI = llvm::FCmpInst;
    using II = llvm::ICmpInst;
    switch (Kind) {
    case CK_Less:
      return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
    case CK_Greater:
      return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
    case CK_Equal:
      return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
    }
    llvm_unreachable("Unrecognised CompareKind enum");
  }();

  if (ArgTy->hasFloatingRepresentation())
    return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
    auto Inst =
        ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
    return Builder.CreateICmp(Inst, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  }

  llvm_unreachable("unsupported aggregate binary expression should have "
                   "already been handled");
}
void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
  using llvm::BasicBlock;
  using llvm::PHINode;
  using llvm::Value;
  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
                                      E->getRHS()->getType()));
  const ComparisonCategoryInfo &CmpInfo =
      CGF.getContext().CompCategories.getInfoForType(E->getType());
  assert(CmpInfo.Record->isTriviallyCopyable() &&
         "cannot copy non-trivially copyable aggregate");

  QualType ArgTy = E->getLHS()->getType();

  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
  }
  bool IsComplex = ArgTy->isAnyComplexType();

  // Evaluate the operands to the expression and extract their values.
  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
    RValue RV = CGF.EmitAnyExpr(E);
    if (RV.isScalar())
      return {RV.getScalarVal(), nullptr};
    if (RV.isAggregate())
      return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
    assert(RV.isComplex());
    return RV.getComplexVal();
  };
  auto LHSValues = EmitOperand(E->getLHS()),
       RHSValues = EmitOperand(E->getRHS());

  auto EmitCmp = [&](CompareKind K) {
    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
                             K, IsComplex ? ".r" : "");
    if (!IsComplex)
      return Cmp;
    assert(K == CompareKind::CK_Equal);
    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
                                 RHSValues.second, K, ".i");
    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
  };
  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
    return Builder.getInt(VInfo->getIntValue());
  };

  Value *Select;
  if (ArgTy->isNullPtrType()) {
    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
  } else if (!CmpInfo.isPartial()) {
    Value *SelectOne =
        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
                                  SelectOne, "sel.eq");
  } else {
    Value *SelectEq = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
                                           EmitCmpRes(CmpInfo.getGreater()),
                                           SelectEq, "sel.gt");
    Select = Builder.CreateSelect(
        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
  }
  // Create the return value in the destination slot.
  EnsureDest(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Emit the address of the first (and only) field in the comparison category
  // type, and initialize it from the constant integer value selected above.
  LValue FieldLV = CGF.EmitLValueForFieldInitialization(
      DestLV, *CmpInfo.Record->field_begin());
  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);

  // All done! The result is in the Dest slot.
}
void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}
/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}
*E
) {
1206 // For an assignment to work, the value on the right has
1207 // to be compatible with the value on the left.
1208 assert(CGF
.getContext().hasSameUnqualifiedType(E
->getLHS()->getType(),
1209 E
->getRHS()->getType())
1210 && "Invalid assignment");
1212 // If the LHS might be a __block variable, and the RHS can
1213 // potentially cause a block copy, we need to evaluate the RHS first
1214 // so that the assignment goes the right place.
1215 // This is pretty semantically fragile.
1216 if (isBlockVarRef(E
->getLHS()) &&
1217 E
->getRHS()->HasSideEffects(CGF
.getContext())) {
1218 // Ensure that we have a destination, and evaluate the RHS into that.
1219 EnsureDest(E
->getRHS()->getType());
1222 // Now emit the LHS and copy into it.
1223 LValue LHS
= CGF
.EmitCheckedLValue(E
->getLHS(), CodeGenFunction::TCK_Store
);
1225 // That copy is an atomic copy if the LHS is atomic.
1226 if (LHS
.getType()->isAtomicType() ||
1227 CGF
.LValueIsSuitableForInlineAtomic(LHS
)) {
1228 CGF
.EmitAtomicStore(Dest
.asRValue(), LHS
, /*isInit*/ false);
1232 EmitCopy(E
->getLHS()->getType(),
1233 AggValueSlot::forLValue(LHS
, AggValueSlot::IsDestructed
,
1234 needsGC(E
->getLHS()->getType()),
1235 AggValueSlot::IsAliased
,
1236 AggValueSlot::MayOverlap
),
1241 LValue LHS
= CGF
.EmitLValue(E
->getLHS());
1243 // If we have an atomic type, evaluate into the destination and then
1244 // do an atomic copy.
1245 if (LHS
.getType()->isAtomicType() ||
1246 CGF
.LValueIsSuitableForInlineAtomic(LHS
)) {
1247 EnsureDest(E
->getRHS()->getType());
1249 CGF
.EmitAtomicStore(Dest
.asRValue(), LHS
, /*isInit*/ false);
1253 // Codegen the RHS so that it stores directly into the LHS.
1254 AggValueSlot LHSSlot
= AggValueSlot::forLValue(
1255 LHS
, AggValueSlot::IsDestructed
, needsGC(E
->getLHS()->getType()),
1256 AggValueSlot::IsAliased
, AggValueSlot::MayOverlap
);
1257 // A non-volatile aggregate destination might have volatile member.
1258 if (!LHSSlot
.isVolatile() &&
1259 CGF
.hasVolatileMember(E
->getLHS()->getType()))
1260 LHSSlot
.setVolatile(true);
1262 CGF
.EmitAggExpr(E
->getRHS(), LHSSlot
);
1264 // Copy into the destination if the assignment isn't ignored.
1265 EmitFinalDestCopy(E
->getType(), LHS
);
1267 if (!Dest
.isIgnored() && !Dest
.isExternallyDestructed() &&
1268 E
->getType().isDestructedType() == QualType::DK_nontrivial_c_struct
)
1269 CGF
.pushDestroy(QualType::DK_nontrivial_c_struct
, Dest
.getAddress(),
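
// Emit an aggregate-typed conditional operator by branching on the condition
// and emitting each arm into the same destination slot.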
void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();
  bool destructNonTrivialCStruct =
      !isExternallyDestructed &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
  isExternallyDestructed |= destructNonTrivialCStruct;
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E->getTrueExpr());
  else
    CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E->getFalseExpr());
  Visit(E->getFalseExpr());
  eval.end(CGF);

  if (destructNonTrivialCStruct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());

  CGF.EmitBlock(ContBlock);
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E);
}
void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  CGF.EmitVAArg(VE, ArgValue, Dest);

  // If EmitVAArg fails, emit an error.
  if (!ArgValue.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }
}
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}
void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}
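
// Emit a lambda closure object by initializing each capture field of the
// closure type in turn, deferring destructor cleanups for captures that
// need them.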
void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception or contains a branch out of the
  // expressions.
  CodeGenFunction::CleanupDeactivationScope scope(CGF);

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
                                               e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit initialization
    LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
      continue;
    }

    EmitInitializationToLValue(*i, LV);

    // Push a destructor if necessary.
    if (QualType::DestructionKind DtorKind =
            CurField->getType().isDestructedType()) {
      assert(LV.isSimple());
      CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
                                          CurField->getType(),
                                          CGF.getDestroyer(DtorKind), false);
    }
  }
}
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}
/// Determine whether the given cast kind is known to always convert values
/// with all zero bits in their value representation to values with all zero
/// bits in their value representation.
static bool castPreservesZero(const CastExpr *CE) {
  switch (CE->getCastKind()) {
    // No-ops.
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
  case CK_BitCast:
  case CK_ToUnion:
  case CK_ToVoid:
    // Conversions between (possibly-complex) integral, (possibly-complex)
    // floating-point, and bool.
  case CK_BooleanToSignedIntegral:
  case CK_FloatingCast:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexToIntegralComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingRealToComplex:
  case CK_FloatingToBoolean:
  case CK_FloatingToIntegral:
  case CK_IntegralCast:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexToFloatingComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralRealToComplex:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
    // Reinterpreting integers as pointers and vice versa.
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
    // Language extensions.
  case CK_VectorSplat:
  case CK_MatrixCast:
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
  case CK_HLSLVectorTruncation:
    return true;

  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_NullToMemberPointer:
  case CK_ReinterpretMemberPointer:
    // FIXME: ABI-dependent.
    return false;

  case CK_AnyPointerToBlockPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_CPointerToObjCPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_IntToOCLSampler:
  case CK_ZeroToOCLOpaqueType:
    // FIXME: Check these.
    return false;

  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToFloating:
  case CK_FixedPointToIntegral:
  case CK_FloatingToFixedPoint:
  case CK_IntegralToFixedPoint:
    // FIXME: Do all fixed-point types represent zero as all 0 bits?
    return false;

  case CK_AddressSpaceConversion:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_Dynamic:
  case CK_NullToPointer:
  case CK_PointerToBoolean:
    // FIXME: Preserves zeroes only if zero pointers and null pointers have the
    // same representation in all involved address spaces.
    return false;

  case CK_ARCConsumeObject:
  case CK_ARCExtendBlockObject:
  case CK_ARCProduceObject:
  case CK_ARCReclaimReturnedObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_BuiltinFnToFnPtr:
  case CK_Dependent:
  case CK_LValueBitCast:
  case CK_LValueToRValue:
  case CK_LValueToRValueBitCast:
  case CK_UncheckedDerivedToBase:
  case CK_HLSLArrayRValue:
    return false;
  }
  llvm_unreachable("Unhandled clang::CastKind enum");
}
/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();
  while (auto *CE = dyn_cast<CastExpr>(E)) {
    if (!castPreservesZero(CE))
      break;
    E = CE->getSubExpr()->IgnoreParens();
  }

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer &&
           CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
           !E->HasSideEffects(CGF.getContext());
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}
void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  CGF.EmitInitializationToLValue(E, LV, Dest.isZeroed());
}
void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}
void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
  VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
                                  E->getInitializedFieldInUnion(),
                                  E->getArrayFiller());
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  if (E->isTransparent())
    return Visit(E->getInit(0));

  VisitCXXParenListOrInitListExpr(
      E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
}

void AggExprEmitter::VisitCXXParenListOrInitListExpr(
    Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
    FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant *C =
          CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
    llvm::GlobalVariable *GV =
        new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                                 llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(ExprToVisit->getType(),
                      CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
    return;
  }
#endif

  AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());

  // Handle initialization of an array.
  if (ExprToVisit->getType()->isConstantArrayType()) {
    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
    EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
                  InitExprs, ArrayFiller);
    return;
  } else if (ExprToVisit->getType()->isVariableArrayType()) {
    // A variable array type that has an initializer can only do empty
    // initialization. And because this feature is not exposed as an extension
    // in C++, we can safely memset the array memory to zero.
    assert(InitExprs.size() == 0 &&
           "you can only use an empty initializer with VLAs");
    CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
    return;
  }

  assert(ExprToVisit->getType()->isRecordType() &&
         "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = InitExprs.size();
  RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
    assert(NumInitElements >= CXXRD->getNumBases() &&
           "missing initializer for base class");
    for (auto &Base : CXXRD->bases()) {
      assert(!Base.isVirtual() && "should not see vbases here");
      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
          Dest.getAddress(), CXXRD, BaseRD,
          /*isBaseVirtual*/ false);
      AggValueSlot AggSlot = AggValueSlot::forAddr(
          V, Qualifiers(),
          AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers,
          AggValueSlot::IsNotAliased,
          CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
      CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);

      if (QualType::DestructionKind dtorKind =
              Base.getType().isDestructedType())
        CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
    }
  }

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());

  const bool ZeroInitPadding =
      CGF.CGM.shouldZeroInitPadding() && !Dest.isZeroed();

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!InitializedFieldInUnion) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really empty and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert(
            (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
            "Only unnamed bitfields or anonymous class allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = InitializedFieldInUnion;

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(InitExprs[0], FieldLoc);
      if (ZeroInitPadding) {
        uint64_t TotalSize = CGF.getContext().toBits(
            Dest.getPreferredSize(CGF.getContext(), DestLV.getType()));
        uint64_t FieldSize = CGF.getContext().getTypeSize(FieldLoc.getType());
        DoZeroInitPadding(FieldSize, TotalSize, nullptr);
      }
    } else {
      // Default-initialize to null.
      if (ZeroInitPadding)
        EmitNullInitializationToLValue(DestLV);
      else
        EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(record);
  uint64_t PaddingStart = 0;

  for (const auto *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitField())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
      break;

    if (ZeroInitPadding)
      DoZeroInitPadding(PaddingStart,
                        Layout.getFieldOffset(field->getFieldIndex()), field);

    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
    } else {
      // We're out of initializers; default-initialize to null
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
                                          field->getType(),
                                          CGF.getDestroyer(dtorKind), false);
    }
  }
  if (ZeroInitPadding) {
    uint64_t TotalSize = CGF.getContext().toBits(
        Dest.getPreferredSize(CGF.getContext(), DestLV.getType()));
    DoZeroInitPadding(PaddingStart, TotalSize, nullptr);
  }
}

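// For illustration: given "struct S { char c; int i; };" and "S s{1, 2};", the
// loop above stores 1 into "c", zero-fills the padding bytes between "c" and
// "i" when shouldZeroInitPadding() is in effect, stores 2 into "i", and then
// zeroes any tail padding after the last field.
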
void AggExprEmitter::DoZeroInitPadding(uint64_t &PaddingStart,
                                       uint64_t PaddingEnd,
                                       const FieldDecl *NextField) {

  auto InitBytes = [&](uint64_t StartBit, uint64_t EndBit) {
    CharUnits Start = CGF.getContext().toCharUnitsFromBits(StartBit);
    CharUnits End = CGF.getContext().toCharUnitsFromBits(EndBit);
    Address Addr = Dest.getAddress().withElementType(CGF.CharTy);
    if (!Start.isZero())
      Addr = Builder.CreateConstGEP(Addr, Start.getQuantity());
    llvm::Constant *SizeVal = Builder.getInt64((End - Start).getQuantity());
    CGF.Builder.CreateMemSet(Addr, Builder.getInt8(0), SizeVal, false);
  };

  if (NextField != nullptr && NextField->isBitField()) {
    // For a bitfield, zero-initialize the whole storage unit (StorageSize bits)
    // before storing the bits, so we don't need to handle big/little endian.
    const CGRecordLayout &RL =
        CGF.getTypes().getCGRecordLayout(NextField->getParent());
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(NextField);
    uint64_t StorageStart = CGF.getContext().toBits(Info.StorageOffset);
    if (StorageStart + Info.StorageSize > PaddingStart) {
      if (StorageStart > PaddingStart)
        InitBytes(PaddingStart, StorageStart);
      Address Addr = Dest.getAddress();
      if (!Info.StorageOffset.isZero())
        Addr = Builder.CreateConstGEP(Addr.withElementType(CGF.CharTy),
                                      Info.StorageOffset.getQuantity());
      Addr = Addr.withElementType(
          llvm::Type::getIntNTy(CGF.getLLVMContext(), Info.StorageSize));
      Builder.CreateStore(Builder.getIntN(Info.StorageSize, 0), Addr);
      PaddingStart = StorageStart + Info.StorageSize;
    }
    return;
  }

  if (PaddingStart < PaddingEnd)
    InitBytes(PaddingStart, PaddingEnd);
  if (NextField != nullptr)
    PaddingStart =
        PaddingEnd + CGF.getContext().getTypeSize(NextField->getType());
}

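// For illustration: with "struct S { char c; int b : 3; };", the bitfield's
// whole storage unit is first stored as zero, so the later store of the 3-bit
// value does not need any endian-aware masking of the untouched bits.
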
void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                                            llvm::Value *outerBegin) {
  // Emit the common subexpression.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());

  Address destPtr = EnsureSlot(E->getType()).getAddress();
  uint64_t numElements = E->getArraySize().getZExtValue();

  if (!numElements)
    return;

  // destPtr is an array*. Construct an elementType* by drilling down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = {zero, zero};
  llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
                                                 destPtr.emitRawPointer(CGF),
                                                 indices, "arrayinit.begin");

  // Prepare to special-case multidimensional array initialization: we avoid
  // emitting multiple destructor loops in that case.
  if (!outerBegin)
    outerBegin = begin;
  ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());

  QualType elementType =
      CGF.getContext().getAsArrayType(E->getType())->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

  // Jump into the body.
  CGF.EmitBlock(bodyBB);
  llvm::PHINode *index =
      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
  index->addIncoming(zero, entryBB);
  llvm::Value *element =
      Builder.CreateInBoundsGEP(llvmElementType, begin, index);

  // Prepare for a cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
    if (outerBegin->getType() != element->getType())
      outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
    CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
                                       elementAlign,
                                       CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();
  } else {
    dtorKind = QualType::DK_none;
  }

  // Emit the actual filler expression.
  {
    // Temporaries created in an array initialization loop are destroyed
    // at the end of each iteration.
    CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
    CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);

    if (InnerLoop) {
      // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
      auto elementSlot = AggValueSlot::forLValue(
          elementLV, AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
          AggValueSlot::DoesNotOverlap);
      AggExprEmitter(CGF, elementSlot, false)
          .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
    } else
      EmitInitializationToLValue(E->getSubExpr(), elementLV);
  }

  // Move on to the next element.
  llvm::Value *nextIndex = Builder.CreateNUWAdd(
      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
  index->addIncoming(nextIndex, Builder.GetInsertBlock());

  // Leave the loop if we're done.
  llvm::Value *done = Builder.CreateICmpEQ(
      nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
      "arrayinit.done");
  llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
  Builder.CreateCondBr(done, endBB, bodyBB);

  CGF.EmitBlock(endBB);

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind)
    CGF.DeactivateCleanupBlock(cleanup, index);
}

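// ArrayInitLoopExpr arises, for example, when a lambda captures an array by
// copy: the loop above initializes each element in turn, with a partial-array
// EH cleanup covering the elements constructed so far.
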
void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  EmitInitializationToLValue(E->getBase(), DestLV);
  VisitInitListExpr(E->getUpdater());
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
    E = MTE->getSubExpr();
  E = E->IgnoreParenNoopCasts(CGF.getContext());

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  while (ILE && ILE->isTransparent())
    ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = RT->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
        while (ILEElement != CXXRD->getNumBases())
          NumNonZeroBytes +=
              GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
      for (const auto *Field : SD->fields()) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitField())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getTarget().getPointerWidth(LangAS::Default));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  // FIXME: This overestimates the number of non-zero bytes for bit-fields.
  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

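// For illustration: "struct { int a; int b[8]; } x = {5};" reports roughly 4
// non-zero bytes (for the 5) out of 36, so CheckAggExprForMemSetUse below will
// favor a memset of the whole object followed by a single scalar store.
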
/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
    return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                   .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16-bytes or smaller, prefer individual stores over memset.
  CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
  if (Size <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer are known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes * 4 > Size)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());

  Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}

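// For illustration: a 40-byte aggregate whose initializer has only 4 non-zero
// bytes passes both checks above (it is larger than 16 bytes and 4 * 4 <= 40),
// so it is emitted as one 40-byte memset followed by individual stores for the
// non-zero elements.
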
/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
/// the value of the aggregate expression is not needed.  If VolatileDest is
/// true, DestPtr cannot be 0.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr *>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  Address Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased,
                                         AggValueSlot::DoesNotOverlap));
  return LV;
}

void CodeGenFunction::EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest,
                                           const LValue &Src,
                                           ExprValueKind SrcKind) {
  return AggExprEmitter(*this, Dest, Dest.isIgnored())
      .EmitFinalDestCopy(Type, Src, SrcKind);
}

AggValueSlot::Overlap_t
CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
  if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
    return AggValueSlot::DoesNotOverlap;

  // Empty fields can overlap earlier fields.
  if (FD->getType()->getAsCXXRecordDecl()->isEmpty())
    return AggValueSlot::MayOverlap;

  // If the field lies entirely within the enclosing class's nvsize, its tail
  // padding cannot overlap any already-initialized object.  (The only subobjects
  // with greater addresses that might already be initialized are vbases.)
  const RecordDecl *ClassRD = FD->getParent();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
  if (Layout.getFieldOffset(FD->getFieldIndex()) +
          getContext().getTypeSize(FD->getType()) <=
      (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
    return AggValueSlot::DoesNotOverlap;

  // The tail padding may contain values we need to preserve.
  return AggValueSlot::MayOverlap;
}

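// For illustration: in "struct Empty {}; struct B { int i;
// [[no_unique_address]] Empty e; };", the empty field "e" may share an address
// with "i", so initializing "e" reports MayOverlap and the emitter avoids
// writing over storage that other members may occupy.
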
AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
    const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
  // If the most-derived object is a field declared with [[no_unique_address]],
  // the tail padding of any virtual base could be reused for other subobjects
  // of that field's class.
  if (IsVirtual)
    return AggValueSlot::MayOverlap;

  // Empty bases can overlap earlier bases.
  if (BaseRD->isEmpty())
    return AggValueSlot::MayOverlap;

  // If the base class is laid out entirely within the nvsize of the derived
  // class, its tail padding cannot yet be initialized, so we can issue
  // stores at the full width of the base class.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  if (Layout.getBaseClassOffset(BaseRD) +
          getContext().getASTRecordLayout(BaseRD).getSize() <=
      Layout.getNonVirtualSize())
    return AggValueSlot::DoesNotOverlap;

  // The tail padding may contain values we need to preserve.
  return AggValueSlot::MayOverlap;
}

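// For illustration: in "struct D : B1, B2 {};" where B2 is an empty class, B2
// may be laid out at the same offset as B1, so its initialization reports
// MayOverlap; a non-empty, non-virtual base contained within D's nvsize can be
// stored at its full width instead.
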
void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
                                        AggValueSlot::Overlap_t MayOverlap,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  Address DestPtr = Dest.getAddress();
  Address SrcPtr = Src.getAddress();

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment() ||
              Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  if (getLangOpts().CUDAIsDevice) {
    if (Ty->isCUDADeviceBuiltinSurfaceType()) {
      if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
                                                                  Src))
        return;
    } else if (Ty->isCUDADeviceBuiltinTextureType()) {
      if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
                                                                  Src))
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get data size info for this aggregate. Don't copy the tail padding if this
  // might be a potentially-overlapping subobject, since the tail padding might
  // be occupied by a different object. Otherwise, copying it is fine.
  TypeInfoChars TypeInfo;
  if (MayOverlap)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  llvm::Value *SizeVal = nullptr;
  if (TypeInfo.Width.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
            getContext().getAsArrayType(Ty))) {
      QualType BaseEltTy;
      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
      TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
      assert(!TypeInfo.Width.isZero());
      SizeVal = Builder.CreateNUWMul(
          SizeVal,
          llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
    }
  }
  if (!SizeVal) {
    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
  }

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  DestPtr = DestPtr.withElementType(Int8Ty);
  SrcPtr = SrcPtr.withElementType(Int8Ty);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (CGM.getCodeGenOpts().NewStructPathTBAA) {
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
        Dest.getTBAAInfo(), Src.getTBAAInfo());
    CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);