//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IsResultUnused(IsResultUnused) { }
  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  enum ExprValueKind {
    EVK_RValue,
    EVK_NonRValue
  };

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcIsRValue is true if source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         ExprValueKind SrcValueKind = EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
                     Expr *ExprToVisit, ArrayRef<Expr *> Args,
                     Expr *ArrayFiller);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);
  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  void VisitConstantExpr(ConstantExpr *E) {
    EnsureDest(E->getType());

    if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      Address StoreDest = Dest.getAddress();
      // The emitted value is guaranteed to have the same size as the
      // destination but can have a different type. Just do a bitcast in this
      // case to avoid incorrect GEPs.
      if (Result->getType() != StoreDest.getType())
        StoreDest =
            CGF.Builder.CreateElementBitCast(StoreDest, Result->getType());

      CGF.EmitAggregateStore(Result, StoreDest,
                             E->getType().isVolatileQualified());
      return;
    }
    return Visit(E->getSubExpr());
  }
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    Visit(E->getSemanticForm());
  }

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       FieldDecl *InitializedFieldInUnion,
                                       Expr *ArrayFiller);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);
  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    AggValueSlot Slot = EnsureSlot(E->getType());
    bool NeedsDestruction =
        !Slot.isExternallyDestructed() &&
        E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
    if (NeedsDestruction)
      Slot.setExternallyDestructed();
    CGF.EmitPseudoObjectRValue(E, Slot);
    if (NeedsDestruction)
      CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Slot.getAddress(),
                      E->getType());
  }
  void VisitVAArgExpr(VAArgExpr *E);
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       FieldDecl *InitializedFieldInUnion,
                                       Expr *ArrayFiller);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
} // end anonymous namespace.
//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}
/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}
void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      !Dest.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && !Dest.getAddress().isValid());

  Address RetAddr = Address::invalid();
  Address RetAllocaAddr = Address::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
    llvm::TypeSize Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
                               Dest.isExternallyDestructed()));

  if (!UseTemp)
    return;

  assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
  }
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, EVK_RValue);
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
                                       ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg = AggValueSlot::forLValue(
      src, CGF, AggValueSlot::IsDestructed, needsGC(type),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  EmitCopy(type, Dest, srcAgg);
}
/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}
/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress(CGF);

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart = Builder.CreateInBoundsGEP(
      ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
        ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}
/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}
/// Emit initialization of an array from an initializer list. ExprToVisit must
/// be either an InitListExpr or a CXXParenInitListExpr.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, Expr *ExprToVisit,
                                   ArrayRef<Expr *> Args, Expr *ArrayFiller) {
  uint64_t NumInitElements = Args.size();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();

  // DestPtr is an array*.  Construct an elementType* by drilling down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin = Builder.CreateInBoundsGEP(
      DestPtr.getElementType(), DestPtr.getPointer(), indices,
      "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGF);
    LangAS AS = ArrayQTy.getAddressSpace();
    if (llvm::Constant *C =
            Emitter.tryEmitForInitializer(ExprToVisit, AS, ArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
          "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
      GV->setAlignment(Align.getAsAlign());
      Address GVAddr(GV, GV->getValueType(), Align);
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, ArrayQTy));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);
    EmitInitializationToLValue(Args[i], elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  bool hasTrivialFiller = isTrivialFiller(ArrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if the we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(
        llvmElementType, begin,
        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV = CGF.MakeAddrLValue(
          Address(currentElement, llvmElementType, elementAlign), elementType);
      if (ArrayFiller)
        EmitInitializationToLValue(ArrayFiller, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement = Builder.CreateInBoundsGEP(
        llvmElementType, currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}
//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // If this is a unique OVE, just visit its source expression.
  if (e->isUnique())
    Visit(e->getSourceExpr());
  else
    EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}
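
/// Emit a compound literal of aggregate type. If the destination may alias
/// the literal and the type is POD, the literal is emitted as an lvalue and
/// copied; otherwise it is emitted directly into the destination slot, with a
/// destructor pushed for block-scope literals in C when nothing else will
/// destroy it.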
void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool Destruct =
      !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
  if (Destruct)
    Slot.setExternallyDestructed();

  CGF.EmitAggExpr(E->getInitializer(), Slot);

  if (Destruct)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      CGF.pushLifetimeExtendedDestroy(
          CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
          CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
}
/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
  op = op->IgnoreParenNoopCasts(ctx);
  if (auto castE = dyn_cast<CastExpr>(op)) {
    if (castE->getCastKind() == kind)
      return castE->getSubExpr();
  }
  return nullptr;
}
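
/// Emit an aggregate-typed cast. Most cast kinds simply forward to the
/// subexpression; the interesting cases are CK_ToUnion, lvalue-to-rvalue
/// bit casts (which copy the object representation byte-wise), and
/// conversions between padded atomic types and their value types.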
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr =
        Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_LValueToRValueBitCast: {
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
    Address SourceAddress =
        Builder.CreateElementBitCast(SourceLV.getAddress(CGF), CGF.Int8Ty);
    Address DestAddress =
        Builder.CreateElementBitCast(Dest.getAddress(), CGF.Int8Ty);
    llvm::Value *SizeVal = llvm::ConstantInt::get(
        CGF.SizeTy,
        CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
    Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op =
            findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
             "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::DoesNotOverlap,
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
        CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }
  case CK_AddressSpaceConversion:
    return Visit(E->getSubExpr());

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      bool Destruct =
          !Dest.isExternallyDestructed() &&
          E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
      if (Destruct)
        Dest.setExternallyDestructed();
      EnsureDest(E->getType());
      Visit(E->getSubExpr());

      if (Destruct)
        CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                        E->getType());

      return;
    }

    [[fallthrough]];

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:

  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}
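
/// Emit a call that returns an aggregate. Calls returning a reference are
/// loaded as lvalues; otherwise the call is emitted through
/// withReturnValueSlot so the result lands in (or is copied into) Dest.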
void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}
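
/// Primitive comparison kinds used when lowering a three-way comparison, and
/// a helper that emits one such comparison on already-evaluated operands as
/// the appropriate integer, floating-point, or member-pointer compare.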
enum CompareKind {
  CK_Less,
  CK_Greater,
  CK_Equal,
};

static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                const BinaryOperator *E, llvm::Value *LHS,
                                llvm::Value *RHS, CompareKind Kind,
                                const char *NameSuffix = "") {
  QualType ArgTy = E->getLHS()->getType();
  if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
    ArgTy = CT->getElementType();

  if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
    assert(Kind == CK_Equal &&
           "member pointers may only be compared for equality");
    return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, /*IsInequality*/ false);
  }

  // Compute the comparison instructions for the specified comparison kind.
  struct CmpInstInfo {
    const char *Name;
    llvm::CmpInst::Predicate FCmp;
    llvm::CmpInst::Predicate SCmp;
    llvm::CmpInst::Predicate UCmp;
  };
  CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
    using FI = llvm::FCmpInst;
    using II = llvm::ICmpInst;
    switch (Kind) {
    case CK_Less:
      return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
    case CK_Greater:
      return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
    case CK_Equal:
      return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
    }
    llvm_unreachable("Unrecognised CompareKind enum");
  }();

  if (ArgTy->hasFloatingRepresentation())
    return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
    auto Inst =
        ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
    return Builder.CreateICmp(Inst, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  }

  llvm_unreachable("unsupported aggregate binary expression should have "
                   "already been handled");
}
void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
  using llvm::BasicBlock;
  using llvm::PHINode;
  using llvm::Value;
  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
                                      E->getRHS()->getType()));
  const ComparisonCategoryInfo &CmpInfo =
      CGF.getContext().CompCategories.getInfoForType(E->getType());
  assert(CmpInfo.Record->isTriviallyCopyable() &&
         "cannot copy non-trivially copyable aggregate");

  QualType ArgTy = E->getLHS()->getType();

  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
  }
  bool IsComplex = ArgTy->isAnyComplexType();

  // Evaluate the operands to the expression and extract their values.
  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
    RValue RV = CGF.EmitAnyExpr(E);
    if (RV.isScalar())
      return {RV.getScalarVal(), nullptr};
    if (RV.isAggregate())
      return {RV.getAggregatePointer(), nullptr};
    assert(RV.isComplex());
    return RV.getComplexVal();
  };
  auto LHSValues = EmitOperand(E->getLHS()),
       RHSValues = EmitOperand(E->getRHS());

  auto EmitCmp = [&](CompareKind K) {
    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
                             K, IsComplex ? ".r" : "");
    if (!IsComplex)
      return Cmp;
    assert(K == CompareKind::CK_Equal);
    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
                                 RHSValues.second, K, ".i");
    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
  };
  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
    return Builder.getInt(VInfo->getIntValue());
  };

  Value *Select;
  if (ArgTy->isNullPtrType()) {
    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
  } else if (!CmpInfo.isPartial()) {
    Value *SelectOne =
        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
                                  SelectOne, "sel.eq");
  } else {
    Value *SelectEq = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
                                           EmitCmpRes(CmpInfo.getGreater()),
                                           SelectEq, "sel.gt");
    Select = Builder.CreateSelect(
        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
  }
  // Create the return value in the destination slot.
  EnsureDest(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Emit the address of the first (and only) field in the comparison category
  // type, and initialize it from the constant integer value selected above.
  LValue FieldLV = CGF.EmitLValueForFieldInitialization(
      DestLV, *CmpInfo.Record->field_begin());
  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);

  // All done! The result is in the Dest slot.
}
void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}
/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}
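
/// Emit an aggregate assignment. The RHS is normally emitted so it stores
/// directly into the LHS slot; atomic LHS values go through an atomic store,
/// and a __block LHS whose RHS has side effects forces the RHS to be
/// evaluated first into a temporary so the assignment lands in the right
/// place if a block copy moves the variable.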
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased,
                                     AggValueSlot::MayOverlap),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot = AggValueSlot::forLValue(
      LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  // A non-volatile aggregate destination might have volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);

  if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());
}
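
/// Emit ?: on aggregates by branching to separate blocks that each evaluate
/// one arm directly into the destination slot, then rejoining at a common
/// continuation block.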
void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();
  bool destructNonTrivialCStruct =
      !isExternallyDestructed &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
  isExternallyDestructed |= destructNonTrivialCStruct;
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  if (destructNonTrivialCStruct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());

  CGF.EmitBlock(ContBlock);
}
void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}
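
/// Emit a lambda expression by initializing each capture field of the
/// closure object in turn, pushing EH cleanups so that already-constructed
/// captures are destroyed if a later capture initializer throws.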
void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> Cleanups;
  llvm::Instruction *CleanupDominator = nullptr;

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
                                               e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit initialization
    LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
      continue;
    }

    EmitInitializationToLValue(*i, LV);

    // Push a destructor if necessary.
    if (QualType::DestructionKind DtorKind =
            CurField->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(DtorKind)) {
        if (!CleanupDominator)
          CleanupDominator = CGF.Builder.CreateAlignedLoad(
              CGF.Int8Ty,
              llvm::Constant::getNullValue(CGF.Int8PtrTy),
              CharUnits::One()); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(),
                        CGF.getDestroyer(DtorKind), false);
        Cleanups.push_back(CGF.EHStack.stable_begin());
      }
    }
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = Cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator);

  // Destroy the placeholder if we made one.
  if (CleanupDominator)
    CleanupDominator->eraseFromParent();
}
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}
/// Determine whether the given cast kind is known to always convert values
/// with all zero bits in their value representation to values with all zero
/// bits in their value representation.
static bool castPreservesZero(const CastExpr *CE) {
  switch (CE->getCastKind()) {
    // No-ops.
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
  case CK_BitCast:
  case CK_ToUnion:
  case CK_ToVoid:
    // Conversions between (possibly-complex) integral, (possibly-complex)
    // floating-point, and bool.
  case CK_BooleanToSignedIntegral:
  case CK_FloatingCast:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexToIntegralComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingRealToComplex:
  case CK_FloatingToBoolean:
  case CK_FloatingToIntegral:
  case CK_IntegralCast:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexToFloatingComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralRealToComplex:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
    // Reinterpreting integers as pointers and vice versa.
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
    // Language extensions.
  case CK_VectorSplat:
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
    return true;

  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_NullToMemberPointer:
  case CK_ReinterpretMemberPointer:
    // FIXME: ABI-dependent.
    return false;

  case CK_AnyPointerToBlockPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_CPointerToObjCPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_IntToOCLSampler:
  case CK_ZeroToOCLOpaqueType:
    // FIXME: Check these.
    return false;

  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToFloating:
  case CK_FixedPointToIntegral:
  case CK_FloatingToFixedPoint:
  case CK_IntegralToFixedPoint:
    // FIXME: Do all fixed-point types represent zero as all 0 bits?
    return false;

  case CK_AddressSpaceConversion:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_NullToPointer:
  case CK_PointerToBoolean:
    // FIXME: Preserves zeroes only if zero pointers and null pointers have the
    // same representation in all involved address spaces.
    return false;

  case CK_ARCConsumeObject:
  case CK_ARCExtendBlockObject:
  case CK_ARCProduceObject:
  case CK_ARCReclaimReturnedObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_BuiltinFnToFnPtr:
  case CK_LValueBitCast:
  case CK_LValueToRValue:
  case CK_LValueToRValueBitCast:
  case CK_UncheckedDerivedToBase:
    return false;
  }
  llvm_unreachable("Unhandled clang::CastKind enum");
}
/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it
/// just handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();
  while (auto *CE = dyn_cast<CastExpr>(E)) {
    if (!castPreservesZero(CE))
      break;
    E = CE->getSubExpr()->IgnoreParens();
  }

  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer &&
           CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
           !E->HasSideEffects(CGF.getContext());
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}
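
/// Emit an initializer expression into the given lvalue, dispatching on the
/// evaluation kind (scalar, complex, or aggregate) of the destination type
/// and skipping stores of zero into already-zeroed memory.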
void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(
        E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed,
                                   AggValueSlot::DoesNotNeedGCBarriers,
                                   AggValueSlot::IsNotAliased,
                                   AggValueSlot::MayOverlap, Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType());
  }
}
void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
  VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
                                  E->getInitializedFieldInUnion(),
                                  E->getArrayFiller());
}
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  if (E->isTransparent())
    return Visit(E->getInit(0));

  VisitCXXParenListOrInitListExpr(
      E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
}
void AggExprEmitter::VisitCXXParenListOrInitListExpr(
    Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
    FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant *C =
          CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
    llvm::GlobalVariable *GV =
        new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                                 llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(ExprToVisit->getType(),
                      CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
    return;
  }
#endif
  AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());

  // Handle initialization of an array.
  if (ExprToVisit->getType()->isConstantArrayType()) {
    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
    EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
                  InitExprs, ArrayFiller);
    return;
  } else if (ExprToVisit->getType()->isVariableArrayType()) {
    // A variable array type that has an initializer can only do empty
    // initialization. And because this feature is not exposed as an extension
    // in C++, we can safely memset the array memory to zero.
    assert(InitExprs.size() == 0 &&
           "you can only use an empty initializer with VLAs");
    CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
    return;
  }

  assert(ExprToVisit->getType()->isRecordType() &&
         "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = InitExprs.size();
  RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = nullptr;
  auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) {
    cleanups.push_back(cleanup);
    if (!cleanupDominator) // create placeholder once needed
      cleanupDominator = CGF.Builder.CreateAlignedLoad(
          CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy),
          CharUnits::One());
  };

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
    assert(NumInitElements >= CXXRD->getNumBases() &&
           "missing initializer for base class");
    for (auto &Base : CXXRD->bases()) {
      assert(!Base.isVirtual() && "should not see vbases here");
      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
          Dest.getAddress(), CXXRD, BaseRD,
          /*isBaseVirtual*/ false);
      AggValueSlot AggSlot = AggValueSlot::forAddr(
          V, Qualifiers(),
          AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers,
          AggValueSlot::IsNotAliased,
          CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
      CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);

      if (QualType::DestructionKind dtorKind =
              Base.getType().isDestructedType()) {
        CGF.pushDestroy(dtorKind, V, Base.getType());
        addCleanup(CGF.EHStack.stable_begin());
      }
    }
  }
  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!InitializedFieldInUnion) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert(
            (Field->isUnnamedBitfield() || Field->isAnonymousStructOrUnion()) &&
            "Only unnamed bitfields or anonymous classes allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = InitializedFieldInUnion;

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(InitExprs[0], FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }
  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (const auto *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
      break;

    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        addCleanup(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
              dyn_cast<llvm::GetElementPtrInst>(LV.getPointer(CGF)))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  assert((cleanupDominator || cleanups.empty()) &&
         "Missing cleanupDominator before deactivating cleanup blocks");
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}
void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                                            llvm::Value *outerBegin) {
  // Emit the common subexpression.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());

  Address destPtr = EnsureSlot(E->getType()).getAddress();
  uint64_t numElements = E->getArraySize().getZExtValue();

  // If there are no elements to initialize, then we're done.
  if (!numElements)
    return;
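
  // The emitted code is a single counted loop over the elements, roughly:
  //
  //   arrayinit.body:
  //     %index = phi [ 0, %entry ], [ %next, %arrayinit.body ]
  //     ...initialize element %index...
  //     %next = add nuw %index, 1
  //     %done = icmp eq %next, <numElements>
  //     br %done, %arrayinit.end, %arrayinit.body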
  // destPtr is an array*.  Construct an elementType* by drilling down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = {zero, zero};
  llvm::Value *begin = Builder.CreateInBoundsGEP(
      destPtr.getElementType(), destPtr.getPointer(), indices,
      "arrayinit.begin");

  // Prepare to special-case multidimensional array initialization: we avoid
  // emitting multiple destructor loops in that case.
  if (!outerBegin)
    outerBegin = begin;
  ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());

  QualType elementType =
      CGF.getContext().getAsArrayType(E->getType())->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

  // Jump into the body.
  CGF.EmitBlock(bodyBB);
  llvm::PHINode *index =
      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
  index->addIncoming(zero, entryBB);
  llvm::Value *element =
      Builder.CreateInBoundsGEP(llvmElementType, begin, index);
  // Prepare for a cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
    if (outerBegin->getType() != element->getType())
      outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
    CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
                                       elementAlign,
                                       CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();
  } else {
    dtorKind = QualType::DK_none;
  }

  // Emit the actual filler expression.
  {
    // Temporaries created in an array initialization loop are destroyed
    // at the end of each iteration.
    CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
    CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);

    if (InnerLoop) {
      // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
      auto elementSlot = AggValueSlot::forLValue(
          elementLV, CGF, AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
          AggValueSlot::DoesNotOverlap);
      AggExprEmitter(CGF, elementSlot, false)
          .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
    } else {
      EmitInitializationToLValue(E->getSubExpr(), elementLV);
    }
  }

  // Move on to the next element.
  llvm::Value *nextIndex = Builder.CreateNUWAdd(
      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
  index->addIncoming(nextIndex, Builder.GetInsertBlock());

  // Leave the loop if we're done.
  llvm::Value *done = Builder.CreateICmpEQ(
      nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
      "arrayinit.done");
  llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
  Builder.CreateCondBr(done, endBB, bodyBB);

  CGF.EmitBlock(endBB);

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind)
    CGF.DeactivateCleanupBlock(cleanup, index);
}
void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  EmitInitializationToLValue(E->getBase(), DestLV);
  VisitInitListExpr(E->getUpdater());
}
//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//
/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
    E = MTE->getSubExpr();
  E = E->IgnoreParenNoopCasts(CGF.getContext());

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present) elements.
  // If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  while (ILE && ILE->isTransparent())
    ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
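  //
  // For example, in
  //   struct S { int &r; int i; };   // initialized as { x, 0 }
  // the reference member contributes a pointer's worth of non-zero bytes,
  // while the trailing literal 0 contributes nothing.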
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = RT->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
        while (ILEElement != CXXRD->getNumBases())
          NumNonZeroBytes +=
              GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
      for (const auto *Field : SD->fields()) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getTarget().getPointerWidth(LangAS::Default));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  // FIXME: This overestimates the number of non-zero bytes for bit-fields.
  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}
/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
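///
/// For example, given
/// \code
///   struct S { int a; int b[100]; };
///   struct S s = { 42 };
/// \endcode
/// nearly all of 's' is zero, so the heuristic below emits one memset for the
/// whole slot and lets the visitor store only the single non-zero field.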
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // it.
  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
    return;

  // C++ objects with a user-declared constructor don't need zero'ing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
            .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16-bytes or smaller, prefer individual stores over memset.
  CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
  if (Size <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer are known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > Size)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());

  Address Loc = Slot.getAddress();
  Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}
/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
/// the value of the aggregate expression is not needed.  If VolatileDest is
/// true, DestPtr cannot be 0.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr *>(E));
}
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  Address Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(
                     LV, *this, AggValueSlot::IsNotDestructed,
                     AggValueSlot::DoesNotNeedGCBarriers,
                     AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));
  return LV;
}
AggValueSlot::Overlap_t
CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
  if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
    return AggValueSlot::DoesNotOverlap;

  // If the field lies entirely within the enclosing class's nvsize, its tail
  // padding cannot overlap any already-initialized object.  (The only
  // subobjects with greater addresses that might already be initialized are
  // vbases.)
  const RecordDecl *ClassRD = FD->getParent();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
  if (Layout.getFieldOffset(FD->getFieldIndex()) +
          getContext().getTypeSize(FD->getType()) <=
      (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
    return AggValueSlot::DoesNotOverlap;

  // The tail padding may contain values we need to preserve.
  return AggValueSlot::MayOverlap;
}
AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
    const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
  // If the most-derived object is a field declared with [[no_unique_address]],
  // the tail padding of any virtual base could be reused for other subobjects
  // of that field's class.
  if (IsVirtual)
    return AggValueSlot::MayOverlap;

  // If the base class is laid out entirely within the nvsize of the derived
  // class, its tail padding cannot yet be initialized, so we can issue
  // stores at the full width of the base class.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  if (Layout.getBaseClassOffset(BaseRD) +
          getContext().getASTRecordLayout(BaseRD).getSize() <=
      Layout.getNonVirtualSize())
    return AggValueSlot::DoesNotOverlap;

  // The tail padding may contain values we need to preserve.
  return AggValueSlot::MayOverlap;
}
void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
                                        AggValueSlot::Overlap_t MayOverlap,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  Address DestPtr = Dest.getAddress(*this);
  Address SrcPtr = Src.getAddress(*this);

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment() ||
              Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  if (getLangOpts().CUDAIsDevice) {
    if (Ty->isCUDADeviceBuiltinSurfaceType()) {
      if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
                                                                  Src))
        return;
    } else if (Ty->isCUDADeviceBuiltinTextureType()) {
      if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
                                                                  Src))
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.
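  //
  // For example, a trivially-copyable assignment such as
  //   struct S { int a[8]; };
  //   void copy(struct S *d, struct S *s) { *d = *s; }
  // reaches this point and is emitted as one llvm.memcpy of sizeof(struct S)
  // bytes, decorated with the TBAA metadata computed below.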
  // Get data size info for this aggregate.  Don't copy the tail padding if
  // this might be a potentially-overlapping subobject, since the tail padding
  // might be occupied by a different object.  Otherwise, copying it is fine.
  TypeInfoChars TypeInfo;
  if (MayOverlap)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  llvm::Value *SizeVal = nullptr;
  if (TypeInfo.Width.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
            getContext().getAsArrayType(Ty))) {
      QualType BaseEltTy;
      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
      TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
      assert(!TypeInfo.Width.isZero());
      SizeVal = Builder.CreateNUWMul(
          SizeVal,
          llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
    }
  }
  if (!SizeVal) {
    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
  }

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
  SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (CGM.getCodeGenOpts().NewStructPathTBAA) {
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
        Dest.getTBAAInfo(), Src.getTBAAInfo());
    CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
  }
}