//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  enum ExprValueKind {
    EVK_RValue,
    EVK_NonRValue
  };

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcIsRValue is true if source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         ExprValueKind SrcValueKind = EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
                     Expr *ExprToVisit, ArrayRef<Expr *> Args,
                     Expr *ArrayFiller);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  void VisitConstantExpr(ConstantExpr *E) {
    EnsureDest(E->getType());

    if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      Address StoreDest = Dest.getAddress();
      // The emitted value is guaranteed to have the same size as the
      // destination but can have a different type. Just do a bitcast in this
      // case to avoid incorrect GEPs.
      if (Result->getType() != StoreDest.getType())
        StoreDest = StoreDest.withElementType(Result->getType());

      CGF.EmitAggregateStore(Result, StoreDest,
                             E->getType().isVolatileQualified());
      return;
    }
    return Visit(E->getSubExpr());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    Visit(E->getSemanticForm());
  }

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       FieldDecl *InitializedFieldInUnion,
                                       Expr *ArrayFiller);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    AggValueSlot Slot = EnsureSlot(E->getType());
    bool NeedsDestruction =
        !Slot.isExternallyDestructed() &&
        E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
    if (NeedsDestruction)
      Slot.setExternallyDestructed();
    CGF.EmitPseudoObjectRValue(E, Slot);
    if (NeedsDestruction)
      CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Slot.getAddress(),
                      E->getType());
  }

  void VisitVAArgExpr(VAArgExpr *E);
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
} // end anonymous namespace.
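
// Usage sketch (the driver lives elsewhere in this file, outside this
// excerpt): CodeGenFunction::EmitAggExpr constructs this emitter roughly as
//   AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr *>(E));
// so each Visit* method above runs with Dest bound to the caller's slot.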

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}
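
// Illustrative source (not from this file): given `struct S { int a, b; };`
// and a global `S g;`, an initializer such as `S s = g;` lands here: `g` is
// an aggregate lvalue, so we take its address and copy into the destination.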

/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}
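
// Illustrative case, assuming Objective-C GC (-fobjc-gc): a plain struct like
//   struct Pair { id first; id second; };
// has object members, so copies of it must go through the collector-aware
// EmitGCMemmoveCollectable entry point instead of an ordinary memcpy.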

void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      !Dest.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && !Dest.getAddress().isValid());

  Address RetAddr = Address::invalid();
  Address RetAllocaAddr = Address::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
    llvm::TypeSize Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
                               Dest.isExternallyDestructed()));

  if (!UseTemp)
    return;

  assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
  }
}
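
// Illustrative flow (hypothetical C source): in `s = make_s();` the
// destination may alias memory the callee can observe, so Dest is potentially
// aliased and UseTemp is true; the call returns into "tmp", the result is
// then copied into `s`, and the temporary's lifetime markers are ended
// eagerly when no destructor call has to follow the copy.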

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, EVK_RValue);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
                                       ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg = AggValueSlot::forLValue(
      src, CGF, AggValueSlot::IsDestructed, needsGC(type),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}

/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress(CGF);

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart = Builder.CreateInBoundsGEP(
      ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
        ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}

/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}
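
// For example, in `int a[8] = {1};` the implicit filler that completes
// a[1..7] is zero-initialization, hence trivial; a filler that runs a
// non-trivial default constructor, e.g. `S a[8] = {S(1)};`, is not.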

/// Emit initialization of an array from an initializer list. ExprToVisit must
/// be either an InitListExpr or a CXXParenListInitExpr.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, Expr *ExprToVisit,
                                   ArrayRef<Expr *> Args, Expr *ArrayFiller) {
  uint64_t NumInitElements = Args.size();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin = Builder.CreateInBoundsGEP(
      DestPtr.getElementType(), DestPtr.getPointer(), indices,
      "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGF);
    LangAS AS = ArrayQTy.getAddressSpace();
    if (llvm::Constant *C =
            Emitter.tryEmitForInitializer(ExprToVisit, AS, ArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
          "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
      GV->setAlignment(Align.getAsAlign());
      Address GVAddr(GV, GV->getValueType(), Align);
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, ArrayQTy));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);
    EmitInitializationToLValue(Args[i], elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  bool hasTrivialFiller = isTrivialFiller(ArrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(
        llvmElementType, begin,
        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV = CGF.MakeAddrLValue(
          Address(currentElement, llvmElementType, elementAlign), elementType);
      if (ArrayFiller)
        EmitInitializationToLValue(ArrayFiller, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement = Builder.CreateInBoundsGEP(
        llvmElementType, currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}
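
// Worked example (illustrative, names abbreviated): for `int a[100] = {1, 2};`
// the two explicit initializers are stored element by element, and the
// remaining 98 elements go through the filler loop above, producing roughly:
//   arrayinit.body:
//     %cur  = phi ptr [ %begin, %entry ], [ %next, %arrayinit.body ]
//     store i32 0, ptr %cur
//     %next = getelementptr i32, ptr %cur, i64 1
//     %done = icmp eq ptr %next, %end
//     br i1 %done, label %arrayinit.end, label %arrayinit.body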

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // If this is a unique OVE, just visit its source expression.
  if (e->isUnique())
    Visit(e->getSourceExpr());
  else
    EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool Destruct =
      !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
  if (Destruct)
    Slot.setExternallyDestructed();

  CGF.EmitAggExpr(E->getInitializer(), Slot);

  if (Destruct)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      CGF.pushLifetimeExtendedDestroy(
          CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
          CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
}
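
// Illustrative C source: an assignment from a compound literal that could
// alias its destination, e.g. `s = (struct S){s.b, s.a};`, takes the POD
// load-then-copy path above; otherwise the literal's initializer is emitted
// directly into the destination slot.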

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
  op = op->IgnoreParenNoopCasts(ctx);
  if (auto castE = dyn_cast<CastExpr>(op)) {
    if (castE->getCastKind() == kind)
      return castE->getSubExpr();
  }
  return nullptr;
}
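
// e.g. (illustrative) when converting an atomic value back to its non-atomic
// type and the operand is itself the opposite conversion, findPeephole lets
// VisitCastExpr cancel the round-trip and visit the innermost expression.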

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_LValueToRValueBitCast: {
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
    Address SourceAddress =
        SourceLV.getAddress(CGF).withElementType(CGF.Int8Ty);
    Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
    llvm::Value *SizeVal = llvm::ConstantInt::get(
        CGF.SizeTy,
        CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
    Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op =
            findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
             "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::DoesNotOverlap,
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }
  case CK_AddressSpaceConversion:
    return Visit(E->getSubExpr());

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      bool Destruct =
          !Dest.isExternallyDestructed() &&
          E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
      if (Destruct)
        Dest.setExternallyDestructed();
      EnsureDest(E->getType());
      Visit(E->getSubExpr());

      if (Destruct)
        CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                        E->getType());

      return;
    }

    [[fallthrough]];

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:
  case CK_MatrixCast:

  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

enum CompareKind {
  CK_Less,
  CK_Greater,
  CK_Equal
};

static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                const BinaryOperator *E, llvm::Value *LHS,
                                llvm::Value *RHS, CompareKind Kind,
                                const char *NameSuffix = "") {
  QualType ArgTy = E->getLHS()->getType();
  if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
    ArgTy = CT->getElementType();

  if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
    assert(Kind == CK_Equal &&
           "member pointers may only be compared for equality");
    return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, /*IsInequality*/ false);
  }

  // Compute the comparison instructions for the specified comparison kind.
  struct CmpInstInfo {
    const char *Name;
    llvm::CmpInst::Predicate FCmp;
    llvm::CmpInst::Predicate SCmp;
    llvm::CmpInst::Predicate UCmp;
  };
  CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
    using FI = llvm::FCmpInst;
    using II = llvm::ICmpInst;
    switch (Kind) {
    case CK_Less:
      return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
    case CK_Greater:
      return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
    case CK_Equal:
      return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
    }
    llvm_unreachable("Unrecognised CompareKind enum");
  }();

  if (ArgTy->hasFloatingRepresentation())
    return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
    auto Inst =
        ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
    return Builder.CreateICmp(Inst, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  }

  llvm_unreachable("unsupported aggregate binary expression should have "
                   "already been handled");
}

void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
  using llvm::BasicBlock;
  using llvm::PHINode;
  using llvm::Value;
  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
                                      E->getRHS()->getType()));
  const ComparisonCategoryInfo &CmpInfo =
      CGF.getContext().CompCategories.getInfoForType(E->getType());
  assert(CmpInfo.Record->isTriviallyCopyable() &&
         "cannot copy non-trivially copyable aggregate");

  QualType ArgTy = E->getLHS()->getType();

  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
  }
  bool IsComplex = ArgTy->isAnyComplexType();

  // Evaluate the operands to the expression and extract their values.
  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
    RValue RV = CGF.EmitAnyExpr(E);
    if (RV.isScalar())
      return {RV.getScalarVal(), nullptr};
    if (RV.isAggregate())
      return {RV.getAggregatePointer(), nullptr};
    assert(RV.isComplex());
    return RV.getComplexVal();
  };
  auto LHSValues = EmitOperand(E->getLHS()),
       RHSValues = EmitOperand(E->getRHS());

  auto EmitCmp = [&](CompareKind K) {
    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
                             K, IsComplex ? ".r" : "");
    if (!IsComplex)
      return Cmp;
    assert(K == CompareKind::CK_Equal);
    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
                                 RHSValues.second, K, ".i");
    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
  };
  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
    return Builder.getInt(VInfo->getIntValue());
  };

  Value *Select;
  if (ArgTy->isNullPtrType()) {
    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
  } else if (!CmpInfo.isPartial()) {
    Value *SelectOne =
        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
                                  SelectOne, "sel.eq");
  } else {
    Value *SelectEq = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
                                           EmitCmpRes(CmpInfo.getGreater()),
                                           SelectEq, "sel.gt");
    Select = Builder.CreateSelect(
        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
  }
  // Create the return value in the destination slot.
  EnsureDest(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Emit the address of the first (and only) field in the comparison category
  // type, and initialize it from the constant integer value selected above.
  LValue FieldLV = CGF.EmitLValueForFieldInitialization(
      DestLV, *CmpInfo.Record->field_begin());
  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);

  // All done! The result is in the Dest slot.
}
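
// Illustrative lowering (hypothetical source, strong ordering): for
// `auto r = a <=> b;` with signed ints, the selects above produce roughly
//   %lt     = icmp slt i32 %a, %b
//   %eq     = icmp eq  i32 %a, %b
//   %sel.lt = select i1 %lt, i8 -1, i8 1
//   %sel.eq = select i1 %eq, i8 0, i8 %sel.lt
// and %sel.eq is stored into the comparison-category object's single field.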

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}
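
// e.g. (illustrative, Blocks extension):
//   __block struct S s;
//   s = make_s_and_copy_block();  // RHS side effects may move `s`
// isBlockVarRef(LHS) is true here, so VisitBinAssign below evaluates the RHS
// first and only then takes the (possibly updated) address of `s`.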

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased,
                                     AggValueSlot::MayOverlap),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot = AggValueSlot::forLValue(
      LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  // A non-volatile aggregate destination might have volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);

  if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();
  bool destructNonTrivialCStruct =
      !isExternallyDestructed &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
  isExternallyDestructed |= destructNonTrivialCStruct;
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  if (destructNonTrivialCStruct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> Cleanups;
  llvm::Instruction *CleanupDominator = nullptr;

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
                                               e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit initialization
    LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
      continue;
    }

    EmitInitializationToLValue(*i, LV);

    // Push a destructor if necessary.
    if (QualType::DestructionKind DtorKind =
            CurField->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(DtorKind)) {
        if (!CleanupDominator)
          CleanupDominator = CGF.Builder.CreateAlignedLoad(
              CGF.Int8Ty,
              llvm::Constant::getNullValue(CGF.Int8PtrTy),
              CharUnits::One()); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(),
                        CGF.getDestroyer(DtorKind), false);
        Cleanups.push_back(CGF.EHStack.stable_begin());
      }
    }
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = Cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator);

  // Destroy the placeholder if we made one.
  if (CleanupDominator)
    CleanupDominator->eraseFromParent();
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

/// Determine whether the given cast kind is known to always convert values
/// with all zero bits in their value representation to values with all zero
/// bits in their value representation.
static bool castPreservesZero(const CastExpr *CE) {
  switch (CE->getCastKind()) {
    // No-ops.
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
  case CK_BitCast:
  case CK_ToUnion:
  case CK_ToVoid:
    // Conversions between (possibly-complex) integral, (possibly-complex)
    // floating-point, and bool.
  case CK_BooleanToSignedIntegral:
  case CK_FloatingCast:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexToIntegralComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingRealToComplex:
  case CK_FloatingToBoolean:
  case CK_FloatingToIntegral:
  case CK_IntegralCast:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexToFloatingComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralRealToComplex:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
    // Reinterpreting integers as pointers and vice versa.
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
    // Language extensions.
  case CK_VectorSplat:
  case CK_MatrixCast:
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
    return true;

  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_NullToMemberPointer:
  case CK_ReinterpretMemberPointer:
    // FIXME: ABI-dependent.
    return false;

  case CK_AnyPointerToBlockPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_CPointerToObjCPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_IntToOCLSampler:
  case CK_ZeroToOCLOpaqueType:
    // FIXME: Check these.
    return false;

  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToFloating:
  case CK_FixedPointToIntegral:
  case CK_FloatingToFixedPoint:
  case CK_IntegralToFixedPoint:
    // FIXME: Do all fixed-point types represent zero as all 0 bits?
    return false;

  case CK_AddressSpaceConversion:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_Dynamic:
  case CK_NullToPointer:
  case CK_PointerToBoolean:
    // FIXME: Preserves zeroes only if zero pointers and null pointers have the
    // same representation in all involved address spaces.
    return false;

  case CK_ARCConsumeObject:
  case CK_ARCExtendBlockObject:
  case CK_ARCProduceObject:
  case CK_ARCReclaimReturnedObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_BuiltinFnToFnPtr:
  case CK_Dependent:
  case CK_LValueBitCast:
  case CK_LValueToRValue:
  case CK_LValueToRValueBitCast:
  case CK_UncheckedDerivedToBase:
    return false;
  }
  llvm_unreachable("Unhandled clang::CastKind enum");
}
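
// For example, `(long)0` and `(void *)(int)0` still store all-zero bits on
// typical targets, so isSimpleZero (below) can look through such casts and
// skip the store entirely when emitting into already-zeroed memory.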

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();
  while (auto *CE = dyn_cast<CastExpr>(E)) {
    if (!castPreservesZero(CE))
      break;
    E = CE->getSubExpr()->IgnoreParens();
  }

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer &&
           CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
           !E->HasSideEffects(CGF.getContext());
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}

void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(
        E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed,
                                   AggValueSlot::DoesNotNeedGCBarriers,
                                   AggValueSlot::IsNotAliased,
                                   AggValueSlot::MayOverlap, Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType());
  }
}
void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
  VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
                                  E->getInitializedFieldInUnion(),
                                  E->getArrayFiller());
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  if (E->isTransparent())
    return Visit(E->getInit(0));

  VisitCXXParenListOrInitListExpr(
      E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
}

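// Shared by InitListExpr (braced initialization, e.g. "S s{1, 2};") and
// CXXParenListInitExpr (C++20 parenthesized aggregate initialization,
// e.g. "S s(1, 2);"); both provide an ordered initializer sequence plus an
// optional initialized union field and array filler.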
void AggExprEmitter::VisitCXXParenListOrInitListExpr(
    Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
    FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant *C =
          CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
    llvm::GlobalVariable *GV =
        new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                                 llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(ExprToVisit->getType(),
                      CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
    return;
  }
#endif
  AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());

  // Handle initialization of an array.
  if (ExprToVisit->getType()->isConstantArrayType()) {
    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
    EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
                  InitExprs, ArrayFiller);
    return;
  } else if (ExprToVisit->getType()->isVariableArrayType()) {
    // A variable array type that has an initializer can only do empty
    // initialization. And because this feature is not exposed as an extension
    // in C++, we can safely memset the array memory to zero.
    assert(InitExprs.size() == 0 &&
           "you can only use an empty initializer with VLAs");
    CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
    return;
  }

  assert(ExprToVisit->getType()->isRecordType() &&
         "Only support structs/unions here!");
  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = InitExprs.size();
  RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = nullptr;
  auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) {
    cleanups.push_back(cleanup);
    if (!cleanupDominator) // create placeholder once needed
      cleanupDominator = CGF.Builder.CreateAlignedLoad(
          CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy),
          CharUnits::One());
  };
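  // (The dead aligned load above only serves as a dominating instruction that
  // DeactivateCleanupBlock can anchor to; it is erased again at the end of
  // this function once all cleanups have been deactivated.)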

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
    assert(NumInitElements >= CXXRD->getNumBases() &&
           "missing initializer for base class");
    for (auto &Base : CXXRD->bases()) {
      assert(!Base.isVirtual() && "should not see vbases here");
      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
          Dest.getAddress(), CXXRD, BaseRD,
          /*isBaseVirtual*/ false);
      AggValueSlot AggSlot = AggValueSlot::forAddr(
          V, Qualifiers(),
          AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers,
          AggValueSlot::IsNotAliased,
          CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
      CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);

      if (QualType::DestructionKind dtorKind =
              Base.getType().isDestructedType()) {
        CGF.pushDestroy(dtorKind, V, Base.getType());
        addCleanup(CGF.EHStack.stable_begin());
      }
    }
  }

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!InitializedFieldInUnion) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert((Field->isUnnamedBitfield() ||
                Field->isAnonymousStructOrUnion()) &&
               "Only unnamed bitfields or anonymous class allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = InitializedFieldInUnion;

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(InitExprs[0], FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (const auto *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
      break;

    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
    } else {
      // We're out of initializers; default-initialize to null
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        addCleanup(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
              dyn_cast<llvm::GetElementPtrInst>(LV.getPointer(CGF)))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  assert((cleanupDominator || cleanups.empty()) &&
         "Missing cleanupDominator before deactivating cleanup blocks");
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}

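// An ArrayInitLoopExpr shows up, for example, when an array is copied
// element by element, as in a by-value lambda capture of an array:
//   int a[8];
//   auto f = [a] { return a[0]; };
// The emitted IR is a loop (arrayinit.body / arrayinit.end) with an index
// phi, initializing one element per iteration.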
void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                                            llvm::Value *outerBegin) {
  // Emit the common subexpression.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());

  Address destPtr = EnsureSlot(E->getType()).getAddress();
  uint64_t numElements = E->getArraySize().getZExtValue();

  if (!numElements)
    return;

  // destPtr is an array*. Construct an elementType* by drilling down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = {zero, zero};
  llvm::Value *begin = Builder.CreateInBoundsGEP(
      destPtr.getElementType(), destPtr.getPointer(), indices,
      "arrayinit.begin");

  // Prepare to special-case multidimensional array initialization: we avoid
  // emitting multiple destructor loops in that case.
  if (!outerBegin)
    outerBegin = begin;
  ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());

  QualType elementType =
      CGF.getContext().getAsArrayType(E->getType())->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

  // Jump into the body.
  CGF.EmitBlock(bodyBB);
  llvm::PHINode *index =
      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
  index->addIncoming(zero, entryBB);
  llvm::Value *element =
      Builder.CreateInBoundsGEP(llvmElementType, begin, index);

  // Prepare for a cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
    if (outerBegin->getType() != element->getType())
      outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
    CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
                                       elementAlign,
                                       CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();
  } else {
    dtorKind = QualType::DK_none;
  }

  // Emit the actual filler expression.
  {
    // Temporaries created in an array initialization loop are destroyed
    // at the end of each iteration.
    CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
    CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);

    if (InnerLoop) {
      // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
      auto elementSlot = AggValueSlot::forLValue(
          elementLV, CGF, AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
          AggValueSlot::DoesNotOverlap);
      AggExprEmitter(CGF, elementSlot, false)
          .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
    } else
      EmitInitializationToLValue(E->getSubExpr(), elementLV);
  }

  // Move on to the next element.
  llvm::Value *nextIndex = Builder.CreateNUWAdd(
      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
  index->addIncoming(nextIndex, Builder.GetInsertBlock());

  // Leave the loop if we're done.
  llvm::Value *done = Builder.CreateICmpEQ(
      nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
      "arrayinit.done");
  llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
  Builder.CreateCondBr(done, endBB, bodyBB);

  CGF.EmitBlock(endBB);

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind)
    CGF.DeactivateCleanupBlock(cleanup, index);
}

void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  EmitInitializationToLValue(E->getBase(), DestLV);
  VisitInitListExpr(E->getUpdater());
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
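///
/// For example (illustrative, assuming a 4-byte int): for
///   struct { int a; int rest[15]; } s = { 1 };
/// only 'a' needs a non-zero store, so the estimate is 4 bytes even though
/// the object itself is 64 bytes.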
static CharUnits GetNumNonZeroBytesInInit(const Expr *E,
                                          CodeGenFunction &CGF) {
  if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
    E = MTE->getSubExpr();
  E = E->IgnoreParenNoopCasts(CGF.getContext());

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  while (ILE && ILE->isTransparent())
    ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

1950 // reference members, we need to consider the size of the reference, not the
1951 // referencee. InitListExprs for unions and arrays can't have references.
1952 if (const RecordType
*RT
= E
->getType()->getAs
<RecordType
>()) {
1953 if (!RT
->isUnionType()) {
1954 RecordDecl
*SD
= RT
->getDecl();
1955 CharUnits NumNonZeroBytes
= CharUnits::Zero();
1957 unsigned ILEElement
= 0;
1958 if (auto *CXXRD
= dyn_cast
<CXXRecordDecl
>(SD
))
1959 while (ILEElement
!= CXXRD
->getNumBases())
1961 GetNumNonZeroBytesInInit(ILE
->getInit(ILEElement
++), CGF
);
1962 for (const auto *Field
: SD
->fields()) {
1963 // We're done once we hit the flexible array member or run out of
1964 // InitListExpr elements.
1965 if (Field
->getType()->isIncompleteArrayType() ||
1966 ILEElement
== ILE
->getNumInits())
1968 if (Field
->isUnnamedBitfield())
1971 const Expr
*E
= ILE
->getInit(ILEElement
++);
1973 // Reference values are always non-null and have the width of a pointer.
1974 if (Field
->getType()->isReferenceType())
1975 NumNonZeroBytes
+= CGF
.getContext().toCharUnitsFromBits(
1976 CGF
.getTarget().getPointerWidth(LangAS::Default
));
1978 NumNonZeroBytes
+= GetNumNonZeroBytesInInit(E
, CGF
);
1981 return NumNonZeroBytes
;
1985 // FIXME: This overestimates the number of non-zero bytes for bit-fields.
1986 CharUnits NumNonZeroBytes
= CharUnits::Zero();
1987 for (unsigned i
= 0, e
= ILE
->getNumInits(); i
!= e
; ++i
)
1988 NumNonZeroBytes
+= GetNumNonZeroBytesInInit(ILE
->getInit(i
), CGF
);
1989 return NumNonZeroBytes
;
/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
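///
/// Worked example (illustrative): a 64-byte object initialized as
///   struct { int a; int rest[15]; } s = { 1 };
/// has roughly 4 non-zero bytes. Since the object is larger than 16 bytes and
/// 4 * 4 <= 64, we emit one memset(0) of the whole object followed by a
/// single store to 's.a'.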
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
    return;

  // C++ objects with a user-declared constructor don't need zero'ing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
            .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
  if (Size <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > Size)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());

  Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
/// the value of the aggregate expression is not needed.  If VolatileDest is
/// true, DestPtr cannot be 0.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr *>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  Address Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(
                     LV, *this, AggValueSlot::IsNotDestructed,
                     AggValueSlot::DoesNotNeedGCBarriers,
                     AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));
  return LV;
}

AggValueSlot::Overlap_t
CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
  if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
    return AggValueSlot::DoesNotOverlap;

  // If the field lies entirely within the enclosing class's nvsize, its tail
  // padding cannot overlap any already-initialized object. (The only subobjects
  // with greater addresses that might already be initialized are vbases.)
  const RecordDecl *ClassRD = FD->getParent();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
  if (Layout.getFieldOffset(FD->getFieldIndex()) +
          getContext().getTypeSize(FD->getType()) <=
      (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
    return AggValueSlot::DoesNotOverlap;

  // The tail padding may contain values we need to preserve.
  return AggValueSlot::MayOverlap;
}

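// Example (illustrative, assuming a typical Itanium-style layout): in
//   struct Base { int i; char c; };      // sizeof(Base) == 8, data size 5
//   struct Derived : Base { char d; };   // 'd' reuses Base's tail padding
// the Base subobject ends within Derived's nvsize, and bases are initialized
// before fields, so 'd' cannot be live yet and full-width stores to Base are
// safe (DoesNotOverlap). Virtual bases, by contrast, may overlap.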
AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
    const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
  // If the most-derived object is a field declared with [[no_unique_address]],
  // the tail padding of any virtual base could be reused for other subobjects
  // of that field's class.
  if (IsVirtual)
    return AggValueSlot::MayOverlap;

  // If the base class is laid out entirely within the nvsize of the derived
  // class, its tail padding cannot yet be initialized, so we can issue
  // stores at the full width of the base class.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  if (Layout.getBaseClassOffset(BaseRD) +
          getContext().getASTRecordLayout(BaseRD).getSize() <=
      Layout.getNonVirtualSize())
    return AggValueSlot::DoesNotOverlap;

  // The tail padding may contain values we need to preserve.
  return AggValueSlot::MayOverlap;
}

void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
                                        AggValueSlot::Overlap_t MayOverlap,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  Address DestPtr = Dest.getAddress(*this);
  Address SrcPtr = Src.getAddress(*this);

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment() ||
              Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  if (getLangOpts().CUDAIsDevice) {
    if (Ty->isCUDADeviceBuiltinSurfaceType()) {
      if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
                                                                  Src))
        return;
    } else if (Ty->isCUDADeviceBuiltinTextureType()) {
      if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
                                                                  Src))
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.
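  //
  // For a plain struct assignment 'a = b' this typically becomes a call like
  // (illustrative IR, assuming opaque pointers and a 24-byte struct):
  //   call void @llvm.memcpy.p0.p0.i64(ptr align 8 %a, ptr align 8 %b,
  //                                    i64 24, i1 false)
  // with the TBAA metadata attached below when available.
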
  // Get data size info for this aggregate. Don't copy the tail padding if this
  // might be a potentially-overlapping subobject, since the tail padding might
  // be occupied by a different object. Otherwise, copying it is fine.
  TypeInfoChars TypeInfo;
  if (MayOverlap)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  llvm::Value *SizeVal = nullptr;
  if (TypeInfo.Width.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
            getContext().getAsArrayType(Ty))) {
      QualType BaseEltTy;
      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
      TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
      assert(!TypeInfo.Width.isZero());
      SizeVal = Builder.CreateNUWMul(
          SizeVal,
          llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
    }
  }
  if (!SizeVal) {
    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
  }

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  DestPtr = DestPtr.withElementType(Int8Ty);
  SrcPtr = SrcPtr.withElementType(Int8Ty);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (CGM.getCodeGenOpts().NewStructPathTBAA) {
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
        Dest.getTBAAInfo(), Src.getTBAAInfo());
    CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
  }
}