//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//===----------------------------------------------------------------------===//

#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "EHScopeStack.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

extern cl::opt<bool> EnableSingleByteCoverage;
namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);
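  // For example, VisitCallExpr below uses it as:
  //   withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
  //     return CGF.EmitCallExpr(E, Slot);
  //   });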
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
      : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
        IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents a value lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcIsRValue is true if source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         CodeGenFunction::ExprValueKind SrcValueKind =
                             CodeGenFunction::EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
                     Expr *ExprToVisit, ArrayRef<Expr *> Args,
                     Expr *ArrayFiller);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);
  //===--------------------------------------------------------------------===//
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  void VisitConstantExpr(ConstantExpr *E) {
    EnsureDest(E->getType());
    if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      CGF.CreateCoercedStore(
          Result, Dest.getAddress(),
          llvm::TypeSize::getFixed(
              Dest.getPreferredSize(CGF.getContext(), E->getType())
                  .getQuantity()),
          E->getType().isVolatileQualified());
      return;
    }
    return Visit(E->getSubExpr());
  }

  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);
  void VisitBinCmp(const BinaryOperator *E);
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    Visit(E->getSemanticForm());
  }

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       FieldDecl *InitializedFieldInUnion,
                                       Expr *ArrayFiller);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);
  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    AggValueSlot Slot = EnsureSlot(E->getType());
    bool NeedsDestruction =
        !Slot.isExternallyDestructed() &&
        E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
    if (NeedsDestruction)
      Slot.setExternallyDestructed();
    CGF.EmitPseudoObjectRValue(E, Slot);
    if (NeedsDestruction)
      CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Slot.getAddress(),
                      E->getType());
  }

  void VisitVAArgExpr(VAArgExpr *E);
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
  void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
                                       FieldDecl *InitializedFieldInUnion,
                                       Expr *ArrayFiller);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
  void VisitPackIndexingExpr(PackIndexingExpr *E) {
    Visit(E->getSelectedExpr());
  }
};
} // end anonymous namespace.
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents a value lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}
void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      !Dest.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && Dest.isIgnored());

  Address RetAddr = Address::invalid();
  RawAddress RetAllocaAddr = RawAddress::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
    llvm::TypeSize Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused,
                               Dest.isExternallyDestructed()));

  if (!UseTemp)
    return;

  assert(Dest.isIgnored() || Dest.emitRawPointer(CGF) !=
                                 Src.getAggregatePointer(E->getType(), CGF));
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
  }
}
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, CodeGenFunction::EVK_RValue);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(
    QualType type, const LValue &src,
    CodeGenFunction::ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result. Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == CodeGenFunction::EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg = AggValueSlot::forLValue(
      src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased,
      AggValueSlot::MayOverlap);
  EmitCopy(type, Dest, srcAgg);
}
/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is. Use the minimum alignment of
  // the two.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}
/// Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements. The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  assert(Field != Record->field_end() &&
         Ctx.hasSameType(Field->getType()->getPointeeType(),
                         ArrayType->getElementType()) &&
         "Expected std::initializer_list first field to be const E *");

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *ArrayStart = ArrayPtr.emitRawPointer(CGF);
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  assert(Field != Record->field_end() &&
         "Expected std::initializer_list to have two fields");

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    assert(Field->getType()->isPointerType() &&
           Ctx.hasSameType(Field->getType()->getPointeeType(),
                           ArrayType->getElementType()) &&
           "Expected std::initializer_list second field to be const E *");
    llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
        ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  }

  assert(++Field == Record->field_end() &&
         "Expected std::initializer_list to only have two fields");
}
/// Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}
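// For example, in `int a[4] = {1, 2};` the two trailing elements are covered
// by an implicit zero filler, which the check above treats as trivial.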
/// Emit initialization of an array from an initializer list. ExprToVisit must
/// be either an InitListExpr or a CXXParenListInitExpr.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, Expr *ExprToVisit,
                                   ArrayRef<Expr *> Args, Expr *ArrayFiller) {
  uint64_t NumInitElements = Args.size();

  uint64_t NumArrayElements = AType->getNumElements();
  for (const auto *Init : Args) {
    if (const auto *Embed = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
      NumInitElements += Embed->getDataElementCount() - 1;
      if (NumInitElements > NumArrayElements) {
        NumInitElements = NumArrayElements;
      }
    }
  }

  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGF);
    QualType GVArrayQTy = CGM.getContext().getAddrSpaceQualType(
        CGM.getContext().removeAddrSpaceQualType(ArrayQTy),
        CGM.GetGlobalConstantAddressSpace());
    LangAS AS = GVArrayQTy.getAddressSpace();
    if (llvm::Constant *C =
            Emitter.tryEmitForInitializer(ExprToVisit, AS, GVArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(GVArrayQTy);
      GV->setAlignment(Align.getAsAlign());
      Address GVAddr(GV, GV->getValueType(), Align);
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, GVArrayQTy));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  CodeGenFunction::CleanupDeactivationScope deactivation(CGF);

  llvm::Value *begin = DestPtr.emitRawPointer(CGF);
  if (dtorKind) {
    CodeGenFunction::AllocaTrackerRAII allocaTracker(CGF);
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex. Therefore we go through an
    // artificial variable.
    llvm::Instruction *dominatingIP =
        Builder.CreateFlagLoad(llvm::ConstantInt::getNullValue(CGF.Int8PtrTy));
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         CGF.getDestroyer(dtorKind));
    cast<EHCleanupScope>(*CGF.EHStack.find(CGF.EHStack.stable_begin()))
        .AddAuxAllocas(allocaTracker.Take());

    CGF.DeferredDeactivationCleanupStack.push_back(
        {CGF.EHStack.stable_begin(), dominatingIP});
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  auto Emit = [&](Expr *Init, uint64_t ArrayIndex) {
    llvm::Value *element = begin;
    if (ArrayIndex > 0) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, begin,
          llvm::ConstantInt::get(CGF.SizeTy, ArrayIndex), "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element. TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid())
        Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);
    EmitInitializationToLValue(Init, elementLV);
  };

  unsigned ArrayIndex = 0;
  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    if (ArrayIndex >= NumInitElements)
      break;
    if (auto *EmbedS = dyn_cast<EmbedExpr>(Args[i]->IgnoreParenImpCasts())) {
      EmbedS->doForEachDataElement(Emit, ArrayIndex);
    } else {
      Emit(Args[i], ArrayIndex);
    }
  }
  // Check whether there's a non-trivial array-fill expression.
  bool hasTrivialFiller = isTrivialFiller(ArrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression. We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop. This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    llvm::Value *element = begin;
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(
          llvmElementType, element,
          llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(
        llvmElementType, begin,
        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV = CGF.MakeAddrLValue(
          Address(currentElement, llvmElementType, elementAlign), elementType);
      if (ArrayFiller)
        EmitInitializationToLValue(ArrayFiller, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement = Builder.CreateInBoundsGEP(
        llvmElementType, currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }
}
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // If this is a unique OVE, just visit its source expression.
  if (e->isUnique())
    Visit(e->getSourceExpr());
  else
    EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool Destruct =
      !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
  if (Destruct)
    Slot.setExternallyDestructed();

  CGF.EmitAggExpr(E->getInitializer(), Slot);

  if (Destruct)
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
      CGF.pushLifetimeExtendedDestroy(
          CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(),
          CGF.getDestroyer(DtorKind), DtorKind & EHCleanup);
}
/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) {
  op = op->IgnoreParenNoopCasts(ctx);
  if (auto castE = dyn_cast<CastExpr>(op)) {
    if (castE->getCastKind() == kind)
      return castE->getSubExpr();
  }
  return nullptr;
}
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_LValueToRValueBitCast: {
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
    Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
    Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
    llvm::Value *SizeVal = llvm::ConstantInt::get(
        CGF.SizeTy,
        CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
    Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                     atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
        (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op =
            findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
             "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize. (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::DoesNotOverlap,
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
        CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }

  case CK_AddressSpaceConversion:
    return Visit(E->getSubExpr());

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      bool Destruct =
          !Dest.isExternallyDestructed() &&
          E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
      if (Destruct)
        Dest.setExternallyDestructed();
      EnsureDest(E->getType());
      Visit(E->getSubExpr());

      if (Destruct)
        CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                        E->getType());
      return;
    }

  case CK_HLSLArrayRValue:
    Visit(E->getSubExpr());
    break;

  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLOpaqueType:
  case CK_HLSLVectorTruncation:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}
void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}
static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                const BinaryOperator *E, llvm::Value *LHS,
                                llvm::Value *RHS, CompareKind Kind,
                                const char *NameSuffix = "") {
  QualType ArgTy = E->getLHS()->getType();
  if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
    ArgTy = CT->getElementType();

  if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
    assert(Kind == CK_Equal &&
           "member pointers may only be compared for equality");
    return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, /*IsInequality*/ false);
  }

  // Compute the comparison instructions for the specified comparison kind.
  struct CmpInstInfo {
    const char *Name;
    llvm::CmpInst::Predicate FCmp;
    llvm::CmpInst::Predicate SCmp;
    llvm::CmpInst::Predicate UCmp;
  };
  CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
    using FI = llvm::FCmpInst;
    using II = llvm::ICmpInst;
    switch (Kind) {
    case CK_Less:
      return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
    case CK_Greater:
      return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
    case CK_Equal:
      return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
    }
    llvm_unreachable("Unrecognised CompareKind enum");
  }();

  if (ArgTy->hasFloatingRepresentation())
    return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
    auto Inst =
        ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
    return Builder.CreateICmp(Inst, LHS, RHS,
                              llvm::Twine(InstInfo.Name) + NameSuffix);
  }

  llvm_unreachable("unsupported aggregate binary expression should have "
                   "already been handled");
}
void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
  using llvm::BasicBlock;
  using llvm::PHINode;
  using llvm::Value;
  assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
                                      E->getRHS()->getType()));
  const ComparisonCategoryInfo &CmpInfo =
      CGF.getContext().CompCategories.getInfoForType(E->getType());
  assert(CmpInfo.Record->isTriviallyCopyable() &&
         "cannot copy non-trivially copyable aggregate");

  QualType ArgTy = E->getLHS()->getType();

  if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
      !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
      !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
    return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
  }
  bool IsComplex = ArgTy->isAnyComplexType();

  // Evaluate the operands to the expression and extract their values.
  auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
    RValue RV = CGF.EmitAnyExpr(E);
    if (RV.isScalar())
      return {RV.getScalarVal(), nullptr};
    if (RV.isAggregate())
      return {RV.getAggregatePointer(E->getType(), CGF), nullptr};
    assert(RV.isComplex());
    return RV.getComplexVal();
  };
  auto LHSValues = EmitOperand(E->getLHS()),
       RHSValues = EmitOperand(E->getRHS());

  auto EmitCmp = [&](CompareKind K) {
    Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
                             K, IsComplex ? ".r" : "");
    if (!IsComplex)
      return Cmp;
    assert(K == CompareKind::CK_Equal);
    Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
                                 RHSValues.second, K, ".i");
    return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
  };
  auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
    return Builder.getInt(VInfo->getIntValue());
  };

  Value *Select;
  if (ArgTy->isNullPtrType()) {
    Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
  } else if (!CmpInfo.isPartial()) {
    Value *SelectOne =
        Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
                             EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
    Select = Builder.CreateSelect(EmitCmp(CK_Equal),
                                  EmitCmpRes(CmpInfo.getEqualOrEquiv()),
                                  SelectOne, "sel.eq");
  } else {
    Value *SelectEq = Builder.CreateSelect(
        EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
        EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
    Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
                                           EmitCmpRes(CmpInfo.getGreater()),
                                           SelectEq, "sel.gt");
    Select = Builder.CreateSelect(
        EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
  }
  // Create the return value in the destination slot.
  EnsureDest(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Emit the address of the first (and only) field in the comparison category
  // type, and initialize it from the constant integer value selected above.
  LValue FieldLV = CGF.EmitLValueForFieldInitialization(
      DestLV, *CmpInfo.Record->field_begin());
  CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);

  // All done! The result is in the Dest slot.
}
void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}
/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators. Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased,
                                     AggValueSlot::MayOverlap),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot = AggValueSlot::forLValue(
      LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
  // A non-volatile aggregate destination might have volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);

  if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());
}
void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();
  bool destructNonTrivialCStruct =
      !isExternallyDestructed &&
      E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
  isExternallyDestructed |= destructNonTrivialCStruct;
  Dest.setExternallyDestructed(isExternallyDestructed);

  CGF.EmitBlock(LHSBlock);
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E->getTrueExpr());
  else
    CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot. That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  CGF.EmitBlock(RHSBlock);
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E->getFalseExpr());
  Visit(E->getFalseExpr());

  if (destructNonTrivialCStruct)
    CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(),
                    E->getType());

  CGF.EmitBlock(ContBlock);
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E);
}
void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  CGF.EmitVAArg(VE, ArgValue, Dest);

  // If EmitVAArg fails, emit an error.
  if (!ArgValue.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}
void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception or contains branch out of the expressions.
  CodeGenFunction::CleanupDeactivationScope scope(CGF);

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
                                               e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit initialization
    LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
      continue;
    }

    EmitInitializationToLValue(*i, LV);

    // Push a destructor if necessary.
    if (QualType::DestructionKind DtorKind =
            CurField->getType().isDestructedType()) {
      assert(LV.isSimple());
      CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
                                          CurField->getType(),
                                          CGF.getDestroyer(DtorKind), false);
    }
  }
}
void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}
/// Determine whether the given cast kind is known to always convert values
/// with all zero bits in their value representation to values with all zero
/// bits in their value representation.
static bool castPreservesZero(const CastExpr *CE) {
  switch (CE->getCastKind()) {
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:

  // Conversions between (possibly-complex) integral, (possibly-complex)
  // floating-point, and bool.
  case CK_BooleanToSignedIntegral:
  case CK_FloatingCast:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexToIntegralComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingRealToComplex:
  case CK_FloatingToBoolean:
  case CK_FloatingToIntegral:
  case CK_IntegralCast:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexToFloatingComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralRealToComplex:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  // Reinterpreting integers as pointers and vice versa.
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  // Language extensions.
  case CK_VectorSplat:
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
  case CK_HLSLVectorTruncation:

  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_NullToMemberPointer:
  case CK_ReinterpretMemberPointer:
    // FIXME: ABI-dependent.

  case CK_AnyPointerToBlockPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_CPointerToObjCPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_IntToOCLSampler:
  case CK_ZeroToOCLOpaqueType:
    // FIXME: Check these.

  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToFloating:
  case CK_FixedPointToIntegral:
  case CK_FloatingToFixedPoint:
  case CK_IntegralToFixedPoint:
    // FIXME: Do all fixed-point types represent zero as all 0 bits?

  case CK_AddressSpaceConversion:
  case CK_BaseToDerived:
  case CK_DerivedToBase:
  case CK_NullToPointer:
  case CK_PointerToBoolean:
    // FIXME: Preserves zeroes only if zero pointers and null pointers have the
    // same representation in all involved address spaces.

  case CK_ARCConsumeObject:
  case CK_ARCExtendBlockObject:
  case CK_ARCProduceObject:
  case CK_ARCReclaimReturnedObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_BuiltinFnToFnPtr:

  case CK_LValueBitCast:
  case CK_LValueToRValue:
  case CK_LValueToRValueBitCast:
  case CK_UncheckedDerivedToBase:
  case CK_HLSLArrayRValue:
  }
  llvm_unreachable("Unhandled clang::CastKind enum");
}
/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true. This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();
  while (auto *CE = dyn_cast<CastExpr>(E)) {
    if (!castPreservesZero(CE))
      break;
    E = CE->getSubExpr()->IgnoreParens();
  }

  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer &&
           CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
           !E->HasSideEffects(CGF.getContext());
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}
void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(
        E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
                                   AggValueSlot::DoesNotNeedGCBarriers,
                                   AggValueSlot::IsNotAliased,
                                   AggValueSlot::MayOverlap, Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}
void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
  VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
                                  E->getInitializedFieldInUnion(),
                                  E->getArrayFiller());
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  if (E->isTransparent())
    return Visit(E->getInit(0));

  VisitCXXParenListOrInitListExpr(
      E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
}

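// Shared lowering for braced and parenthesized aggregate initialization.
// As a rough illustration: given
//   struct P { int x, y; };
//   P p{1};
// the one explicit initializer is stored into 'x' and 'y' is
// null-initialized; for a class with bases, the base subobjects are emitted
// before the fields, in declaration order.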
void AggExprEmitter::VisitCXXParenListOrInitListExpr(
    Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
    FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
#if 0
  // FIXME: Assess perf here? Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant *C =
          CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
    llvm::GlobalVariable *GV =
        new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                                 llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(ExprToVisit->getType(),
                      CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
    return;
  }
#endif

  AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());

  // Handle initialization of an array.
  if (ExprToVisit->getType()->isConstantArrayType()) {
    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
    EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
                  InitExprs, ArrayFiller);
    return;
  } else if (ExprToVisit->getType()->isVariableArrayType()) {
    // A variable array type that has an initializer can only do empty
    // initialization. And because this feature is not exposed as an extension
    // in C++, we can safely memset the array memory to zero.
    assert(InitExprs.size() == 0 &&
           "you can only use an empty initializer with VLAs");
    CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
    return;
  }

  assert(ExprToVisit->getType()->isRecordType() &&
         "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the approprate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = InitExprs.size();
  RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  CodeGenFunction::CleanupDeactivationScope DeactivateCleanups(CGF);

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
    assert(NumInitElements >= CXXRD->getNumBases() &&
           "missing initializer for base class");
    for (auto &Base : CXXRD->bases()) {
      assert(!Base.isVirtual() && "should not see vbases here");
      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
          Dest.getAddress(), CXXRD, BaseRD,
          /*isBaseVirtual*/ false);
      AggValueSlot AggSlot = AggValueSlot::forAddr(
          V, Qualifiers(),
          AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers,
          AggValueSlot::IsNotAliased,
          CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
      CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);

      if (QualType::DestructionKind dtorKind =
              Base.getType().isDestructedType())
        CGF.pushDestroyAndDeferDeactivation(dtorKind, V, Base.getType());
    }
  }

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!InitializedFieldInUnion) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert(
            (Field->isUnnamedBitField() || Field->isAnonymousStructOrUnion()) &&
            "Only unnamed bitfields or anonymous class allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = InitializedFieldInUnion;

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(InitExprs[0], FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (const auto *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitField())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
      break;

    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barries for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
    } else {
      // We're out of initializers; default-initialize to null
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
                                          field->getType(),
                                          CGF.getDestroyer(dtorKind), false);
    }
  }
}

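// An ArrayInitLoopExpr initializes the elements of an array from a common
// subexpression, one element per iteration; it arises, for example, when a
// lambda captures an array by copy. The emission below is a counted loop: an
// index phi ("arrayinit.index") walks the elements, each element is
// initialized in turn, and a partial-array cleanup protects the
// already-initialized prefix if an element initializer throws.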
void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                                            llvm::Value *outerBegin) {
  // Emit the common subexpression.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());

  Address destPtr = EnsureSlot(E->getType()).getAddress();
  uint64_t numElements = E->getArraySize().getZExtValue();

  if (!numElements)
    return;

  // destPtr is an array*. Construct an elementType* by drilling down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = {zero, zero};
  llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getElementType(),
                                                 destPtr.emitRawPointer(CGF),
                                                 indices, "arrayinit.begin");

  // Prepare to special-case multidimensional array initialization: we avoid
  // emitting multiple destructor loops in that case.
  if (!outerBegin)
    outerBegin = begin;
  ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());

  QualType elementType =
      CGF.getContext().getAsArrayType(E->getType())->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);
  llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);

  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

  // Jump into the body.
  CGF.EmitBlock(bodyBB);
  llvm::PHINode *index =
      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
  index->addIncoming(zero, entryBB);
  llvm::Value *element =
      Builder.CreateInBoundsGEP(llvmElementType, begin, index);

  // Prepare for a cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
    if (outerBegin->getType() != element->getType())
      outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
    CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
                                       elementAlign,
                                       CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();
  } else {
    dtorKind = QualType::DK_none;
  }

  // Emit the actual filler expression.
  {
    // Temporaries created in an array initialization loop are destroyed
    // at the end of each iteration.
    CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
    CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
    LValue elementLV = CGF.MakeAddrLValue(
        Address(element, llvmElementType, elementAlign), elementType);

    if (InnerLoop) {
      // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
      auto elementSlot = AggValueSlot::forLValue(
          elementLV, AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
          AggValueSlot::DoesNotOverlap);
      AggExprEmitter(CGF, elementSlot, false)
          .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
    } else
      EmitInitializationToLValue(E->getSubExpr(), elementLV);
  }

  // Move on to the next element.
  llvm::Value *nextIndex = Builder.CreateNUWAdd(
      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
  index->addIncoming(nextIndex, Builder.GetInsertBlock());

  // Leave the loop if we're done.
  llvm::Value *done = Builder.CreateICmpEQ(
      nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
      "arrayinit.done");
  llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
  Builder.CreateCondBr(done, endBB, bodyBB);

  CGF.EmitBlock(endBB);

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind)
    CGF.DeactivateCleanupBlock(cleanup, index);
}

void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  EmitInitializationToLValue(E->getBase(), DestLV);
  VisitInitListExpr(E->getUpdater());
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
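///
/// As a rough illustration: for
///   struct S { int a, b, c, d; };
///   S s = {1};
/// only the bytes of 'a' count as non-zero (typically 4), since the
/// remaining fields are zero-initialized.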
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E))
    E = MTE->getSubExpr();
  E = E->IgnoreParenNoopCasts(CGF.getContext());

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the size of sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  while (ILE && ILE->isTransparent())
    ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = RT->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
        while (ILEElement != CXXRD->getNumBases())
          NumNonZeroBytes +=
              GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
      for (const auto *Field : SD->fields()) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitField())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getTarget().getPointerWidth(LangAS::Default));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  // FIXME: This overestimates the number of non-zero bytes for bit-fields.
  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
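/// As a rough illustration of the heuristic below: for something like
///   int buf[256] = {1, 2, 3};
/// the slot is larger than 16 bytes and well over 3/4 of it is known to be
/// zero, so a single memset of the whole object is emitted first and only
/// the leading non-zero elements get individual stores.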
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
    return;

  // C++ objects with a user-declared constructor don't need zero'ing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
            .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16-bytes or smaller, prefer individual stores over memset.
  CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
  if (Size <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer are known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > Size)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());

  Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
/// the value of the aggregate expression is not needed.  If VolatileDest is
/// true, DestPtr cannot be 0.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr *>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  Address Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased,
                                         AggValueSlot::DoesNotOverlap));
  return LV;
}

void CodeGenFunction::EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest,
                                           const LValue &Src,
                                           ExprValueKind SrcKind) {
  return AggExprEmitter(*this, Dest, Dest.isIgnored())
      .EmitFinalDestCopy(Type, Src, SrcKind);
}

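// A small illustration (types invented for exposition) of why overlap
// matters below: given
//   struct Inner { int i; char c; };
//   struct Outer { [[no_unique_address]] Inner in; char tail; };
// 'tail' may be laid out inside 'in's tail padding, so stores emitted while
// initializing 'in' must not assume they own all sizeof(Inner) bytes. The
// helpers below report MayOverlap in such cases and DoesNotOverlap when the
// subobject provably ends before anything that may already be initialized.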
AggValueSlot::Overlap_t
CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
  if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
    return AggValueSlot::DoesNotOverlap;

  // Empty fields can overlap earlier fields.
  if (FD->getType()->getAsCXXRecordDecl()->isEmpty())
    return AggValueSlot::MayOverlap;

  // If the field lies entirely within the enclosing class's nvsize, its tail
  // padding cannot overlap any already-initialized object. (The only subobjects
  // with greater addresses that might already be initialized are vbases.)
  const RecordDecl *ClassRD = FD->getParent();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD);
  if (Layout.getFieldOffset(FD->getFieldIndex()) +
          getContext().getTypeSize(FD->getType()) <=
      (uint64_t)getContext().toBits(Layout.getNonVirtualSize()))
    return AggValueSlot::DoesNotOverlap;

  // The tail padding may contain values we need to preserve.
  return AggValueSlot::MayOverlap;
}

AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
    const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
  // If the most-derived object is a field declared with [[no_unique_address]],
  // the tail padding of any virtual base could be reused for other subobjects
  // of that field's class.
  if (IsVirtual)
    return AggValueSlot::MayOverlap;

  // Empty bases can overlap earlier bases.
  if (BaseRD->isEmpty())
    return AggValueSlot::MayOverlap;

  // If the base class is laid out entirely within the nvsize of the derived
  // class, its tail padding cannot yet be initialized, so we can issue
  // stores at the full width of the base class.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  if (Layout.getBaseClassOffset(BaseRD) +
          getContext().getASTRecordLayout(BaseRD).getSize() <=
      Layout.getNonVirtualSize())
    return AggValueSlot::DoesNotOverlap;

  // The tail padding may contain values we need to preserve.
  return AggValueSlot::MayOverlap;
}

void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
                                        AggValueSlot::Overlap_t MayOverlap,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  Address DestPtr = Dest.getAddress();
  Address SrcPtr = Src.getAddress();

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment() ||
              Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  if (getLangOpts().CUDAIsDevice) {
    if (Ty->isCUDADeviceBuiltinSurfaceType()) {
      if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest,
                                                                  Src))
        return;
    } else if (Ty->isCUDADeviceBuiltinTextureType()) {
      if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest,
                                                                  Src))
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in anyway the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get data size info for this aggregate. Don't copy the tail padding if this
  // might be a potentially-overlapping subobject, since the tail padding might
  // be occupied by a different object. Otherwise, copying it is fine.
  TypeInfoChars TypeInfo;
  if (MayOverlap)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  llvm::Value *SizeVal = nullptr;
  if (TypeInfo.Width.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
            getContext().getAsArrayType(Ty))) {
      QualType BaseEltTy;
      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
      TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
      assert(!TypeInfo.Width.isZero());
      SizeVal = Builder.CreateNUWMul(
          SizeVal,
          llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()));
    }
  }
  if (!SizeVal) {
    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity());
  }

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  DestPtr = DestPtr.withElementType(Int8Ty);
  SrcPtr = SrcPtr.withElementType(Int8Ty);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it in to scalar memory operations.
  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (CGM.getCodeGenOpts().NewStructPathTBAA) {
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
        Dest.getTBAAInfo(), Src.getTBAAInfo());
    CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
  }
}