//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Constant Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"

using namespace clang;
using namespace CodeGen;
//===----------------------------------------------------------------------===//
//                            ConstantAggregateBuilder
//===----------------------------------------------------------------------===//

namespace {
class ConstExprEmitter;

struct ConstantAggregateBuilderUtils {
  CodeGenModule &CGM;

  ConstantAggregateBuilderUtils(CodeGenModule &CGM) : CGM(CGM) {}
  CharUnits getAlignment(const llvm::Constant *C) const {
    return CharUnits::fromQuantity(
        CGM.getDataLayout().getABITypeAlign(C->getType()));
  }

  CharUnits getSize(llvm::Type *Ty) const {
    return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(Ty));
  }

  CharUnits getSize(const llvm::Constant *C) const {
    return getSize(C->getType());
  }

  llvm::Constant *getPadding(CharUnits PadSize) const {
    llvm::Type *Ty = CGM.CharTy;
    if (PadSize > CharUnits::One())
      Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
    return llvm::UndefValue::get(Ty);
  }

  llvm::Constant *getZeroes(CharUnits ZeroSize) const {
    llvm::Type *Ty = llvm::ArrayType::get(CGM.CharTy, ZeroSize.getQuantity());
    return llvm::ConstantAggregateZero::get(Ty);
  }
};
/// Incremental builder for an llvm::Constant* holding a struct or array
/// constant.
class ConstantAggregateBuilder : private ConstantAggregateBuilderUtils {
  /// The elements of the constant. These two arrays must have the same size;
  /// Offsets[i] describes the offset of Elems[i] within the constant. The
  /// elements are kept in increasing offset order, and we ensure that there
  /// is no overlap: Offsets[i+1] >= Offsets[i] + getSize(Elems[i]).
  ///
  /// This may contain explicit padding elements (in order to create a
  /// natural layout), but need not. Gaps between elements are implicitly
  /// considered to be filled with undef.
  llvm::SmallVector<llvm::Constant *, 32> Elems;
  llvm::SmallVector<CharUnits, 32> Offsets;

  /// The size of the constant (the maximum end offset of any added element).
  /// May be larger than the end of Elems.back() if we split the last element
  /// and removed some trailing undefs.
  CharUnits Size = CharUnits::Zero();

  /// This is true only if laying out Elems in order as the elements of a
  /// non-packed LLVM struct will give the correct layout.
  bool NaturalLayout = true;

  bool split(size_t Index, CharUnits Hint);
  std::optional<size_t> splitAt(CharUnits Pos);

  static llvm::Constant *buildFrom(CodeGenModule &CGM,
                                   ArrayRef<llvm::Constant *> Elems,
                                   ArrayRef<CharUnits> Offsets,
                                   CharUnits StartOffset, CharUnits Size,
                                   bool NaturalLayout, llvm::Type *DesiredTy,
                                   bool AllowOversized);

public:
  ConstantAggregateBuilder(CodeGenModule &CGM)
      : ConstantAggregateBuilderUtils(CGM) {}

  /// Update or overwrite the value starting at \p Offset with \c C.
  ///
  /// \param AllowOverwrite If \c true, this constant might overwrite (part of)
  ///        a constant that has already been added. This flag is only used to
  ///        detect bugs.
  bool add(llvm::Constant *C, CharUnits Offset, bool AllowOverwrite);

  /// Update or overwrite the bits starting at \p OffsetInBits with \p Bits.
  bool addBits(llvm::APInt Bits, uint64_t OffsetInBits, bool AllowOverwrite);

  /// Attempt to condense the value starting at \p Offset to a constant of type
  /// \p DesiredTy.
  void condense(CharUnits Offset, llvm::Type *DesiredTy);

  /// Produce a constant representing the entire accumulated value, ideally of
  /// the specified type. If \p AllowOversized, the constant might be larger
  /// than implied by \p DesiredTy (eg, if there is a flexible array member).
  /// Otherwise, the constant will be of exactly the same size as \p DesiredTy
  /// even if we can't represent it as that type.
  llvm::Constant *build(llvm::Type *DesiredTy, bool AllowOversized) const {
    return buildFrom(CGM, Elems, Offsets, CharUnits::Zero(), Size,
                     NaturalLayout, DesiredTy, AllowOversized);
  }
};

template<typename Container, typename Range = std::initializer_list<
                                  typename Container::value_type>>
static void replace(Container &C, size_t BeginOff, size_t EndOff, Range Vals) {
  assert(BeginOff <= EndOff && "invalid replacement range");
  llvm::replace(C, C.begin() + BeginOff, C.begin() + EndOff, Vals);
}
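
// Illustrative usage sketch of ConstantAggregateBuilder (the names below are
// placeholders, not taken from a real caller): constants are added at explicit
// byte offsets, and build() then decides between a natural and a packed LLVM
// struct layout.
//
//   ConstantAggregateBuilder B(CGM);
//   B.add(FieldConst, CharUnits::Zero(), /*AllowOverwrite=*/false);
//   B.add(OtherConst, CharUnits::fromQuantity(4), /*AllowOverwrite=*/false);
//   llvm::Constant *Result = B.build(DesiredTy, /*AllowOversized=*/false);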
bool ConstantAggregateBuilder::add(llvm::Constant *C, CharUnits Offset,
                                   bool AllowOverwrite) {
  // Common case: appending to a layout.
  if (Offset >= Size) {
    CharUnits Align = getAlignment(C);
    CharUnits AlignedSize = Size.alignTo(Align);
    if (AlignedSize > Offset || Offset.alignTo(Align) != Offset)
      NaturalLayout = false;
    else if (AlignedSize < Offset) {
      Elems.push_back(getPadding(Offset - Size));
      Offsets.push_back(Size);
    }
    Elems.push_back(C);
    Offsets.push_back(Offset);
    Size = Offset + getSize(C);
    return true;
  }

  // Uncommon case: constant overlaps what we've already created.
  std::optional<size_t> FirstElemToReplace = splitAt(Offset);
  if (!FirstElemToReplace)
    return false;

  CharUnits CSize = getSize(C);
  std::optional<size_t> LastElemToReplace = splitAt(Offset + CSize);
  if (!LastElemToReplace)
    return false;

  assert((FirstElemToReplace == LastElemToReplace || AllowOverwrite) &&
         "unexpectedly overwriting field");

  replace(Elems, *FirstElemToReplace, *LastElemToReplace, {C});
  replace(Offsets, *FirstElemToReplace, *LastElemToReplace, {Offset});
  Size = std::max(Size, Offset + CSize);
  NaturalLayout = false;
  return true;
}
bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits,
                                       bool AllowOverwrite) {
  const ASTContext &Context = CGM.getContext();
  const uint64_t CharWidth = CGM.getContext().getCharWidth();

  // Offset of where we want the first bit to go within the bits of the
  // current char.
  unsigned OffsetWithinChar = OffsetInBits % CharWidth;

  // We split bit-fields up into individual bytes. Walk over the bytes and
  // update them.
  for (CharUnits OffsetInChars =
           Context.toCharUnitsFromBits(OffsetInBits - OffsetWithinChar);
       /**/; ++OffsetInChars) {
    // Number of bits we want to fill in this char.
    unsigned WantedBits =
        std::min((uint64_t)Bits.getBitWidth(), CharWidth - OffsetWithinChar);

    // Get a char containing the bits we want in the right places. The other
    // bits have unspecified values.
    llvm::APInt BitsThisChar = Bits;
    if (BitsThisChar.getBitWidth() < CharWidth)
      BitsThisChar = BitsThisChar.zext(CharWidth);
    if (CGM.getDataLayout().isBigEndian()) {
      // Figure out how much to shift by. We may need to left-shift if we have
      // less than one byte of Bits left.
      int Shift = Bits.getBitWidth() - CharWidth + OffsetWithinChar;
      if (Shift > 0)
        BitsThisChar.lshrInPlace(Shift);
      else if (Shift < 0)
        BitsThisChar = BitsThisChar.shl(-Shift);
    } else {
      BitsThisChar = BitsThisChar.shl(OffsetWithinChar);
    }
    if (BitsThisChar.getBitWidth() > CharWidth)
      BitsThisChar = BitsThisChar.trunc(CharWidth);

    if (WantedBits == CharWidth) {
      // Got a full byte: just add it directly.
      add(llvm::ConstantInt::get(CGM.getLLVMContext(), BitsThisChar),
          OffsetInChars, AllowOverwrite);
    } else {
      // Partial byte: update the existing integer if there is one. If we
      // can't split out a 1-CharUnit range to update, then we can't add
      // these bits and fail the entire constant emission.
      std::optional<size_t> FirstElemToUpdate = splitAt(OffsetInChars);
      if (!FirstElemToUpdate)
        return false;
      std::optional<size_t> LastElemToUpdate =
          splitAt(OffsetInChars + CharUnits::One());
      if (!LastElemToUpdate)
        return false;
      assert(*LastElemToUpdate - *FirstElemToUpdate < 2 &&
             "should have at most one element covering one byte");

      // Figure out which bits we want and discard the rest.
      llvm::APInt UpdateMask(CharWidth, 0);
      if (CGM.getDataLayout().isBigEndian())
        UpdateMask.setBits(CharWidth - OffsetWithinChar - WantedBits,
                           CharWidth - OffsetWithinChar);
      else
        UpdateMask.setBits(OffsetWithinChar, OffsetWithinChar + WantedBits);
      BitsThisChar &= UpdateMask;

      if (*FirstElemToUpdate == *LastElemToUpdate ||
          Elems[*FirstElemToUpdate]->isNullValue() ||
          isa<llvm::UndefValue>(Elems[*FirstElemToUpdate])) {
        // All existing bits are either zero or undef.
        add(llvm::ConstantInt::get(CGM.getLLVMContext(), BitsThisChar),
            OffsetInChars, /*AllowOverwrite*/ true);
      } else {
        llvm::Constant *&ToUpdate = Elems[*FirstElemToUpdate];
        // In order to perform a partial update, we need the existing bitwise
        // value, which we can only extract for a constant int.
        auto *CI = dyn_cast<llvm::ConstantInt>(ToUpdate);
        if (!CI)
          return false;
        // Because this is a 1-CharUnit range, the constant occupying it must
        // be exactly one CharUnit wide.
        assert(CI->getBitWidth() == CharWidth && "splitAt failed");
        assert((!(CI->getValue() & UpdateMask) || AllowOverwrite) &&
               "unexpectedly overwriting bitfield");
        BitsThisChar |= (CI->getValue() & ~UpdateMask);
        ToUpdate = llvm::ConstantInt::get(CGM.getLLVMContext(), BitsThisChar);
      }
    }

    // Stop if we've added all the bits.
    if (WantedBits == Bits.getBitWidth())
      break;

    // Remove the consumed bits from Bits.
    if (!CGM.getDataLayout().isBigEndian())
      Bits.lshrInPlace(WantedBits);
    Bits = Bits.trunc(Bits.getBitWidth() - WantedBits);

    // The remaining bits go at the start of the following bytes.
    OffsetWithinChar = 0;
  }

  return true;
}
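
// Worked example for addBits (illustrative, little-endian target): writing an
// 11-bit value at bit offset 5 touches two bytes. The first iteration keeps
// the low 5 bits of the existing byte and stores the value's low 3 bits into
// bit positions 5..7; the remaining 8 bits are then emitted as a full byte at
// the next CharUnits offset on the second iteration.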
/// Returns a position within Elems and Offsets such that all elements
/// before the returned index end before Pos and all elements at or after
/// the returned index begin at or after Pos. Splits elements as necessary
/// to ensure this. Returns std::nullopt if we find something we can't split.
std::optional<size_t> ConstantAggregateBuilder::splitAt(CharUnits Pos) {
  if (Pos >= Size)
    return Offsets.size();

  while (true) {
    auto FirstAfterPos = llvm::upper_bound(Offsets, Pos);
    if (FirstAfterPos == Offsets.begin())
      return 0;

    // If we already have an element starting at Pos, we're done.
    size_t LastAtOrBeforePosIndex = FirstAfterPos - Offsets.begin() - 1;
    if (Offsets[LastAtOrBeforePosIndex] == Pos)
      return LastAtOrBeforePosIndex;

    // We found an element starting before Pos. Check for overlap.
    if (Offsets[LastAtOrBeforePosIndex] +
        getSize(Elems[LastAtOrBeforePosIndex]) <= Pos)
      return LastAtOrBeforePosIndex + 1;

    // Try to decompose it into smaller constants.
    if (!split(LastAtOrBeforePosIndex, Pos))
      return std::nullopt;
  }
}
/// Split the constant at index Index, if possible. Return true if we did.
/// Hint indicates the location at which we'd like to split, but may be
/// ignored.
bool ConstantAggregateBuilder::split(size_t Index, CharUnits Hint) {
  NaturalLayout = false;
  llvm::Constant *C = Elems[Index];
  CharUnits Offset = Offsets[Index];

  if (auto *CA = dyn_cast<llvm::ConstantAggregate>(C)) {
    // Expand the sequence into its contained elements.
    // FIXME: This assumes vector elements are byte-sized.
    replace(Elems, Index, Index + 1,
            llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
                            [&](unsigned Op) { return CA->getOperand(Op); }));
    if (isa<llvm::ArrayType>(CA->getType()) ||
        isa<llvm::VectorType>(CA->getType())) {
      // Array or vector.
      llvm::Type *ElemTy =
          llvm::GetElementPtrInst::getTypeAtIndex(CA->getType(), (uint64_t)0);
      CharUnits ElemSize = getSize(ElemTy);
      replace(
          Offsets, Index, Index + 1,
          llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
                          [&](unsigned Op) { return Offset + Op * ElemSize; }));
    } else {
      auto *ST = cast<llvm::StructType>(CA->getType());
      const llvm::StructLayout *Layout =
          CGM.getDataLayout().getStructLayout(ST);
      replace(Offsets, Index, Index + 1,
              llvm::map_range(
                  llvm::seq(0u, CA->getNumOperands()), [&](unsigned Op) {
                    return Offset + CharUnits::fromQuantity(
                                        Layout->getElementOffset(Op));
                  }));
    }
    return true;
  }

  if (auto *CDS = dyn_cast<llvm::ConstantDataSequential>(C)) {
    // Expand the sequence into its contained elements.
    // FIXME: This assumes vector elements are byte-sized.
    // FIXME: If possible, split into two ConstantDataSequentials at Hint.
    CharUnits ElemSize = getSize(CDS->getElementType());
    replace(Elems, Index, Index + 1,
            llvm::map_range(llvm::seq(0u, CDS->getNumElements()),
                            [&](unsigned Elem) {
                              return CDS->getElementAsConstant(Elem);
                            }));
    replace(Offsets, Index, Index + 1,
            llvm::map_range(
                llvm::seq(0u, CDS->getNumElements()),
                [&](unsigned Elem) { return Offset + Elem * ElemSize; }));
    return true;
  }

  if (isa<llvm::ConstantAggregateZero>(C)) {
    // Split into two zeros at the hinted offset.
    CharUnits ElemSize = getSize(C);
    assert(Hint > Offset && Hint < Offset + ElemSize && "nothing to split");
    replace(Elems, Index, Index + 1,
            {getZeroes(Hint - Offset), getZeroes(Offset + ElemSize - Hint)});
    replace(Offsets, Index, Index + 1, {Offset, Hint});
    return true;
  }

  if (isa<llvm::UndefValue>(C)) {
    // Drop undef; it doesn't contribute to the final layout.
    replace(Elems, Index, Index + 1, {});
    replace(Offsets, Index, Index + 1, {});
    return true;
  }

  // FIXME: We could split a ConstantInt if the need ever arose.
  // We don't need to do this to handle bit-fields because we always eagerly
  // split them into 1-byte chunks.

  return false;
}
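
// Example of split() behaviour (illustrative): a ConstantAggregateZero of type
// [8 x i8] split with Hint at byte 3 becomes two zero runs of 3 and 5 bytes,
// so that a later add() at offset 3 can overwrite just that part of the range.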
static llvm::Constant *
EmitArrayConstant(CodeGenModule &CGM, llvm::ArrayType *DesiredType,
                  llvm::Type *CommonElementType, uint64_t ArrayBound,
                  SmallVectorImpl<llvm::Constant *> &Elements,
                  llvm::Constant *Filler);
llvm::Constant *ConstantAggregateBuilder::buildFrom(
    CodeGenModule &CGM, ArrayRef<llvm::Constant *> Elems,
    ArrayRef<CharUnits> Offsets, CharUnits StartOffset, CharUnits Size,
    bool NaturalLayout, llvm::Type *DesiredTy, bool AllowOversized) {
  ConstantAggregateBuilderUtils Utils(CGM);

  if (Elems.empty())
    return llvm::UndefValue::get(DesiredTy);

  auto Offset = [&](size_t I) { return Offsets[I] - StartOffset; };

  // If we want an array type, see if all the elements are the same type and
  // appropriately spaced.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(DesiredTy)) {
    assert(!AllowOversized && "oversized array emission not supported");

    bool CanEmitArray = true;
    llvm::Type *CommonType = Elems[0]->getType();
    llvm::Constant *Filler = llvm::Constant::getNullValue(CommonType);
    CharUnits ElemSize = Utils.getSize(ATy->getElementType());
    SmallVector<llvm::Constant *, 32> ArrayElements;
    for (size_t I = 0; I != Elems.size(); ++I) {
      // Skip zeroes; we'll use a zero value as our array filler.
      if (Elems[I]->isNullValue())
        continue;

      // All remaining elements must be the same type.
      if (Elems[I]->getType() != CommonType ||
          Offset(I) % ElemSize != 0) {
        CanEmitArray = false;
        break;
      }
      ArrayElements.resize(Offset(I) / ElemSize + 1, Filler);
      ArrayElements.back() = Elems[I];
    }

    if (CanEmitArray) {
      return EmitArrayConstant(CGM, ATy, CommonType, ATy->getNumElements(),
                               ArrayElements, Filler);
    }

    // Can't emit as an array, carry on to emit as a struct.
  }

  // The size of the constant we plan to generate. This is usually just
  // the size of the initialized type, but in AllowOversized mode (i.e.
  // flexible array init), it can be larger.
  CharUnits DesiredSize = Utils.getSize(DesiredTy);
  if (Size > DesiredSize) {
    assert(AllowOversized && "Elems are oversized");
    DesiredSize = Size;
  }

  // The natural alignment of an unpacked LLVM struct with the given elements.
  CharUnits Align = CharUnits::One();
  for (llvm::Constant *C : Elems)
    Align = std::max(Align, Utils.getAlignment(C));

  // The natural size of an unpacked LLVM struct with the given elements.
  CharUnits AlignedSize = Size.alignTo(Align);

  bool Packed = false;
  ArrayRef<llvm::Constant *> UnpackedElems = Elems;
  llvm::SmallVector<llvm::Constant *, 32> UnpackedElemStorage;
  if (DesiredSize < AlignedSize || DesiredSize.alignTo(Align) != DesiredSize) {
    // The natural layout would be too big; force use of a packed layout.
    NaturalLayout = false;
    Packed = true;
  } else if (DesiredSize > AlignedSize) {
    // The natural layout would be too small. Add padding to fix it. (This
    // is ignored if we choose a packed layout.)
    UnpackedElemStorage.assign(Elems.begin(), Elems.end());
    UnpackedElemStorage.push_back(Utils.getPadding(DesiredSize - Size));
    UnpackedElems = UnpackedElemStorage;
  }

  // If we don't have a natural layout, insert padding as necessary.
  // As we go, double-check to see if we can actually just emit Elems
  // as a non-packed struct and do so opportunistically if possible.
  llvm::SmallVector<llvm::Constant *, 32> PackedElems;
  if (!NaturalLayout) {
    CharUnits SizeSoFar = CharUnits::Zero();
    for (size_t I = 0; I != Elems.size(); ++I) {
      CharUnits Align = Utils.getAlignment(Elems[I]);
      CharUnits NaturalOffset = SizeSoFar.alignTo(Align);
      CharUnits DesiredOffset = Offset(I);
      assert(DesiredOffset >= SizeSoFar && "elements out of order");

      if (DesiredOffset != NaturalOffset)
        Packed = true;
      if (DesiredOffset != SizeSoFar)
        PackedElems.push_back(Utils.getPadding(DesiredOffset - SizeSoFar));
      PackedElems.push_back(Elems[I]);
      SizeSoFar = DesiredOffset + Utils.getSize(Elems[I]);
    }
    // If we're using the packed layout, pad it out to the desired size if
    // necessary.
    if (Packed) {
      assert(SizeSoFar <= DesiredSize &&
             "requested size is too small for contents");
      if (SizeSoFar < DesiredSize)
        PackedElems.push_back(Utils.getPadding(DesiredSize - SizeSoFar));
    }
  }

  llvm::StructType *STy = llvm::ConstantStruct::getTypeForElements(
      CGM.getLLVMContext(), Packed ? PackedElems : UnpackedElems, Packed);

  // Pick the type to use. If the type is layout identical to the desired
  // type then use it, otherwise use whatever the builder produced for us.
  if (llvm::StructType *DesiredSTy = dyn_cast<llvm::StructType>(DesiredTy)) {
    if (DesiredSTy->isLayoutIdentical(STy))
      STy = DesiredSTy;
  }

  return llvm::ConstantStruct::get(STy, Packed ? PackedElems : UnpackedElems);
}
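
// Illustrative layout decision in buildFrom: for elements {i8 at offset 0,
// i32 at offset 2}, the i32's natural (unpacked) offset after an i8 would be
// 4, so a packed layout is chosen and one undef padding byte is inserted,
// yielding the packed struct <{ i8, i8, i32 }>.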
void ConstantAggregateBuilder::condense(CharUnits Offset,
                                        llvm::Type *DesiredTy) {
  CharUnits Size = getSize(DesiredTy);

  std::optional<size_t> FirstElemToReplace = splitAt(Offset);
  if (!FirstElemToReplace)
    return;
  size_t First = *FirstElemToReplace;

  std::optional<size_t> LastElemToReplace = splitAt(Offset + Size);
  if (!LastElemToReplace)
    return;
  size_t Last = *LastElemToReplace;

  size_t Length = Last - First;
  if (Length == 0)
    return;

  if (Length == 1 && Offsets[First] == Offset &&
      getSize(Elems[First]) == Size) {
    // Re-wrap single element structs if necessary. Otherwise, leave any single
    // element constant of the right size alone even if it has the wrong type.
    auto *STy = dyn_cast<llvm::StructType>(DesiredTy);
    if (STy && STy->getNumElements() == 1 &&
        STy->getElementType(0) == Elems[First]->getType())
      Elems[First] = llvm::ConstantStruct::get(STy, Elems[First]);
    return;
  }

  llvm::Constant *Replacement = buildFrom(
      CGM, ArrayRef(Elems).slice(First, Length),
      ArrayRef(Offsets).slice(First, Length), Offset, getSize(DesiredTy),
      /*known to have natural layout=*/false, DesiredTy, false);
  replace(Elems, First, Last, {Replacement});
  replace(Offsets, First, Last, {Offset});
}
//===----------------------------------------------------------------------===//
//                            ConstStructBuilder
//===----------------------------------------------------------------------===//

class ConstStructBuilder {
  CodeGenModule &CGM;
  ConstantEmitter &Emitter;
  ConstantAggregateBuilder &Builder;
  CharUnits StartOffset;

public:
  static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
                                     const InitListExpr *ILE,
                                     QualType ValTy);
  static llvm::Constant *BuildStruct(ConstantEmitter &Emitter,
                                     const APValue &Value, QualType ValTy);
  static bool UpdateStruct(ConstantEmitter &Emitter,
                           ConstantAggregateBuilder &Const, CharUnits Offset,
                           const InitListExpr *Updater);

private:
  ConstStructBuilder(ConstantEmitter &Emitter,
                     ConstantAggregateBuilder &Builder, CharUnits StartOffset)
      : CGM(Emitter.CGM), Emitter(Emitter), Builder(Builder),
        StartOffset(StartOffset) {}

  bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
                   llvm::Constant *InitExpr, bool AllowOverwrite = false);

  bool AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst,
                   bool AllowOverwrite = false);

  bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
                      llvm::Constant *InitExpr, bool AllowOverwrite = false);

  bool Build(const InitListExpr *ILE, bool AllowOverwrite);
  bool Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
             const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
  llvm::Constant *Finalize(QualType Ty);
};
bool ConstStructBuilder::AppendField(
    const FieldDecl *Field, uint64_t FieldOffset, llvm::Constant *InitCst,
    bool AllowOverwrite) {
  const ASTContext &Context = CGM.getContext();

  CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);

  return AppendBytes(FieldOffsetInChars, InitCst, AllowOverwrite);
}
bool ConstStructBuilder::AppendBytes(CharUnits FieldOffsetInChars,
                                     llvm::Constant *InitCst,
                                     bool AllowOverwrite) {
  return Builder.add(InitCst, StartOffset + FieldOffsetInChars, AllowOverwrite);
}
bool ConstStructBuilder::AppendBitField(const FieldDecl *Field,
                                        uint64_t FieldOffset, llvm::Constant *C,
                                        bool AllowOverwrite) {
  llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C);
  if (!CI) {
    // Constants for long _BitInt types are sometimes split into individual
    // bytes. Try to fold these back into an integer constant. If that doesn't
    // work out, then we are trying to initialize a bitfield with a non-trivial
    // constant, this must require run-time code.
    llvm::Type *LoadType =
        CGM.getTypes().convertTypeForLoadStore(Field->getType(), C->getType());
    llvm::Constant *FoldedConstant = llvm::ConstantFoldLoadFromConst(
        C, LoadType, llvm::APInt::getZero(32), CGM.getDataLayout());
    CI = dyn_cast_if_present<llvm::ConstantInt>(FoldedConstant);
    if (!CI)
      return false;
  }

  const CGRecordLayout &RL =
      CGM.getTypes().getCGRecordLayout(Field->getParent());
  const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
  llvm::APInt FieldValue = CI->getValue();

  // Promote the size of FieldValue if necessary
  // FIXME: This should never occur, but currently it can because initializer
  // constants are cast to bool, and because clang is not enforcing bitfield
  // width limits.
  if (Info.Size > FieldValue.getBitWidth())
    FieldValue = FieldValue.zext(Info.Size);

  // Truncate the size of FieldValue to the bit field size.
  if (Info.Size < FieldValue.getBitWidth())
    FieldValue = FieldValue.trunc(Info.Size);

  return Builder.addBits(FieldValue,
                         CGM.getContext().toBits(StartOffset) + FieldOffset,
                         AllowOverwrite);
}
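
// Illustrative example for AppendBitField: initializing
// 'struct { unsigned a : 3; }' with a = 5 truncates the constant to the 3-bit
// field width and forwards it to ConstantAggregateBuilder::addBits() at the
// field's bit offset, which packs it into the containing byte(s).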
static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
                                      ConstantAggregateBuilder &Const,
                                      CharUnits Offset, QualType Type,
                                      const InitListExpr *Updater) {
  if (Type->isRecordType())
    return ConstStructBuilder::UpdateStruct(Emitter, Const, Offset, Updater);

  auto CAT = Emitter.CGM.getContext().getAsConstantArrayType(Type);
  if (!CAT)
    return false;
  QualType ElemType = CAT->getElementType();
  CharUnits ElemSize = Emitter.CGM.getContext().getTypeSizeInChars(ElemType);
  llvm::Type *ElemTy = Emitter.CGM.getTypes().ConvertTypeForMem(ElemType);

  llvm::Constant *FillC = nullptr;
  if (const Expr *Filler = Updater->getArrayFiller()) {
    if (!isa<NoInitExpr>(Filler)) {
      FillC = Emitter.tryEmitAbstractForMemory(Filler, ElemType);
      if (!FillC)
        return false;
    }
  }

  unsigned NumElementsToUpdate =
      FillC ? CAT->getZExtSize() : Updater->getNumInits();
  for (unsigned I = 0; I != NumElementsToUpdate; ++I, Offset += ElemSize) {
    const Expr *Init = nullptr;
    if (I < Updater->getNumInits())
      Init = Updater->getInit(I);

    if (!Init && FillC) {
      if (!Const.add(FillC, Offset, true))
        return false;
    } else if (!Init || isa<NoInitExpr>(Init)) {
      continue;
    } else if (const auto *ChildILE = dyn_cast<InitListExpr>(Init)) {
      if (!EmitDesignatedInitUpdater(Emitter, Const, Offset, ElemType,
                                     ChildILE))
        return false;
      // Attempt to reduce the array element to a single constant if necessary.
      Const.condense(Offset, ElemTy);
    } else {
      llvm::Constant *Val = Emitter.tryEmitPrivateForMemory(Init, ElemType);
      if (!Const.add(Val, Offset, true))
        return false;
    }
  }

  return true;
}
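
// Rough sketch of where this path is exercised: a designated-init update in C
// along the lines of
//
//   struct A { int x, y; };
//   struct B { struct A a; } b = { .a = base, .a.y = 2 };
//
// first emits the base value and then overwrites individual subobjects in
// place via the updater logic above.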
bool ConstStructBuilder::Build(const InitListExpr *ILE, bool AllowOverwrite) {
  RecordDecl *RD = ILE->getType()->castAs<RecordType>()->getDecl();
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  unsigned FieldNo = -1;
  unsigned ElementNo = 0;

  // Bail out if we have base classes. We could support these, but they only
  // arise in C++1z where we will have already constant folded most interesting
  // cases. FIXME: There are still a few more cases we can handle this way.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    if (CXXRD->getNumBases())
      return false;

  for (FieldDecl *Field : RD->fields()) {
    ++FieldNo;

    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() &&
        !declaresSameEntity(ILE->getInitializedFieldInUnion(), Field))
      continue;

    // Don't emit anonymous bitfields.
    if (Field->isUnnamedBitField())
      continue;

    // Get the initializer. A struct can include fields without initializers,
    // we just use explicit null values for them.
    const Expr *Init = nullptr;
    if (ElementNo < ILE->getNumInits())
      Init = ILE->getInit(ElementNo++);
    if (isa_and_nonnull<NoInitExpr>(Init))
      continue;

    // Zero-sized fields are not emitted, but their initializers may still
    // prevent emission of this struct as a constant.
    if (isEmptyFieldForLayout(CGM.getContext(), Field)) {
      if (Init->HasSideEffects(CGM.getContext()))
        return false;
      continue;
    }

    // When emitting a DesignatedInitUpdateExpr, a nested InitListExpr
    // represents additional overwriting of our current constant value, and not
    // a new constant to emit independently.
    if (AllowOverwrite &&
        (Field->getType()->isArrayType() || Field->getType()->isRecordType())) {
      if (auto *SubILE = dyn_cast<InitListExpr>(Init)) {
        CharUnits Offset = CGM.getContext().toCharUnitsFromBits(
            Layout.getFieldOffset(FieldNo));
        if (!EmitDesignatedInitUpdater(Emitter, Builder, StartOffset + Offset,
                                       Field->getType(), SubILE))
          return false;
        // If we split apart the field's value, try to collapse it down to a
        // single value now.
        Builder.condense(StartOffset + Offset,
                         CGM.getTypes().ConvertTypeForMem(Field->getType()));
        continue;
      }
    }

    llvm::Constant *EltInit =
        Init ? Emitter.tryEmitPrivateForMemory(Init, Field->getType())
             : Emitter.emitNullForMemory(Field->getType());
    if (!EltInit)
      return false;

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      if (!AppendField(Field, Layout.getFieldOffset(FieldNo), EltInit,
                       AllowOverwrite))
        return false;
      // After emitting a non-empty field with [[no_unique_address]], we may
      // need to overwrite its tail padding.
      if (Field->hasAttr<NoUniqueAddressAttr>())
        AllowOverwrite = true;
    } else {
      // Otherwise we have a bitfield.
      if (!AppendBitField(Field, Layout.getFieldOffset(FieldNo), EltInit,
                          AllowOverwrite))
        return false;
    }
  }

  return true;
}

namespace {
struct BaseInfo {
  BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
      : Decl(Decl), Offset(Offset), Index(Index) {
  }

  const CXXRecordDecl *Decl;
  CharUnits Offset;
  unsigned Index;

  bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
};
}
bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
                               bool IsPrimaryBase,
                               const CXXRecordDecl *VTableClass,
                               CharUnits Offset) {
  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

  if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
    // Add a vtable pointer, if we need one and it hasn't already been added.
    if (Layout.hasOwnVFPtr()) {
      llvm::Constant *VTableAddressPoint =
          CGM.getCXXABI().getVTableAddressPoint(BaseSubobject(CD, Offset),
                                                VTableClass);
      if (auto Authentication = CGM.getVTablePointerAuthentication(CD)) {
        VTableAddressPoint = Emitter.tryEmitConstantSignedPointer(
            VTableAddressPoint, *Authentication);
        if (!VTableAddressPoint)
          return false;
      }
      if (!AppendBytes(Offset, VTableAddressPoint))
        return false;
    }

    // Accumulate and sort bases, in order to visit them in address order, which
    // may not be the same as declaration order.
    SmallVector<BaseInfo, 8> Bases;
    Bases.reserve(CD->getNumBases());
    unsigned BaseNo = 0;
    for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
         BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
      assert(!Base->isVirtual() && "should not have virtual bases here");
      const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
      CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
      Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
    }
    llvm::stable_sort(Bases);

    for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
      BaseInfo &Base = Bases[I];

      bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
      Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
            VTableClass, Offset + Base.Offset);
    }
  }

  unsigned FieldNo = 0;
  uint64_t OffsetBits = CGM.getContext().toBits(Offset);

  bool AllowOverwrite = false;
  for (RecordDecl::field_iterator Field = RD->field_begin(),
       FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    // If this is a union, skip all the fields that aren't being initialized.
    if (RD->isUnion() && !declaresSameEntity(Val.getUnionField(), *Field))
      continue;

    // Don't emit anonymous bitfields or zero-sized fields.
    if (Field->isUnnamedBitField() ||
        isEmptyFieldForLayout(CGM.getContext(), *Field))
      continue;

    // Emit the value of the initializer.
    const APValue &FieldValue =
      RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo);
    llvm::Constant *EltInit =
      Emitter.tryEmitPrivateForMemory(FieldValue, Field->getType());
    if (!EltInit)
      return false;

    if (!Field->isBitField()) {
      // Handle non-bitfield members.
      if (!AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
                       EltInit, AllowOverwrite))
        return false;
      // After emitting a non-empty field with [[no_unique_address]], we may
      // need to overwrite its tail padding.
      if (Field->hasAttr<NoUniqueAddressAttr>())
        AllowOverwrite = true;
    } else {
      // Otherwise we have a bitfield.
      if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
                          EltInit, AllowOverwrite))
        return false;
    }
  }

  return true;
}
*ConstStructBuilder::Finalize(QualType Type
) {
894 Type
= Type
.getNonReferenceType();
895 RecordDecl
*RD
= Type
->castAs
<RecordType
>()->getDecl();
896 llvm::Type
*ValTy
= CGM
.getTypes().ConvertType(Type
);
897 return Builder
.build(ValTy
, RD
->hasFlexibleArrayMember());
llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
                                                const InitListExpr *ILE,
                                                QualType ValTy) {
  ConstantAggregateBuilder Const(Emitter.CGM);
  ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());

  if (!Builder.Build(ILE, /*AllowOverwrite*/false))
    return nullptr;

  return Builder.Finalize(ValTy);
}
llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
                                                const APValue &Val,
                                                QualType ValTy) {
  ConstantAggregateBuilder Const(Emitter.CGM);
  ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());

  const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
  const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
  if (!Builder.Build(Val, RD, false, CD, CharUnits::Zero()))
    return nullptr;

  return Builder.Finalize(ValTy);
}
bool ConstStructBuilder::UpdateStruct(ConstantEmitter &Emitter,
                                      ConstantAggregateBuilder &Const,
                                      CharUnits Offset,
                                      const InitListExpr *Updater) {
  return ConstStructBuilder(Emitter, Const, Offset)
      .Build(Updater, /*AllowOverwrite*/ true);
}
//===----------------------------------------------------------------------===//
//                             ConstExprEmitter
//===----------------------------------------------------------------------===//

static ConstantAddress
tryEmitGlobalCompoundLiteral(ConstantEmitter &emitter,
                             const CompoundLiteralExpr *E) {
  CodeGenModule &CGM = emitter.CGM;
  CharUnits Align = CGM.getContext().getTypeAlignInChars(E->getType());
  if (llvm::GlobalVariable *Addr =
          CGM.getAddrOfConstantCompoundLiteralIfEmitted(E))
    return ConstantAddress(Addr, Addr->getValueType(), Align);

  LangAS addressSpace = E->getType().getAddressSpace();
  llvm::Constant *C = emitter.tryEmitForInitializer(E->getInitializer(),
                                                    addressSpace, E->getType());
  if (!C) {
    assert(!E->isFileScope() &&
           "file-scope compound literal did not have constant initializer!");
    return ConstantAddress::invalid();
  }

  auto GV = new llvm::GlobalVariable(
      CGM.getModule(), C->getType(),
      E->getType().isConstantStorage(CGM.getContext(), true, false),
      llvm::GlobalValue::InternalLinkage, C, ".compoundliteral", nullptr,
      llvm::GlobalVariable::NotThreadLocal,
      CGM.getContext().getTargetAddressSpace(addressSpace));
  emitter.finalize(GV);
  GV->setAlignment(Align.getAsAlign());
  CGM.setAddrOfConstantCompoundLiteral(E, GV);
  return ConstantAddress(GV, GV->getValueType(), Align);
}
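
// Illustrative effect of the helper above: a file-scope compound literal such
// as
//
//   int *p = (int[]){1, 2, 3};
//
// gets its initializer emitted into an internal global named
// ".compoundliteral", and later uses of the same literal reuse that global via
// getAddrOfConstantCompoundLiteralIfEmitted().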
static llvm::Constant *
EmitArrayConstant(CodeGenModule &CGM, llvm::ArrayType *DesiredType,
                  llvm::Type *CommonElementType, uint64_t ArrayBound,
                  SmallVectorImpl<llvm::Constant *> &Elements,
                  llvm::Constant *Filler) {
  // Figure out how long the initial prefix of non-zero elements is.
  uint64_t NonzeroLength = ArrayBound;
  if (Elements.size() < NonzeroLength && Filler->isNullValue())
    NonzeroLength = Elements.size();
  if (NonzeroLength == Elements.size()) {
    while (NonzeroLength > 0 && Elements[NonzeroLength - 1]->isNullValue())
      --NonzeroLength;
  }

  if (NonzeroLength == 0)
    return llvm::ConstantAggregateZero::get(DesiredType);

  // Add a zeroinitializer array filler if we have lots of trailing zeroes.
  uint64_t TrailingZeroes = ArrayBound - NonzeroLength;
  if (TrailingZeroes >= 8) {
    assert(Elements.size() >= NonzeroLength &&
           "missing initializer for non-zero element");

    // If all the elements had the same type up to the trailing zeroes, emit a
    // struct of two arrays (the nonzero data and the zeroinitializer).
    if (CommonElementType && NonzeroLength >= 8) {
      llvm::Constant *Initial = llvm::ConstantArray::get(
          llvm::ArrayType::get(CommonElementType, NonzeroLength),
          ArrayRef(Elements).take_front(NonzeroLength));
      Elements.resize(2);
      Elements[0] = Initial;
    } else {
      Elements.resize(NonzeroLength + 1);
    }

    llvm::Type *FillerType =
        CommonElementType ? CommonElementType : DesiredType->getElementType();
    FillerType = llvm::ArrayType::get(FillerType, TrailingZeroes);
    Elements.back() = llvm::ConstantAggregateZero::get(FillerType);
    CommonElementType = nullptr;
  } else if (Elements.size() != ArrayBound) {
    // Otherwise pad to the right size with the filler if necessary.
    Elements.resize(ArrayBound, Filler);
    if (Filler->getType() != CommonElementType)
      CommonElementType = nullptr;
  }

  // If all elements have the same type, just emit an array constant.
  if (CommonElementType)
    return llvm::ConstantArray::get(
        llvm::ArrayType::get(CommonElementType, ArrayBound), Elements);

  // We have mixed types. Use a packed struct.
  llvm::SmallVector<llvm::Type *, 16> Types;
  Types.reserve(Elements.size());
  for (llvm::Constant *Elt : Elements)
    Types.push_back(Elt->getType());
  llvm::StructType *SType =
      llvm::StructType::get(CGM.getLLVMContext(), Types, true);
  return llvm::ConstantStruct::get(SType, Elements);
}
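
// Illustrative result of EmitArrayConstant: for 'int a[100]' whose first ten
// elements are non-zero, the emitted constant is a packed struct consisting of
// a [10 x i32] holding the data followed by a [90 x i32] zeroinitializer,
// rather than a 100-element ConstantArray.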
// This class only needs to handle arrays, structs and unions. Outside C++11
// mode, we don't currently constant fold those types. All other types are
// handled by constant folding.
//
// Constant folding is currently missing support for a few features supported
// here: CK_ToUnion, CK_ReinterpretMemberPointer, and DesignatedInitUpdateExpr.
class ConstExprEmitter
    : public ConstStmtVisitor<ConstExprEmitter, llvm::Constant *, QualType> {
  CodeGenModule &CGM;
  ConstantEmitter &Emitter;
  llvm::LLVMContext &VMContext;
public:
  ConstExprEmitter(ConstantEmitter &emitter)
      : CGM(emitter.CGM), Emitter(emitter), VMContext(CGM.getLLVMContext()) {
  }
  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  llvm::Constant *VisitStmt(const Stmt *S, QualType T) { return nullptr; }

  llvm::Constant *VisitConstantExpr(const ConstantExpr *CE, QualType T) {
    if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(CE))
      return Result;
    return Visit(CE->getSubExpr(), T);
  }

  llvm::Constant *VisitParenExpr(const ParenExpr *PE, QualType T) {
    return Visit(PE->getSubExpr(), T);
  }

  llvm::Constant *
  VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *PE,
                                    QualType T) {
    return Visit(PE->getReplacement(), T);
  }

  llvm::Constant *VisitGenericSelectionExpr(const GenericSelectionExpr *GE,
                                            QualType T) {
    return Visit(GE->getResultExpr(), T);
  }

  llvm::Constant *VisitChooseExpr(const ChooseExpr *CE, QualType T) {
    return Visit(CE->getChosenSubExpr(), T);
  }

  llvm::Constant *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E,
                                           QualType T) {
    return Visit(E->getInitializer(), T);
  }

  llvm::Constant *ProduceIntToIntCast(const Expr *E, QualType DestType) {
    QualType FromType = E->getType();
    // See also HandleIntToIntCast in ExprConstant.cpp
    if (FromType->isIntegerType())
      if (llvm::Constant *C = Visit(E, FromType))
        if (auto *CI = dyn_cast<llvm::ConstantInt>(C)) {
          unsigned SrcWidth = CGM.getContext().getIntWidth(FromType);
          unsigned DstWidth = CGM.getContext().getIntWidth(DestType);
          if (DstWidth == SrcWidth)
            return CI;
          llvm::APInt A = FromType->isSignedIntegerType()
                              ? CI->getValue().sextOrTrunc(DstWidth)
                              : CI->getValue().zextOrTrunc(DstWidth);
          return llvm::ConstantInt::get(CGM.getLLVMContext(), A);
        }
    return nullptr;
  }
  llvm::Constant *VisitCastExpr(const CastExpr *E, QualType destType) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
      CGM.EmitExplicitCastExprType(ECE, Emitter.CGF);
    const Expr *subExpr = E->getSubExpr();

    switch (E->getCastKind()) {
    case CK_ToUnion: {
      // GCC cast to union extension
      assert(E->getType()->isUnionType() &&
             "Destination type is not union type!");

      auto field = E->getTargetUnionField();

      auto C = Emitter.tryEmitPrivateForMemory(subExpr, field->getType());
      if (!C) return nullptr;

      auto destTy = ConvertType(destType);
      if (C->getType() == destTy) return C;

      // Build a struct with the union sub-element as the first member,
      // and padded to the appropriate size.
      SmallVector<llvm::Constant *, 2> Elts;
      SmallVector<llvm::Type *, 2> Types;
      Elts.push_back(C);
      Types.push_back(C->getType());
      unsigned CurSize = CGM.getDataLayout().getTypeAllocSize(C->getType());
      unsigned TotalSize = CGM.getDataLayout().getTypeAllocSize(destTy);

      assert(CurSize <= TotalSize && "Union size mismatch!");
      if (unsigned NumPadBytes = TotalSize - CurSize) {
        llvm::Type *Ty = CGM.CharTy;
        if (NumPadBytes > 1)
          Ty = llvm::ArrayType::get(Ty, NumPadBytes);

        Elts.push_back(llvm::UndefValue::get(Ty));
        Types.push_back(Ty);
      }

      llvm::StructType *STy = llvm::StructType::get(VMContext, Types, false);
      return llvm::ConstantStruct::get(STy, Elts);
    }

    case CK_AddressSpaceConversion: {
      auto C = Emitter.tryEmitPrivate(subExpr, subExpr->getType());
      if (!C) return nullptr;
      LangAS destAS = E->getType()->getPointeeType().getAddressSpace();
      LangAS srcAS = subExpr->getType()->getPointeeType().getAddressSpace();
      llvm::Type *destTy = ConvertType(E->getType());
      return CGM.getTargetCodeGenInfo().performAddrSpaceCast(CGM, C, srcAS,
                                                             destAS, destTy);
    }

    case CK_LValueToRValue: {
      // We don't really support doing lvalue-to-rvalue conversions here; any
      // interesting conversions should be done in Evaluate(). But as a
      // special case, allow compound literals to support the gcc extension
      // allowing "struct x {int x;} x = (struct x) {};".
      if (const auto *E =
              dyn_cast<CompoundLiteralExpr>(subExpr->IgnoreParens()))
        return Visit(E->getInitializer(), destType);
      return nullptr;
    }

    case CK_AtomicToNonAtomic:
    case CK_NonAtomicToAtomic:
    case CK_NoOp:
    case CK_ConstructorConversion:
      return Visit(subExpr, destType);

    case CK_ArrayToPointerDecay:
      if (const auto *S = dyn_cast<StringLiteral>(subExpr))
        return CGM.GetAddrOfConstantStringFromLiteral(S).getPointer();
      return nullptr;
    case CK_NullToPointer:
      if (Visit(subExpr, destType))
        return CGM.EmitNullConstant(destType);
      return nullptr;

    case CK_IntToOCLSampler:
      llvm_unreachable("global sampler variables are not generated");

    case CK_IntegralCast:
      return ProduceIntToIntCast(subExpr, destType);

    case CK_Dependent: llvm_unreachable("saw dependent cast!");

    case CK_BuiltinFnToFnPtr:
      llvm_unreachable("builtin functions are handled elsewhere");

    case CK_ReinterpretMemberPointer:
    case CK_DerivedToBaseMemberPointer:
    case CK_BaseToDerivedMemberPointer: {
      auto C = Emitter.tryEmitPrivate(subExpr, subExpr->getType());
      if (!C) return nullptr;
      return CGM.getCXXABI().EmitMemberPointerConversion(E, C);
    }

    // These will never be supported.
    case CK_ObjCObjectLValueCast:
    case CK_ARCProduceObject:
    case CK_ARCConsumeObject:
    case CK_ARCReclaimReturnedObject:
    case CK_ARCExtendBlockObject:
    case CK_CopyAndAutoreleaseBlockObject:
      return nullptr;

    // These don't need to be handled here because Evaluate knows how to
    // evaluate them in the cases where they can be folded.
    case CK_BitCast:
    case CK_ToVoid:
    case CK_Dynamic:
    case CK_LValueBitCast:
    case CK_LValueToRValueBitCast:
    case CK_NullToMemberPointer:
    case CK_UserDefinedConversion:
    case CK_CPointerToObjCPointerCast:
    case CK_BlockPointerToObjCPointerCast:
    case CK_AnyPointerToBlockPointerCast:
    case CK_FunctionToPointerDecay:
    case CK_BaseToDerived:
    case CK_DerivedToBase:
    case CK_UncheckedDerivedToBase:
    case CK_MemberPointerToBoolean:
    case CK_VectorSplat:
    case CK_FloatingRealToComplex:
    case CK_FloatingComplexToReal:
    case CK_FloatingComplexToBoolean:
    case CK_FloatingComplexCast:
    case CK_FloatingComplexToIntegralComplex:
    case CK_IntegralRealToComplex:
    case CK_IntegralComplexToReal:
    case CK_IntegralComplexToBoolean:
    case CK_IntegralComplexCast:
    case CK_IntegralComplexToFloatingComplex:
    case CK_PointerToIntegral:
    case CK_PointerToBoolean:
    case CK_BooleanToSignedIntegral:
    case CK_IntegralToPointer:
    case CK_IntegralToBoolean:
    case CK_IntegralToFloating:
    case CK_FloatingToIntegral:
    case CK_FloatingToBoolean:
    case CK_FloatingCast:
    case CK_FloatingToFixedPoint:
    case CK_FixedPointToFloating:
    case CK_FixedPointCast:
    case CK_FixedPointToBoolean:
    case CK_FixedPointToIntegral:
    case CK_IntegralToFixedPoint:
    case CK_ZeroToOCLOpaqueType:
    case CK_MatrixCast:
    case CK_HLSLVectorTruncation:
    case CK_HLSLArrayRValue:
      return nullptr;
    }
    llvm_unreachable("Invalid CastKind");
  }
  llvm::Constant *VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *DIE,
                                          QualType T) {
    // No need for a DefaultInitExprScope: we don't handle 'this' in a
    // constant expression.
    return Visit(DIE->getExpr(), T);
  }

  llvm::Constant *VisitExprWithCleanups(const ExprWithCleanups *E, QualType T) {
    return Visit(E->getSubExpr(), T);
  }

  llvm::Constant *VisitIntegerLiteral(const IntegerLiteral *I, QualType T) {
    return llvm::ConstantInt::get(CGM.getLLVMContext(), I->getValue());
  }
  static APValue withDestType(ASTContext &Ctx, const Expr *E, QualType SrcType,
                              QualType DestType, const llvm::APSInt &Value) {
    if (!Ctx.hasSameType(SrcType, DestType)) {
      if (DestType->isFloatingType()) {
        llvm::APFloat Result =
            llvm::APFloat(Ctx.getFloatTypeSemantics(DestType), 1);
        llvm::RoundingMode RM =
            E->getFPFeaturesInEffect(Ctx.getLangOpts()).getRoundingMode();
        if (RM == llvm::RoundingMode::Dynamic)
          RM = llvm::RoundingMode::NearestTiesToEven;
        Result.convertFromAPInt(Value, Value.isSigned(), RM);
        return APValue(Result);
      }
    }
    return APValue(Value);
  }
  llvm::Constant *EmitArrayInitialization(const InitListExpr *ILE, QualType T) {
    auto *CAT = CGM.getContext().getAsConstantArrayType(ILE->getType());
    assert(CAT && "can't emit array init for non-constant-bound array");
    uint64_t NumInitElements = ILE->getNumInits();
    const uint64_t NumElements = CAT->getZExtSize();
    for (const auto *Init : ILE->inits()) {
      if (const auto *Embed =
              dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
        NumInitElements += Embed->getDataElementCount() - 1;
        if (NumInitElements > NumElements) {
          NumInitElements = NumElements;
          break;
        }
      }
    }

    // Initialising an array requires us to automatically
    // initialise any elements that have not been initialised explicitly
    uint64_t NumInitableElts = std::min<uint64_t>(NumInitElements, NumElements);

    QualType EltType = CAT->getElementType();

    // Initialize remaining array elements.
    llvm::Constant *fillC = nullptr;
    if (const Expr *filler = ILE->getArrayFiller()) {
      fillC = Emitter.tryEmitAbstractForMemory(filler, EltType);
      if (!fillC)
        return nullptr;
    }

    // Copy initializer elements.
    SmallVector<llvm::Constant *, 16> Elts;
    if (fillC && fillC->isNullValue())
      Elts.reserve(NumInitableElts + 1);
    else
      Elts.reserve(NumElements);

    llvm::Type *CommonElementType = nullptr;
    auto Emit = [&](const Expr *Init, unsigned ArrayIndex) {
      llvm::Constant *C = nullptr;
      C = Emitter.tryEmitPrivateForMemory(Init, EltType);
      if (!C)
        return false;
      if (ArrayIndex == 0)
        CommonElementType = C->getType();
      else if (C->getType() != CommonElementType)
        CommonElementType = nullptr;
      Elts.push_back(C);
      return true;
    };

    unsigned ArrayIndex = 0;
    QualType DestTy = CAT->getElementType();
    for (unsigned i = 0; i < ILE->getNumInits(); ++i) {
      const Expr *Init = ILE->getInit(i);
      if (auto *EmbedS = dyn_cast<EmbedExpr>(Init->IgnoreParenImpCasts())) {
        StringLiteral *SL = EmbedS->getDataStringLiteral();
        llvm::APSInt Value(CGM.getContext().getTypeSize(DestTy),
                           DestTy->isUnsignedIntegerType());
        llvm::Constant *C;
        for (unsigned I = EmbedS->getStartingElementPos(),
                      N = EmbedS->getDataElementCount();
             I != EmbedS->getStartingElementPos() + N; ++I) {
          Value = SL->getCodeUnit(I);
          if (DestTy->isIntegerType()) {
            C = llvm::ConstantInt::get(CGM.getLLVMContext(), Value);
          } else {
            C = Emitter.tryEmitPrivateForMemory(
                withDestType(CGM.getContext(), Init, EmbedS->getType(), DestTy,
                             Value),
                EltType);
          }
          if (!C)
            return nullptr;
          Elts.push_back(C);
          ArrayIndex++;
        }
        if ((ArrayIndex - EmbedS->getDataElementCount()) == 0)
          CommonElementType = C->getType();
        else if (C->getType() != CommonElementType)
          CommonElementType = nullptr;
      } else {
        if (!Emit(Init, ArrayIndex))
          return nullptr;
        ArrayIndex++;
      }
    }

    llvm::ArrayType *Desired =
        cast<llvm::ArrayType>(CGM.getTypes().ConvertType(ILE->getType()));
    return EmitArrayConstant(CGM, Desired, CommonElementType, NumElements, Elts,
                             fillC);
  }
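
  // Note on the loop above (illustrative): a C23 '#embed' in an initializer,
  // e.g. 'int a[] = { 1, #embed "data.bin", 2 };', reaches this code as an
  // EmbedExpr whose code units are appended one element at a time, which is
  // why the common-element-type tracking is updated manually on that path.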
  llvm::Constant *EmitRecordInitialization(const InitListExpr *ILE,
                                           QualType T) {
    return ConstStructBuilder::BuildStruct(Emitter, ILE, T);
  }

  llvm::Constant *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E,
                                             QualType T) {
    return CGM.EmitNullConstant(T);
  }
  llvm::Constant *VisitInitListExpr(const InitListExpr *ILE, QualType T) {
    if (ILE->isTransparent())
      return Visit(ILE->getInit(0), T);

    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE, T);

    if (ILE->getType()->isRecordType())
      return EmitRecordInitialization(ILE, T);

    return nullptr;
  }
  llvm::Constant *
  VisitDesignatedInitUpdateExpr(const DesignatedInitUpdateExpr *E,
                                QualType destType) {
    auto C = Visit(E->getBase(), destType);
    if (!C)
      return nullptr;

    ConstantAggregateBuilder Const(CGM);
    Const.add(C, CharUnits::Zero(), false);

    if (!EmitDesignatedInitUpdater(Emitter, Const, CharUnits::Zero(), destType,
                                   E->getUpdater()))
      return nullptr;

    llvm::Type *ValTy = CGM.getTypes().ConvertType(destType);
    bool HasFlexibleArray = false;
    if (const auto *RT = destType->getAs<RecordType>())
      HasFlexibleArray = RT->getDecl()->hasFlexibleArrayMember();
    return Const.build(ValTy, HasFlexibleArray);
  }
  llvm::Constant *VisitCXXConstructExpr(const CXXConstructExpr *E,
                                        QualType Ty) {
    if (!E->getConstructor()->isTrivial())
      return nullptr;

    // Only default and copy/move constructors can be trivial.
    if (E->getNumArgs()) {
      assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
      assert(E->getConstructor()->isCopyOrMoveConstructor() &&
             "trivial ctor has argument but isn't a copy/move ctor");

      const Expr *Arg = E->getArg(0);
      assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
             "argument to copy ctor is of wrong type");

      // Look through the temporary; it's just converting the value to an
      // lvalue to pass it to the constructor.
      if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Arg))
        return Visit(MTE->getSubExpr(), Ty);
      // Don't try to support arbitrary lvalue-to-rvalue conversions for now.
      return nullptr;
    }

    return CGM.EmitNullConstant(Ty);
  }
  llvm::Constant *VisitStringLiteral(const StringLiteral *E, QualType T) {
    // This is a string literal initializing an array in an initializer.
    return CGM.GetConstantArrayFromStringLiteral(E);
  }

  llvm::Constant *VisitObjCEncodeExpr(const ObjCEncodeExpr *E, QualType T) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
    const ConstantArrayType *CAT = CGM.getContext().getAsConstantArrayType(T);
    assert(CAT && "String data not of constant array type!");

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(CAT->getZExtSize(), '\0');
    return llvm::ConstantDataArray::getString(VMContext, Str, false);
  }
  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E, QualType T) {
    return Visit(E->getSubExpr(), T);
  }

  llvm::Constant *VisitUnaryMinus(const UnaryOperator *U, QualType T) {
    if (llvm::Constant *C = Visit(U->getSubExpr(), T))
      if (auto *CI = dyn_cast<llvm::ConstantInt>(C))
        return llvm::ConstantInt::get(CGM.getLLVMContext(), -CI->getValue());
    return nullptr;
  }

  llvm::Constant *VisitPackIndexingExpr(const PackIndexingExpr *E, QualType T) {
    return Visit(E->getSelectedExpr(), T);
  }

  // Utility methods
  llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }
};

} // end anonymous namespace.
llvm::Constant *ConstantEmitter::validateAndPopAbstract(llvm::Constant *C,
                                                        AbstractState saved) {
  Abstract = saved.OldValue;

  assert(saved.OldPlaceholdersSize == PlaceholderAddresses.size() &&
         "created a placeholder while doing an abstract emission?");

  // No validation necessary for now.
  // No cleanup to do for now.
  return C;
}

llvm::Constant *
ConstantEmitter::tryEmitAbstractForInitializer(const VarDecl &D) {
  auto state = pushAbstract();
  auto C = tryEmitPrivateForVarInit(D);
  return validateAndPopAbstract(C, state);
}

llvm::Constant *
ConstantEmitter::tryEmitAbstract(const Expr *E, QualType destType) {
  auto state = pushAbstract();
  auto C = tryEmitPrivate(E, destType);
  return validateAndPopAbstract(C, state);
}

llvm::Constant *
ConstantEmitter::tryEmitAbstract(const APValue &value, QualType destType) {
  auto state = pushAbstract();
  auto C = tryEmitPrivate(value, destType);
  return validateAndPopAbstract(C, state);
}

llvm::Constant *ConstantEmitter::tryEmitConstantExpr(const ConstantExpr *CE) {
  if (!CE->hasAPValueResult())
    return nullptr;

  QualType RetType = CE->getType();
  if (CE->isGLValue())
    RetType = CGM.getContext().getLValueReferenceType(RetType);

  return emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(), RetType);
}
llvm::Constant *
ConstantEmitter::emitAbstract(const Expr *E, QualType destType) {
  auto state = pushAbstract();
  auto C = tryEmitPrivate(E, destType);
  C = validateAndPopAbstract(C, state);
  if (!C) {
    CGM.Error(E->getExprLoc(),
              "internal error: could not emit constant value \"abstractly\"");
    C = CGM.EmitNullConstant(destType);
  }
  return C;
}

llvm::Constant *
ConstantEmitter::emitAbstract(SourceLocation loc, const APValue &value,
                              QualType destType,
                              bool EnablePtrAuthFunctionTypeDiscrimination) {
  auto state = pushAbstract();
  auto C =
      tryEmitPrivate(value, destType, EnablePtrAuthFunctionTypeDiscrimination);
  C = validateAndPopAbstract(C, state);
  if (!C) {
    CGM.Error(loc,
              "internal error: could not emit constant value \"abstractly\"");
    C = CGM.EmitNullConstant(destType);
  }
  return C;
}
llvm::Constant *ConstantEmitter::tryEmitForInitializer(const VarDecl &D) {
  initializeNonAbstract(D.getType().getAddressSpace());
  return markIfFailed(tryEmitPrivateForVarInit(D));
}

llvm::Constant *ConstantEmitter::tryEmitForInitializer(const Expr *E,
                                                       LangAS destAddrSpace,
                                                       QualType destType) {
  initializeNonAbstract(destAddrSpace);
  return markIfFailed(tryEmitPrivateForMemory(E, destType));
}

llvm::Constant *ConstantEmitter::emitForInitializer(const APValue &value,
                                                    LangAS destAddrSpace,
                                                    QualType destType) {
  initializeNonAbstract(destAddrSpace);
  auto C = tryEmitPrivateForMemory(value, destType);
  assert(C && "couldn't emit constant value non-abstractly?");
  return C;
}
llvm::GlobalValue *ConstantEmitter::getCurrentAddrPrivate() {
  assert(!Abstract && "cannot get current address for abstract constant");

  // Make an obviously ill-formed global that should blow up compilation
  // if it survives.
  auto global = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, true,
                                         llvm::GlobalValue::PrivateLinkage,
                                         /*init*/ nullptr,
                                         /*name*/ "",
                                         /*before*/ nullptr,
                                         llvm::GlobalVariable::NotThreadLocal,
                                         CGM.getContext().getTargetAddressSpace(DestAddressSpace));

  PlaceholderAddresses.push_back(std::make_pair(nullptr, global));

  return global;
}

void ConstantEmitter::registerCurrentAddrPrivate(llvm::Constant *signal,
                                                 llvm::GlobalValue *placeholder) {
  assert(!PlaceholderAddresses.empty());
  assert(PlaceholderAddresses.back().first == nullptr);
  assert(PlaceholderAddresses.back().second == placeholder);
  PlaceholderAddresses.back().first = signal;
}
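
// Roughly how the placeholder machinery below is used: getCurrentAddrPrivate()
// above hands out an empty private global as a stand-in for the address of the
// sub-object currently being emitted; once the real global has its
// initializer, ConstantEmitter::finalize() runs ReplacePlaceholders, which
// computes an inbounds GEP to the corresponding position inside the finished
// initializer and replaces all uses of each placeholder with it.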
namespace {
struct ReplacePlaceholders {
  CodeGenModule &CGM;

  /// The base address of the global.
  llvm::Constant *Base;
  llvm::Type *BaseValueTy = nullptr;

  /// The placeholder addresses that were registered during emission.
  llvm::DenseMap<llvm::Constant *, llvm::GlobalVariable *> PlaceholderAddresses;

  /// The locations of the placeholder signals.
  llvm::DenseMap<llvm::GlobalVariable *, llvm::Constant *> Locations;

  /// The current index stack. We use a simple unsigned stack because
  /// we assume that placeholders will be relatively sparse in the
  /// initializer, but we cache the index values we find just in case.
  llvm::SmallVector<unsigned, 8> Indices;
  llvm::SmallVector<llvm::Constant *, 8> IndexValues;

  ReplacePlaceholders(CodeGenModule &CGM, llvm::Constant *base,
                      ArrayRef<std::pair<llvm::Constant *,
                                         llvm::GlobalVariable *>> addresses)
      : CGM(CGM), Base(base),
        PlaceholderAddresses(addresses.begin(), addresses.end()) {
  }
1646 void replaceInInitializer(llvm::Constant
*init
) {
1647 // Remember the type of the top-most initializer.
1648 BaseValueTy
= init
->getType();
1650 // Initialize the stack.
1651 Indices
.push_back(0);
1652 IndexValues
.push_back(nullptr);
1654 // Recurse into the initializer.
1655 findLocations(init
);
1657 // Check invariants.
1658 assert(IndexValues
.size() == Indices
.size() && "mismatch");
1659 assert(Indices
.size() == 1 && "didn't pop all indices");
1661 // Do the replacement; this basically invalidates 'init'.
1662 assert(Locations
.size() == PlaceholderAddresses
.size() &&
1663 "missed a placeholder?");
1665 // We're iterating over a hashtable, so this would be a source of
1666 // non-determinism in compiler output *except* that we're just
1667 // messing around with llvm::Constant structures, which never itself
1668 // does anything that should be visible in compiler output.
1669 for (auto &entry
: Locations
) {
1670 assert(entry
.first
->getName() == "" && "not a placeholder!");
1671 entry
.first
->replaceAllUsesWith(entry
.second
);
1672 entry
.first
->eraseFromParent();
  void findLocations(llvm::Constant *init) {
    // Recurse into aggregates.
    if (auto agg = dyn_cast<llvm::ConstantAggregate>(init)) {
      for (unsigned i = 0, e = agg->getNumOperands(); i != e; ++i) {
        Indices.push_back(i);
        IndexValues.push_back(nullptr);

        findLocations(agg->getOperand(i));

        IndexValues.pop_back();
        Indices.pop_back();
      }
      return;
    }

    // Otherwise, check for registered constants.
    while (true) {
      auto it = PlaceholderAddresses.find(init);
      if (it != PlaceholderAddresses.end()) {
        setLocation(it->second);
        break;
      }

      // Look through bitcasts or other expressions.
      if (auto expr = dyn_cast<llvm::ConstantExpr>(init)) {
        init = expr->getOperand(0);
      } else {
        break;
      }
    }
  }

  void setLocation(llvm::GlobalVariable *placeholder) {
    assert(!Locations.contains(placeholder) &&
           "already found location for placeholder!");

    // Lazily fill in IndexValues with the values from Indices.
    // We do this in reverse because we should always have a strict
    // prefix of indices from the start.
    assert(Indices.size() == IndexValues.size());
    for (size_t i = Indices.size() - 1; i != size_t(-1); --i) {
      if (IndexValues[i]) {
        // Check that the indices cached so far are consistent.
#ifndef NDEBUG
        for (size_t j = 0; j != i + 1; ++j) {
          assert(IndexValues[j] &&
                 isa<llvm::ConstantInt>(IndexValues[j]) &&
                 cast<llvm::ConstantInt>(IndexValues[j])->getZExtValue() ==
                     Indices[j]);
        }
#endif
        break;
      }

      IndexValues[i] = llvm::ConstantInt::get(CGM.Int32Ty, Indices[i]);
    }

    llvm::Constant *location = llvm::ConstantExpr::getInBoundsGetElementPtr(
        BaseValueTy, Base, IndexValues);

    Locations.insert({placeholder, location});
  }
};
} // namespace
void ConstantEmitter::finalize(llvm::GlobalVariable *global) {
  assert(InitializedNonAbstract &&
         "finalizing emitter that was used for abstract emission?");
  assert(!Finalized && "finalizing emitter multiple times");
  assert(global->getInitializer());

  // Note that we might also be Failed.
  Finalized = true;

  if (!PlaceholderAddresses.empty()) {
    ReplacePlaceholders(CGM, global, PlaceholderAddresses)
        .replaceInInitializer(global->getInitializer());
    PlaceholderAddresses.clear(); // satisfy the assert in the destructor
  }
}

ConstantEmitter::~ConstantEmitter() {
  assert((!InitializedNonAbstract || Finalized || Failed) &&
         "not finalized after being initialized for non-abstract emission");
  assert(PlaceholderAddresses.empty() && "unhandled placeholders");
}
static QualType getNonMemoryType(CodeGenModule &CGM, QualType type) {
  if (auto AT = type->getAs<AtomicType>()) {
    return CGM.getContext().getQualifiedType(AT->getValueType(),
                                             type.getQualifiers());
  }
  return type;
}
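
// For example, getNonMemoryType maps 'volatile _Atomic(int)' to
// 'volatile int'; the value is emitted against the unwrapped type, and
// emitForMemory() below adds whatever tail padding the _Atomic in-memory
// representation requires. Non-atomic types are returned unchanged.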
llvm::Constant *ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) {
  // Quickly check whether the variable can simply be default NULL-initialized,
  // so we can avoid the rest of this code, which may (for C++11) initialize
  // the memory to all NULLs anyway.
  if (!D.hasLocalStorage()) {
    QualType Ty = CGM.getContext().getBaseElementType(D.getType());
    if (Ty->isRecordType())
      if (const CXXConstructExpr *E =
              dyn_cast_or_null<CXXConstructExpr>(D.getInit())) {
        const CXXConstructorDecl *CD = E->getConstructor();
        if (CD->isTrivial() && CD->isDefaultConstructor())
          return CGM.EmitNullConstant(D.getType());
      }
  }
  InConstantContext = D.hasConstantInitialization();

  QualType destType = D.getType();
  const Expr *E = D.getInit();
  assert(E && "No initializer to emit");

  if (!destType->isReferenceType()) {
    QualType nonMemoryDestType = getNonMemoryType(CGM, destType);
    if (llvm::Constant *C = ConstExprEmitter(*this).Visit(E, nonMemoryDestType))
      return emitForMemory(C, destType);
  }

  // Try to emit the initializer.  Note that this can allow some things that
  // are not allowed by tryEmitPrivateForMemory alone.
  if (APValue *value = D.evaluateValue())
    return tryEmitPrivateForMemory(*value, destType);

  return nullptr;
}
llvm::Constant *
ConstantEmitter::tryEmitAbstractForMemory(const Expr *E, QualType destType) {
  auto nonMemoryDestType = getNonMemoryType(CGM, destType);
  auto C = tryEmitAbstract(E, nonMemoryDestType);
  return (C ? emitForMemory(C, destType) : nullptr);
}

llvm::Constant *
ConstantEmitter::tryEmitAbstractForMemory(const APValue &value,
                                          QualType destType) {
  auto nonMemoryDestType = getNonMemoryType(CGM, destType);
  auto C = tryEmitAbstract(value, nonMemoryDestType);
  return (C ? emitForMemory(C, destType) : nullptr);
}

llvm::Constant *ConstantEmitter::tryEmitPrivateForMemory(const Expr *E,
                                                         QualType destType) {
  auto nonMemoryDestType = getNonMemoryType(CGM, destType);
  llvm::Constant *C = tryEmitPrivate(E, nonMemoryDestType);
  return (C ? emitForMemory(C, destType) : nullptr);
}

llvm::Constant *ConstantEmitter::tryEmitPrivateForMemory(const APValue &value,
                                                         QualType destType) {
  auto nonMemoryDestType = getNonMemoryType(CGM, destType);
  auto C = tryEmitPrivate(value, nonMemoryDestType);
  return (C ? emitForMemory(C, destType) : nullptr);
}
/// Try to emit a constant signed pointer, given a raw pointer and the
/// destination ptrauth qualifier.
///
/// This can fail if the qualifier needs address discrimination and the
/// emitter is in an abstract mode.
llvm::Constant *
ConstantEmitter::tryEmitConstantSignedPointer(llvm::Constant *UnsignedPointer,
                                              PointerAuthQualifier Schema) {
  assert(Schema && "applying trivial ptrauth schema");

  if (Schema.hasKeyNone())
    return UnsignedPointer;

  unsigned Key = Schema.getKey();

  // Create an address placeholder if we're using address discrimination.
  llvm::GlobalValue *StorageAddress = nullptr;
  if (Schema.isAddressDiscriminated()) {
    // We can't do this if the emitter is in an abstract state.
    if (isAbstract())
      return nullptr;

    StorageAddress = getCurrentAddrPrivate();
  }

  llvm::ConstantInt *Discriminator =
      llvm::ConstantInt::get(CGM.IntPtrTy, Schema.getExtraDiscriminator());

  llvm::Constant *SignedPointer = CGM.getConstantSignedPointer(
      UnsignedPointer, Key, StorageAddress, Discriminator);

  if (Schema.isAddressDiscriminated())
    registerCurrentAddrPrivate(SignedPointer, StorageAddress);

  return SignedPointer;
}
llvm::Constant *ConstantEmitter::emitForMemory(CodeGenModule &CGM,
                                               llvm::Constant *C,
                                               QualType destType) {
  // For an _Atomic-qualified constant, we may need to add tail padding.
  if (auto AT = destType->getAs<AtomicType>()) {
    QualType destValueType = AT->getValueType();
    C = emitForMemory(CGM, C, destValueType);

    uint64_t innerSize = CGM.getContext().getTypeSize(destValueType);
    uint64_t outerSize = CGM.getContext().getTypeSize(destType);
    if (innerSize == outerSize)
      return C;

    assert(innerSize < outerSize && "emitted over-large constant for atomic");
    llvm::Constant *elts[] = {
        C,
        llvm::ConstantAggregateZero::get(
            llvm::ArrayType::get(CGM.Int8Ty, (outerSize - innerSize) / 8))};
    return llvm::ConstantStruct::getAnon(elts);
  }

  // Zero-extend bool.
  if (C->getType()->isIntegerTy(1) && !destType->isBitIntType()) {
    llvm::Type *boolTy = CGM.getTypes().ConvertTypeForMem(destType);
    llvm::Constant *Res = llvm::ConstantFoldCastOperand(
        llvm::Instruction::ZExt, C, boolTy, CGM.getDataLayout());
    assert(Res && "Constant folding must succeed");
    return Res;
  }

  if (destType->isBitIntType()) {
    ConstantAggregateBuilder Builder(CGM);
    llvm::Type *LoadStoreTy = CGM.getTypes().convertTypeForLoadStore(destType);
    // ptrtoint/inttoptr should not involve _BitInt in constant expressions, so
    // casting to ConstantInt is safe here.
    auto *CI = cast<llvm::ConstantInt>(C);
    llvm::Constant *Res = llvm::ConstantFoldCastOperand(
        destType->isSignedIntegerOrEnumerationType() ? llvm::Instruction::SExt
                                                      : llvm::Instruction::ZExt,
        CI, LoadStoreTy, CGM.getDataLayout());
    if (CGM.getTypes().typeRequiresSplitIntoByteArray(destType, C->getType())) {
      // A long _BitInt has an array of bytes as its in-memory type, so split
      // the constant into individual bytes.
      llvm::Type *DesiredTy = CGM.getTypes().ConvertTypeForMem(destType);
      llvm::APInt Value = cast<llvm::ConstantInt>(Res)->getValue();
      Builder.addBits(Value, /*OffsetInBits=*/0, /*AllowOverwrite=*/false);
      return Builder.build(DesiredTy, /*AllowOversized*/ false);
    }
    return Res;
  }

  return C;
}
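
// Illustrative summary of the memory adjustments above (exact IR shapes are
// target-dependent):
//   * an i1 'true' is widened to the i8 in-memory representation of 'bool';
//   * an _Atomic type whose value is narrower than its atomic width becomes
//     an anonymous struct whose second element is zero tail padding;
//   * a _BitInt whose in-memory type is a byte array is split into bytes via
//     ConstantAggregateBuilder::addBits.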
llvm::Constant *ConstantEmitter::tryEmitPrivate(const Expr *E,
                                                QualType destType) {
  assert(!destType->isVoidType() && "can't emit a void constant");

  if (!destType->isReferenceType())
    if (llvm::Constant *C = ConstExprEmitter(*this).Visit(E, destType))
      return C;

  Expr::EvalResult Result;

  bool Success = false;

  if (destType->isReferenceType())
    Success = E->EvaluateAsLValue(Result, CGM.getContext());
  else
    Success = E->EvaluateAsRValue(Result, CGM.getContext(), InConstantContext);

  if (Success && !Result.HasSideEffects)
    return tryEmitPrivate(Result.Val, destType);

  return nullptr;
}

llvm::Constant *CodeGenModule::getNullPointer(llvm::PointerType *T,
                                              QualType QT) {
  return getTargetCodeGenInfo().getNullPointer(*this, T, QT);
}
namespace {
/// A struct which can be used to peephole certain kinds of finalization
/// that normally happen during l-value emission.
struct ConstantLValue {
  llvm::Constant *Value;
  bool HasOffsetApplied;

  /*implicit*/ ConstantLValue(llvm::Constant *value,
                              bool hasOffsetApplied = false)
      : Value(value), HasOffsetApplied(hasOffsetApplied) {}

  /*implicit*/ ConstantLValue(ConstantAddress address)
      : ConstantLValue(address.getPointer()) {}
};

/// A helper class for emitting constant l-values.
class ConstantLValueEmitter
    : public ConstStmtVisitor<ConstantLValueEmitter, ConstantLValue> {
  CodeGenModule &CGM;
  ConstantEmitter &Emitter;
  const APValue &Value;
  QualType DestType;
  bool EnablePtrAuthFunctionTypeDiscrimination;

  // Befriend StmtVisitorBase so that we don't have to expose Visit*.
  friend StmtVisitorBase;

public:
  ConstantLValueEmitter(ConstantEmitter &emitter, const APValue &value,
                        QualType destType,
                        bool EnablePtrAuthFunctionTypeDiscrimination = true)
      : CGM(emitter.CGM), Emitter(emitter), Value(value), DestType(destType),
        EnablePtrAuthFunctionTypeDiscrimination(
            EnablePtrAuthFunctionTypeDiscrimination) {}

  llvm::Constant *tryEmit();

private:
  llvm::Constant *tryEmitAbsolute(llvm::Type *destTy);
  ConstantLValue tryEmitBase(const APValue::LValueBase &base);

  ConstantLValue VisitStmt(const Stmt *S) { return nullptr; }
  ConstantLValue VisitConstantExpr(const ConstantExpr *E);
  ConstantLValue VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
  ConstantLValue VisitStringLiteral(const StringLiteral *E);
  ConstantLValue VisitObjCBoxedExpr(const ObjCBoxedExpr *E);
  ConstantLValue VisitObjCEncodeExpr(const ObjCEncodeExpr *E);
  ConstantLValue VisitObjCStringLiteral(const ObjCStringLiteral *E);
  ConstantLValue VisitPredefinedExpr(const PredefinedExpr *E);
  ConstantLValue VisitAddrLabelExpr(const AddrLabelExpr *E);
  ConstantLValue VisitCallExpr(const CallExpr *E);
  ConstantLValue VisitBlockExpr(const BlockExpr *E);
  ConstantLValue VisitCXXTypeidExpr(const CXXTypeidExpr *E);
  ConstantLValue VisitMaterializeTemporaryExpr(
      const MaterializeTemporaryExpr *E);

  ConstantLValue emitPointerAuthSignConstant(const CallExpr *E);
  llvm::Constant *emitPointerAuthPointer(const Expr *E);
  unsigned emitPointerAuthKey(const Expr *E);
  std::pair<llvm::Constant *, llvm::ConstantInt *>
  emitPointerAuthDiscriminator(const Expr *E);

  bool hasNonZeroOffset() const {
    return !Value.getLValueOffset().isZero();
  }

  /// Return the value offset.
  llvm::Constant *getOffset() {
    return llvm::ConstantInt::get(CGM.Int64Ty,
                                  Value.getLValueOffset().getQuantity());
  }

  /// Apply the value offset to the given constant.
  llvm::Constant *applyOffset(llvm::Constant *C) {
    if (!hasNonZeroOffset())
      return C;

    return llvm::ConstantExpr::getGetElementPtr(CGM.Int8Ty, C, getOffset());
  }
};
} // namespace
llvm::Constant *ConstantLValueEmitter::tryEmit() {
  const APValue::LValueBase &base = Value.getLValueBase();

  // The destination type should be a pointer or reference
  // type, but it might also be a cast thereof.
  //
  // FIXME: the chain of casts required should be reflected in the APValue.
  // We need this in order to correctly handle things like a ptrtoint of a
  // non-zero null pointer and addrspace casts that aren't trivially
  // represented in LLVM IR.
  auto destTy = CGM.getTypes().ConvertTypeForMem(DestType);
  assert(isa<llvm::IntegerType>(destTy) || isa<llvm::PointerType>(destTy));

  // If there's no base at all, this is a null or absolute pointer,
  // possibly cast back to an integer type.
  if (!base) {
    return tryEmitAbsolute(destTy);
  }

  // Otherwise, try to emit the base.
  ConstantLValue result = tryEmitBase(base);

  // If that failed, we're done.
  llvm::Constant *value = result.Value;
  if (!value)
    return nullptr;

  // Apply the offset if necessary and not already done.
  if (!result.HasOffsetApplied) {
    value = applyOffset(value);
  }

  // Convert to the appropriate type; this could be an lvalue for
  // an integer.  FIXME: performAddrSpaceCast
  if (isa<llvm::PointerType>(destTy))
    return llvm::ConstantExpr::getPointerCast(value, destTy);

  return llvm::ConstantExpr::getPtrToInt(value, destTy);
}
/// Try to emit an absolute l-value, such as a null pointer or an integer
/// bitcast to pointer type.
llvm::Constant *
ConstantLValueEmitter::tryEmitAbsolute(llvm::Type *destTy) {
  // If we're producing a pointer, this is easy.
  auto destPtrTy = cast<llvm::PointerType>(destTy);
  if (Value.isNullPointer()) {
    // FIXME: integer offsets from non-zero null pointers.
    return CGM.getNullPointer(destPtrTy, DestType);
  }

  // Convert the integer to a pointer-sized integer before converting it
  // to a pointer.
  // FIXME: signedness depends on the original integer type.
  auto intptrTy = CGM.getDataLayout().getIntPtrType(destPtrTy);
  llvm::Constant *C;
  C = llvm::ConstantFoldIntegerCast(getOffset(), intptrTy, /*isSigned*/ false,
                                    CGM.getDataLayout());
  assert(C && "Must have folded, as Offset is a ConstantInt");
  C = llvm::ConstantExpr::getIntToPtr(C, destPtrTy);
  return C;
}
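
// Illustrative example (hypothetical source, not emitted verbatim by this
// file): an absolute pointer constant such as '(int *)42' has no l-value
// base, so it is folded along this path into something of the shape
// 'inttoptr (i64 42 to ptr)'.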
ConstantLValue
ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
  // Handle values.
  if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
    // The constant always points to the canonical declaration. We want to look
    // at properties of the most recent declaration at the point of emission.
    D = cast<ValueDecl>(D->getMostRecentDecl());

    if (D->hasAttr<WeakRefAttr>())
      return CGM.GetWeakRefReference(D).getPointer();

    auto PtrAuthSign = [&](llvm::Constant *C) {
      CGPointerAuthInfo AuthInfo;

      if (EnablePtrAuthFunctionTypeDiscrimination)
        AuthInfo = CGM.getFunctionPointerAuthInfo(DestType);

      if (AuthInfo) {
        if (hasNonZeroOffset())
          return ConstantLValue(nullptr);

        C = applyOffset(C);
        C = CGM.getConstantSignedPointer(
            C, AuthInfo.getKey(), nullptr,
            cast_or_null<llvm::ConstantInt>(AuthInfo.getDiscriminator()));
        return ConstantLValue(C, /*applied offset*/ true);
      }

      return ConstantLValue(C);
    };

    if (const auto *FD = dyn_cast<FunctionDecl>(D))
      return PtrAuthSign(CGM.getRawFunctionPointer(FD));

    if (const auto *VD = dyn_cast<VarDecl>(D)) {
      // We can never refer to a variable with local storage.
      if (!VD->hasLocalStorage()) {
        if (VD->isFileVarDecl() || VD->hasExternalStorage())
          return CGM.GetAddrOfGlobalVar(VD);

        if (VD->isLocalVarDecl()) {
          return CGM.getOrCreateStaticVarDecl(
              *VD, CGM.getLLVMLinkageVarDefinition(VD));
        }
      }
    }

    if (const auto *GD = dyn_cast<MSGuidDecl>(D))
      return CGM.GetAddrOfMSGuidDecl(GD);

    if (const auto *GCD = dyn_cast<UnnamedGlobalConstantDecl>(D))
      return CGM.GetAddrOfUnnamedGlobalConstantDecl(GCD);

    if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(D))
      return CGM.GetAddrOfTemplateParamObject(TPO);

    return nullptr;
  }

  // Handle typeid(T).
  if (TypeInfoLValue TI = base.dyn_cast<TypeInfoLValue>())
    return CGM.GetAddrOfRTTIDescriptor(QualType(TI.getType(), 0));

  // Otherwise, it must be an expression.
  return Visit(base.get<const Expr *>());
}
ConstantLValue
ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) {
  if (llvm::Constant *Result = Emitter.tryEmitConstantExpr(E))
    return Result;
  return Visit(E->getSubExpr());
}

ConstantLValue
ConstantLValueEmitter::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
  ConstantEmitter CompoundLiteralEmitter(CGM, Emitter.CGF);
  CompoundLiteralEmitter.setInConstantContext(Emitter.isInConstantContext());
  return tryEmitGlobalCompoundLiteral(CompoundLiteralEmitter, E);
}

ConstantLValue
ConstantLValueEmitter::VisitStringLiteral(const StringLiteral *E) {
  return CGM.GetAddrOfConstantStringFromLiteral(E);
}

ConstantLValue
ConstantLValueEmitter::VisitObjCEncodeExpr(const ObjCEncodeExpr *E) {
  return CGM.GetAddrOfConstantStringFromObjCEncode(E);
}
static ConstantLValue emitConstantObjCStringLiteral(const StringLiteral *S,
                                                    QualType T,
                                                    CodeGenModule &CGM) {
  auto C = CGM.getObjCRuntime().GenerateConstantString(S);
  return C.withElementType(CGM.getTypes().ConvertTypeForMem(T));
}

ConstantLValue
ConstantLValueEmitter::VisitObjCStringLiteral(const ObjCStringLiteral *E) {
  return emitConstantObjCStringLiteral(E->getString(), E->getType(), CGM);
}

ConstantLValue
ConstantLValueEmitter::VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
  assert(E->isExpressibleAsConstantInitializer() &&
         "this boxed expression can't be emitted as a compile-time constant");
  const auto *SL = cast<StringLiteral>(E->getSubExpr()->IgnoreParenCasts());
  return emitConstantObjCStringLiteral(SL, E->getType(), CGM);
}

ConstantLValue
ConstantLValueEmitter::VisitPredefinedExpr(const PredefinedExpr *E) {
  return CGM.GetAddrOfConstantStringFromLiteral(E->getFunctionName());
}

ConstantLValue
ConstantLValueEmitter::VisitAddrLabelExpr(const AddrLabelExpr *E) {
  assert(Emitter.CGF && "Invalid address of label expression outside function");
  llvm::Constant *Ptr = Emitter.CGF->GetAddrOfLabel(E->getLabel());
  return Ptr;
}
ConstantLValue
ConstantLValueEmitter::VisitCallExpr(const CallExpr *E) {
  unsigned builtin = E->getBuiltinCallee();
  if (builtin == Builtin::BI__builtin_function_start)
    return CGM.GetFunctionStart(
        E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext()));

  if (builtin == Builtin::BI__builtin_ptrauth_sign_constant)
    return emitPointerAuthSignConstant(E);

  if (builtin != Builtin::BI__builtin___CFStringMakeConstantString &&
      builtin != Builtin::BI__builtin___NSStringMakeConstantString)
    return nullptr;

  const auto *Literal = cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts());
  if (builtin == Builtin::BI__builtin___NSStringMakeConstantString) {
    return CGM.getObjCRuntime().GenerateConstantString(Literal);
  }
  // FIXME: need to deal with UCN conversion issues.
  return CGM.GetAddrOfConstantCFString(Literal);
}

ConstantLValue
ConstantLValueEmitter::emitPointerAuthSignConstant(const CallExpr *E) {
  llvm::Constant *UnsignedPointer = emitPointerAuthPointer(E->getArg(0));
  unsigned Key = emitPointerAuthKey(E->getArg(1));
  auto [StorageAddress, OtherDiscriminator] =
      emitPointerAuthDiscriminator(E->getArg(2));

  llvm::Constant *SignedPointer = CGM.getConstantSignedPointer(
      UnsignedPointer, Key, StorageAddress, OtherDiscriminator);
  return SignedPointer;
}
llvm::Constant *ConstantLValueEmitter::emitPointerAuthPointer(const Expr *E) {
  Expr::EvalResult Result;
  bool Succeeded = E->EvaluateAsRValue(Result, CGM.getContext());
  assert(Succeeded);
  (void)Succeeded;

  // The assertions here are all checked by Sema.
  assert(Result.Val.isLValue());
  if (isa<FunctionDecl>(Result.Val.getLValueBase().get<const ValueDecl *>()))
    assert(Result.Val.getLValueOffset().isZero());
  return ConstantEmitter(CGM, Emitter.CGF)
      .emitAbstract(E->getExprLoc(), Result.Val, E->getType(), false);
}

unsigned ConstantLValueEmitter::emitPointerAuthKey(const Expr *E) {
  return E->EvaluateKnownConstInt(CGM.getContext()).getZExtValue();
}

std::pair<llvm::Constant *, llvm::ConstantInt *>
ConstantLValueEmitter::emitPointerAuthDiscriminator(const Expr *E) {
  E = E->IgnoreParens();

  if (const auto *Call = dyn_cast<CallExpr>(E)) {
    if (Call->getBuiltinCallee() ==
        Builtin::BI__builtin_ptrauth_blend_discriminator) {
      llvm::Constant *Pointer = ConstantEmitter(CGM).emitAbstract(
          Call->getArg(0), Call->getArg(0)->getType());
      auto *Extra = cast<llvm::ConstantInt>(ConstantEmitter(CGM).emitAbstract(
          Call->getArg(1), Call->getArg(1)->getType()));
      return {Pointer, Extra};
    }
  }

  llvm::Constant *Result = ConstantEmitter(CGM).emitAbstract(E, E->getType());
  if (Result->getType()->isPointerTy())
    return {Result, nullptr};
  return {nullptr, cast<llvm::ConstantInt>(Result)};
}
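
// Illustrative example (hypothetical source, not from this file): for
//
//   __builtin_ptrauth_sign_constant(&f, 2,
//       __builtin_ptrauth_blend_discriminator(&g, 1234))
//
// emitPointerAuthDiscriminator() returns {&g, 1234}; the pair is forwarded to
// CodeGenModule::getConstantSignedPointer as the storage address and the
// extra discriminator.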
ConstantLValue
ConstantLValueEmitter::VisitBlockExpr(const BlockExpr *E) {
  StringRef functionName;
  if (auto CGF = Emitter.CGF)
    functionName = CGF->CurFn->getName();
  else
    functionName = "global";

  return CGM.GetAddrOfGlobalBlock(E, functionName);
}

ConstantLValue
ConstantLValueEmitter::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
  QualType T;
  if (E->isTypeOperand())
    T = E->getTypeOperand(CGM.getContext());
  else
    T = E->getExprOperand()->getType();
  return CGM.GetAddrOfRTTIDescriptor(T);
}

ConstantLValue
ConstantLValueEmitter::VisitMaterializeTemporaryExpr(
    const MaterializeTemporaryExpr *E) {
  assert(E->getStorageDuration() == SD_Static);
  const Expr *Inner = E->getSubExpr()->skipRValueSubobjectAdjustments();
  return CGM.GetAddrOfGlobalTemporary(E, Inner);
}
llvm::Constant *
ConstantEmitter::tryEmitPrivate(const APValue &Value, QualType DestType,
                                bool EnablePtrAuthFunctionTypeDiscrimination) {
  switch (Value.getKind()) {
  case APValue::None:
  case APValue::Indeterminate:
    // Out-of-lifetime and indeterminate values can be modeled as 'undef'.
    return llvm::UndefValue::get(CGM.getTypes().ConvertType(DestType));
  case APValue::LValue:
    return ConstantLValueEmitter(*this, Value, DestType,
                                 EnablePtrAuthFunctionTypeDiscrimination)
        .tryEmit();
  case APValue::Int:
    return llvm::ConstantInt::get(CGM.getLLVMContext(), Value.getInt());
  case APValue::FixedPoint:
    return llvm::ConstantInt::get(CGM.getLLVMContext(),
                                  Value.getFixedPoint().getValue());
  case APValue::ComplexInt: {
    llvm::Constant *Complex[2];

    Complex[0] = llvm::ConstantInt::get(CGM.getLLVMContext(),
                                        Value.getComplexIntReal());
    Complex[1] = llvm::ConstantInt::get(CGM.getLLVMContext(),
                                        Value.getComplexIntImag());

    // FIXME: the target may want to specify that this is packed.
    llvm::StructType *STy =
        llvm::StructType::get(Complex[0]->getType(), Complex[1]->getType());
    return llvm::ConstantStruct::get(STy, Complex);
  }
  case APValue::Float: {
    const llvm::APFloat &Init = Value.getFloat();
    if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf() &&
        !CGM.getContext().getLangOpts().NativeHalfType &&
        CGM.getContext().getTargetInfo().useFP16ConversionIntrinsics())
      return llvm::ConstantInt::get(CGM.getLLVMContext(),
                                    Init.bitcastToAPInt());
    else
      return llvm::ConstantFP::get(CGM.getLLVMContext(), Init);
  }
  case APValue::ComplexFloat: {
    llvm::Constant *Complex[2];

    Complex[0] = llvm::ConstantFP::get(CGM.getLLVMContext(),
                                       Value.getComplexFloatReal());
    Complex[1] = llvm::ConstantFP::get(CGM.getLLVMContext(),
                                       Value.getComplexFloatImag());

    // FIXME: the target may want to specify that this is packed.
    llvm::StructType *STy =
        llvm::StructType::get(Complex[0]->getType(), Complex[1]->getType());
    return llvm::ConstantStruct::get(STy, Complex);
  }
  case APValue::Vector: {
    unsigned NumElts = Value.getVectorLength();
    SmallVector<llvm::Constant *, 4> Inits(NumElts);

    for (unsigned I = 0; I != NumElts; ++I) {
      const APValue &Elt = Value.getVectorElt(I);
      if (Elt.isInt())
        Inits[I] = llvm::ConstantInt::get(CGM.getLLVMContext(), Elt.getInt());
      else if (Elt.isFloat())
        Inits[I] = llvm::ConstantFP::get(CGM.getLLVMContext(), Elt.getFloat());
      else if (Elt.isIndeterminate())
        Inits[I] = llvm::UndefValue::get(CGM.getTypes().ConvertType(
            DestType->castAs<VectorType>()->getElementType()));
      else
        llvm_unreachable("unsupported vector element type");
    }
    return llvm::ConstantVector::get(Inits);
  }
  case APValue::AddrLabelDiff: {
    const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
    const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
    llvm::Constant *LHS = tryEmitPrivate(LHSExpr, LHSExpr->getType());
    llvm::Constant *RHS = tryEmitPrivate(RHSExpr, RHSExpr->getType());
    if (!LHS || !RHS) return nullptr;

    // Compute difference
    llvm::Type *ResultType = CGM.getTypes().ConvertType(DestType);
    LHS = llvm::ConstantExpr::getPtrToInt(LHS, CGM.IntPtrTy);
    RHS = llvm::ConstantExpr::getPtrToInt(RHS, CGM.IntPtrTy);
    llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(LHS, RHS);

    // LLVM is a bit sensitive about the exact format of the
    // address-of-label difference; make sure to truncate after
    // the subtraction.
    return llvm::ConstantExpr::getTruncOrBitCast(AddrLabelDiff, ResultType);
  }
  case APValue::Struct:
  case APValue::Union:
    return ConstStructBuilder::BuildStruct(*this, Value, DestType);
  case APValue::Array: {
    const ArrayType *ArrayTy = CGM.getContext().getAsArrayType(DestType);
    unsigned NumElements = Value.getArraySize();
    unsigned NumInitElts = Value.getArrayInitializedElts();

    // Emit array filler, if there is one.
    llvm::Constant *Filler = nullptr;
    if (Value.hasArrayFiller()) {
      Filler = tryEmitAbstractForMemory(Value.getArrayFiller(),
                                        ArrayTy->getElementType());
      if (!Filler)
        return nullptr;
    }

    // Emit initializer elements.
    SmallVector<llvm::Constant *, 16> Elts;
    if (Filler && Filler->isNullValue())
      Elts.reserve(NumInitElts + 1);
    else
      Elts.reserve(NumElements);

    llvm::Type *CommonElementType = nullptr;
    for (unsigned I = 0; I < NumInitElts; ++I) {
      llvm::Constant *C = tryEmitPrivateForMemory(
          Value.getArrayInitializedElt(I), ArrayTy->getElementType());
      if (!C) return nullptr;

      if (I == 0)
        CommonElementType = C->getType();
      else if (C->getType() != CommonElementType)
        CommonElementType = nullptr;
      Elts.push_back(C);
    }

    llvm::ArrayType *Desired =
        cast<llvm::ArrayType>(CGM.getTypes().ConvertType(DestType));

    // Fix the type of incomplete arrays if the initializer isn't empty.
    if (DestType->isIncompleteArrayType() && !Elts.empty())
      Desired = llvm::ArrayType::get(Desired->getElementType(), Elts.size());

    return EmitArrayConstant(CGM, Desired, CommonElementType, NumElements, Elts,
                             Filler);
  }
  case APValue::MemberPointer:
    return CGM.getCXXABI().EmitMemberPointer(Value, DestType);
  }
  llvm_unreachable("Unknown APValue kind");
}
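
// Illustrative example (hypothetical source, not from this file): with the GNU
// labels-as-values extension, a static dispatch table such as
//
//   static const int offsets[] = { &&L1 - &&L0, &&L2 - &&L0 };
//
// produces APValue::AddrLabelDiff entries, which the case above lowers to a
// ptrtoint/sub pair truncated to the destination integer type.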
llvm::GlobalVariable *CodeGenModule::getAddrOfConstantCompoundLiteralIfEmitted(
    const CompoundLiteralExpr *E) {
  return EmittedCompoundLiterals.lookup(E);
}

void CodeGenModule::setAddrOfConstantCompoundLiteral(
    const CompoundLiteralExpr *CLE, llvm::GlobalVariable *GV) {
  bool Ok = EmittedCompoundLiterals.insert(std::make_pair(CLE, GV)).second;
  (void)Ok;
  assert(Ok && "CLE has already been emitted!");
}

ConstantAddress
CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
  assert(E->isFileScope() && "not a file-scope compound literal expr");
  ConstantEmitter emitter(*this);
  return tryEmitGlobalCompoundLiteral(emitter, E);
}
llvm::Constant *
CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
  // Member pointer constants always have a very particular form.
  const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
  const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();

  // A member function pointer.
  if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
    return getCXXABI().EmitMemberFunctionPointer(method);

  // Otherwise, a member data pointer.
  uint64_t fieldOffset = getContext().getFieldOffset(decl);
  CharUnits chars = getContext().toCharUnitsFromBits((int64_t)fieldOffset);
  return getCXXABI().EmitMemberDataPointer(type, chars);
}
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
                                               llvm::Type *baseType,
                                               const CXXRecordDecl *base);

static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
                                        const RecordDecl *record,
                                        bool asCompleteObject) {
  const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
  llvm::StructType *structure =
      (asCompleteObject ? layout.getLLVMType()
                        : layout.getBaseSubobjectLLVMType());

  unsigned numElements = structure->getNumElements();
  std::vector<llvm::Constant *> elements(numElements);

  auto CXXR = dyn_cast<CXXRecordDecl>(record);
  // Fill in all the bases.
  if (CXXR) {
    for (const auto &I : CXXR->bases()) {
      if (I.isVirtual()) {
        // Ignore virtual bases; if we're laying out for a complete
        // object, we'll lay these out later.
        continue;
      }

      const CXXRecordDecl *base =
          cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

      // Ignore empty bases.
      if (isEmptyRecordForLayout(CGM.getContext(), I.getType()) ||
          CGM.getContext()
              .getASTRecordLayout(base)
              .getNonVirtualSize()
              .isZero())
        continue;

      unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
      llvm::Type *baseType = structure->getElementType(fieldIndex);
      elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
    }
  }

  // Fill in all the fields.
  for (const auto *Field : record->fields()) {
    // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
    // will fill in later.)
    if (!Field->isBitField() &&
        !isEmptyFieldForLayout(CGM.getContext(), Field)) {
      unsigned fieldIndex = layout.getLLVMFieldNo(Field);
      elements[fieldIndex] = CGM.EmitNullConstant(Field->getType());
    }

    // For unions, stop after the first named field.
    if (record->isUnion()) {
      if (Field->getIdentifier())
        break;
      if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
        if (FieldRD->findFirstNamedDataMember())
          break;
    }
  }

  // Fill in the virtual bases, if we're working with the complete object.
  if (CXXR && asCompleteObject) {
    for (const auto &I : CXXR->vbases()) {
      const CXXRecordDecl *base =
          cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());

      // Ignore empty bases.
      if (isEmptyRecordForLayout(CGM.getContext(), I.getType()))
        continue;

      unsigned fieldIndex = layout.getVirtualBaseIndex(base);

      // We might have already laid this field out.
      if (elements[fieldIndex]) continue;

      llvm::Type *baseType = structure->getElementType(fieldIndex);
      elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
    }
  }

  // Now go through all other fields and zero them out.
  for (unsigned i = 0; i != numElements; ++i) {
    if (!elements[i])
      elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
  }

  return llvm::ConstantStruct::get(structure, elements);
}
/// Emit the null constant for a base subobject.
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
                                               llvm::Type *baseType,
                                               const CXXRecordDecl *base) {
  const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);

  // Just zero out bases that don't have any pointer to data members.
  if (baseLayout.isZeroInitializableAsBase())
    return llvm::Constant::getNullValue(baseType);

  // Otherwise, we can just use its null constant.
  return EmitNullConstant(CGM, base, /*asCompleteObject=*/false);
}
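
// Illustrative example (hypothetical type, not from this file): for
//
//   struct S { int S::*member; };
//
// the record is not zero-initializable, because the Itanium C++ ABI encodes a
// null pointer to a data member as -1 rather than 0, so the null constant for
// S is built field-by-field through the code above instead of being a plain
// zeroinitializer.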
llvm::Constant *ConstantEmitter::emitNullForMemory(CodeGenModule &CGM,
                                                   QualType T) {
  return emitForMemory(CGM, CGM.EmitNullConstant(T), T);
}

llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
  if (T->getAs<PointerType>())
    return getNullPointer(
        cast<llvm::PointerType>(getTypes().ConvertTypeForMem(T)), T);

  if (getTypes().isZeroInitializable(T))
    return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));

  if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
    llvm::ArrayType *ATy =
        cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));

    QualType ElementTy = CAT->getElementType();

    llvm::Constant *Element =
        ConstantEmitter::emitNullForMemory(*this, ElementTy);
    unsigned NumElements = CAT->getZExtSize();
    SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
    return llvm::ConstantArray::get(ATy, Array);
  }

  if (const RecordType *RT = T->getAs<RecordType>())
    return ::EmitNullConstant(*this, RT->getDecl(), /*complete object*/ true);

  assert(T->isMemberDataPointerType() &&
         "Should only see pointers to data members here!");

  return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
}

llvm::Constant *
CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) {
  return ::EmitNullConstant(*this, Record, false);
}