//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the abstract lowering for the Swift calling convention.
//
//===----------------------------------------------------------------------===//
#include "clang/CodeGen/SwiftCallingConv.h"
#include "ABIInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
using namespace clang;
using namespace CodeGen;
using namespace swiftcall;
static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return CGM.getTargetCodeGenInfo().getSwiftABIInfo();
}

static bool isPowerOf2(unsigned n) {
  return (n & (n - 1)) == 0;
}
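// Note: (n & (n - 1)) == 0 also holds for n == 0; the callers below only
// pass nonzero sizes, so that case does not arise.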
/// Given two types with the same size, try to find a common type.
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}
static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
}
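// The store size of a type excludes tail padding (e.g. 12 bytes for an LLVM
// <3 x float>), while the alloc size rounds up to the type's ABI alignment
// (16 bytes for that same vector). The lowering below uses store sizes for
// the data itself and alloc sizes when advancing past coercion-struct
// elements.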
void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

  // Atomic types.
  } else if (const auto *atomicType = type->getAs<AtomicType>()) {
    auto valueType = atomicType->getValueType();
    auto atomicSize = CGM.getContext().getTypeSizeInChars(atomicType);
    auto valueSize = CGM.getContext().getTypeSizeInChars(valueType);

    addTypedData(atomicType->getValueType(), begin);

    // Add atomic padding.
    auto atomicPadding = atomicSize - valueSize;
    if (atomicPadding > CharUnits::Zero())
      addOpaqueData(begin + valueSize, begin + atomicSize);

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto *llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}
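// As a worked illustration (not part of the original source): for a C struct
// { char c; int i; } laid out at offset 0, the record path above adds typed
// data for an i8 at offset 0 and an i32 at offset 4; the padding bytes in
// between are never mentioned and become explicit padding when the coercion
// type is built in getCoerceAndExpandTypes().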
void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}
void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto *field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    //   - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    //   - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    //   - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto *field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
                   begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    //   - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}
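// Note that for unions every member is added at the same offset, so the
// entry-merging logic in addEntry below is what ultimately reconciles the
// overlapping members (typically by collapsing the storage to opaque).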
void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
    ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}
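// Example (illustrative only): a bit-field starting at bit 6 with width 4
// occupies bits 6 through 9, i.e. parts of bytes 0 and 1, so this adds the
// opaque range [recordBegin, recordBegin + 2).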
void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}
void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type *, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  // All other types should be legal.
  return addLegalTypedData(type, begin, end);
}
void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}
void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap.  The new range might also overlap
  // with later ranges.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize =
        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0,
                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
         i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}
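// After any sequence of addEntry calls, Entries remains sorted by Begin and
// typed entries never overlap: overlapping additions are resolved above by
// splitting vectors or collapsing the affected range to opaque storage.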
/// Replace the entry of vector type at offset 'index' with a sequence
/// of its component vectors.
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    unsigned idx = index + i;
    Entries[idx].Type = eltTy;
    Entries[idx].Begin = begin;
    Entries[idx].End = begin + eltSize;
    begin += eltSize;
  }
}
/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
///
/// In other words, round down to the nearest multiple of the unit size.
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}

static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits chunkSize) {
  return getOffsetAtStartOfUnit(first, chunkSize)
      == getOffsetAtStartOfUnit(second, chunkSize);
}
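// e.g. with a unit size of 8, offsets 13 and 15 both round down to 8 and are
// therefore in the same unit, while offsets 7 and 8 are not.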
static bool isMergeableEntryType(llvm::Type *type) {
  // Opaquely-typed memory is always mergeable.
  if (type == nullptr) return true;

  // Pointers and integers are always mergeable.  In theory we should not
  // merge pointers, but (1) it doesn't currently matter in practice because
  // the chunk size is never greater than the size of a pointer and (2)
  // Swift IRGen uses integer types for a lot of things that are "really"
  // just storing pointers (like std::optional<SomePointer>).  If we ever have a
  // target that would otherwise combine pointers, we should put some effort
  // into fixing those cases in Swift IRGen and then call out pointer types
  // here.

  // Floating-point and vector types should never be merged.
  // Most such types are too large and highly-aligned to ever trigger merging
  // in practice, but it's important for the rule to cover at least 'half'
  // and 'float', as well as things like small vectors of 'i1' or 'i8'.
  return (!type->isFloatingPointTy() && !type->isVectorTy());
}
bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
                                          const StorageEntry &second,
                                          CharUnits chunkSize) {
  // Only merge entries that overlap the same chunk.  We test this first
  // despite being a bit more expensive because this is the condition that
  // tends to prevent merging.
  if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
                          chunkSize))
    return false;

  return (isMergeableEntryType(first.Type) &&
          isMergeableEntryType(second.Type));
}
void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, make them both opaque
  // and stretch one to meet the next.
  // Also, remember if there are any opaque entries.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
        llvm::IntegerType::get(CGM.getLLVMContext(),
                               CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = unitEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}
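// Illustrative example: two merged opaque bytes at offsets 6 and 7 on a
// 64-bit target (chunk size 8) collapse into a single i16 entry covering
// [6, 8), since that is the smallest aligned unit containing both bytes.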
void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}
std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

  SmallVector<llvm::Type *, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlign(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}
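// The first type is the in-memory struct (including explicit i8-array
// padding) used to address the aggregate; the second drops the padding and
// is what actually gets expanded into separate argument or return values.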
bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectly(Entries.back().Type,
                                                     asReturnValue);
  }

  SmallVector<llvm::Type *, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}
bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
                                     ArrayRef<llvm::Type *> componentTys,
                                     bool asReturnValue) {
  return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
      CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default));
}
CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  size = llvm::bit_ceil(size);
  assert(CGM.getDataLayout().getABITypeAlign(type) <= size);
  return CharUnits::fromQuantity(size);
}
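// e.g. a type with a 12-byte store size (such as <3 x float>) gets a natural
// alignment of 16 here, the next power of 2.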
bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}
bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(
      CGM, vectorSize, vectorTy->getElementType(),
      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM).isLegalVectorType(vectorSize, eltTy, numElts);
}

std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}
void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                                   llvm::SmallVectorImpl<llvm::Type*> &components) {
  // If it's already a legal vector type, use it.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::Log2_32(numElts);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs,
                      llvm::FixedVectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
      return;
    }

    // Bring vecSize down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}
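// Illustrative trace (legality is target-dependent): legalizing <7 x float>
// on a target where <4 x float> and <2 x float> are legal but <3 x float> is
// not yields the components <4 x float>, <2 x float>, float.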
bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM,
                                         const RecordDecl *record) {
  // FIXME: should we not rely on the standard computation in Sema, just in
  // case we want to diverge from the platform ABI (e.g. on targets where
  // that uses the MSVC rule)?
  return !record->canPassInRegisters();
}
static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}
static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    if (mustPassRecordIndirectly(CGM, record))
      return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment());
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment);
  }

  // Member pointer types need to be expanded, but it's a simple form of
  // expansion that 'Direct' can handle.  Note that CanBeFlattened should be
  // true for this to work.

  // 'void' needs to be ignored.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
  return ABIArgInfo::getDirect();
}
ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  auto &retInfo = FI.getReturnInfo();
  retInfo = classifyReturnType(CGM, FI.getReturnType());

  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}

// Is swifterror lowered to a register by the target ABI?
bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
  return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
}