//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the abstract lowering for the Swift calling convention.
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/SwiftCallingConv.h"
#include "ABIInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/Basic/TargetInfo.h"
#include <optional>

using namespace clang;
using namespace CodeGen;
using namespace swiftcall;
static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return CGM.getTargetCodeGenInfo().getSwiftABIInfo();
}

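/// Return whether \p n is a power of 2.  In two's complement, (n & -n)
/// isolates the lowest set bit, so the comparison below holds exactly when at
/// most one bit is set (note that this also accepts n == 0).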
static bool isPowerOf2(unsigned n) {
  return n == (n & -n);
}

/// Given two types with the same size, try to find a common type.
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}

static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
}

void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

  // Atomic types.
  } else if (const auto *atomicType = type->getAs<AtomicType>()) {
    auto valueType = atomicType->getValueType();
    auto atomicSize = CGM.getContext().getTypeSizeInChars(atomicType);
    auto valueSize = CGM.getContext().getTypeSizeInChars(valueType);

    addTypedData(atomicType->getValueType(), begin);

    // Add atomic padding.
    auto atomicPadding = atomicSize - valueSize;
    if (atomicPadding > CharUnits::Zero())
      addOpaqueData(begin + valueSize, begin + atomicSize);

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto *llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto *field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    //   - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    //   - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    //   - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto *field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
              begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    //   - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}

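/// Add the storage occupied by a bit-field as opaque data, rounded out to
/// whole bytes.  For example, a bit-field starting at bit 4 with a width of
/// 10 bits touches bytes 0 and 1, so the opaque range added is [0, 2) within
/// the record.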
void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
      ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}

void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}

void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type*, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  // All other types should be legal.
  return addLegalTypedData(type, begin, end);
}

void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}

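/// Add an entry for a range of typed (or, if \p type is null, opaque) data.
/// Entries is kept sorted by offset with non-overlapping ranges; when a new
/// range conflicts with an existing one, vectors are split into their
/// elements and any remaining conflicts are resolved by making the affected
/// entries opaque.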
void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap.  The new range might also overlap
  // with later ranges.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize =
        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0,
                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
         i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries, unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}

/// Replace the entry of vector type at offset 'index' with a sequence
/// of its component vectors.
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    Entries[index].Type = eltTy;
    Entries[index].Begin = begin;
    Entries[index].End = begin + eltSize;
    begin += eltSize;
    // Advance to the next of the newly inserted entries.
    ++index;
  }
}

/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
///
/// In other words, round down to the nearest multiple of the unit size.
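/// For example, with a unit size of 8, offsets 8..15 all round down to 8.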
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}

static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits chunkSize) {
  return getOffsetAtStartOfUnit(first, chunkSize)
      == getOffsetAtStartOfUnit(second, chunkSize);
}

static bool isMergeableEntryType(llvm::Type *type) {
  // Opaquely-typed memory is always mergeable.
  if (type == nullptr) return true;

  // Pointers and integers are always mergeable.  In theory we should not
  // merge pointers, but (1) it doesn't currently matter in practice because
  // the chunk size is never greater than the size of a pointer and (2)
  // Swift IRGen uses integer types for a lot of things that are "really"
  // just storing pointers (like std::optional<SomePointer>).  If we ever have
  // a target that would otherwise combine pointers, we should put some effort
  // into fixing those cases in Swift IRGen and then call out pointer types
  // here.

  // Floating-point and vector types should never be merged.
  // Most such types are too large and highly-aligned to ever trigger merging
  // in practice, but it's important for the rule to cover at least 'half'
  // and 'float', as well as things like small vectors of 'i1' or 'i8'.
  return (!type->isFloatingPointTy() && !type->isVectorTy());
}

bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
                                          const StorageEntry &second,
                                          CharUnits chunkSize) {
  // Only merge entries that overlap the same chunk.  We test this first
  // despite being a bit more expensive because this is the condition that
  // tends to prevent merging.
  if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
                          chunkSize))
    return false;

  return (isMergeableEntryType(first.Type) &&
          isMergeableEntryType(second.Type));
}

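// finish() runs in two passes.  The first pass merges mergeable neighbors
// that share a chunk by making them opaque and stretching them together.
// The second pass re-emits each contiguous opaque range chunk by chunk,
// using the smallest power-of-two-sized integer unit that covers the opaque
// bytes in that chunk.  For example, an opaque range covering bytes [0, 3)
// of an 8-byte chunk comes back out as a single i32 entry spanning [0, 4).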
void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, make them both opaque
  // and stretch one to meet the next.
  // Also, remember if there are any opaque entries.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
        llvm::IntegerType::get(CGM.getLLVMContext(),
                               CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}

void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}

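/// Return the coercion type, which spells out any inter-entry padding as
/// explicit [N x i8] arrays (and may be packed), together with the unpadded
/// type that contains only the entry types themselves.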
std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

  SmallVector<llvm::Type*, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlign(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}

bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectly(Entries.back().Type,
                                                     asReturnValue);
  }

  SmallVector<llvm::Type*, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}

bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
                                     ArrayRef<llvm::Type*> componentTys,
                                     bool asReturnValue) {
  return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
      CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default));
}

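/// For example, a type with a 12-byte store size (such as <3 x float>) is
/// given a natural alignment of 16 bytes.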
CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  size = llvm::bit_ceil(size);
  assert(CGM.getDataLayout().getABITypeAlign(type) <= size);
  return CharUnits::fromQuantity(size);
}

bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(
      CGM, vectorSize, vectorTy->getElementType(),
      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM).isLegalVectorType(vectorSize, eltTy, numElts);
}

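/// Split a vector type: if halving it yields a legal vector type, return that
/// half type together with a count of 2; otherwise return the element type
/// and the original element count.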
std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}

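// Example (assuming a target where <4 x float> and <2 x float> are legal but
// <3 x float> is not): legalizing a 28-byte <7 x float> produces the
// component list { <4 x float>, <2 x float>, float }.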
void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                                   llvm::SmallVectorImpl<llvm::Type*> &components) {
  // If it's already a legal vector type, use it.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::Log2_32(numElts);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs,
                      llvm::FixedVectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
      return;
    }

    // Bring candidateNumElts down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}

bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM,
                                         const RecordDecl *record) {
  // FIXME: should we not rely on the standard computation in Sema, just in
  // case we want to diverge from the platform ABI (e.g. on targets where
  // that uses the MSVC rule)?
  return !record->canPassInRegisters();
}

static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}

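/// Classify a single Swift argument or return type.  Records either go
/// indirect (when Sema says they cannot be passed in registers) or are
/// lowered with SwiftAggLowering, vectors are legalized the same way,
/// complex types are expanded (or returned directly), 'void' is ignored,
/// and everything else is passed directly.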
static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    if (mustPassRecordIndirectly(CGM, record))
      return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment());
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment);
  }

  // Member pointer types need to be expanded, but it's a simple form of
  // expansion that 'Direct' can handle.  Note that CanBeFlattened should be
  // true for this to work.

  // 'void' needs to be ignored.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
  return ABIArgInfo::getDirect();
}

ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  auto &retInfo = FI.getReturnInfo();
  retInfo = classifyReturnType(CGM, FI.getReturnType());

  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}

// Is swifterror lowered to a register by the target ABI.
bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
  return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
}