//===- RISCV.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// RISC-V ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits. Note that the target
  // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
  // with soft float ABI has FLen==0).
  unsigned FLen;
  const int NumArgGPRs;
  const int NumArgFPRs;
  const bool EABI;
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen,
               bool EABI)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen), NumArgGPRs(EABI ? 6 : 8),
        NumArgFPRs(FLen != 0 ? 8 : 0), EABI(EABI) {}
  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override it.
  void computeInfo(CGFunctionInfo &FI) const override;
  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;

  ABIArgInfo extendType(QualType Ty, llvm::Type *CoerceTy = nullptr) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;

  ABIArgInfo coerceVLSVector(QualType Ty) const;

  using ABIInfo::appendAttributeMangling;
  void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
                               raw_ostream &Out) const override;
  void appendAttributeMangling(StringRef AttrStr,
                               raw_ostream &Out) const override;
};
} // end anonymous namespace

void RISCVABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
                                           unsigned Index,
                                           raw_ostream &Out) const {
  appendAttributeMangling(Attr->getFeatureStr(Index), Out);
}

void RISCVABIInfo::appendAttributeMangling(StringRef AttrStr,
                                           raw_ostream &Out) const {
  if (AttrStr == "default") {
    Out << ".default";
    return;
  }

  Out << '.';

  SmallVector<StringRef, 8> Attrs;
  AttrStr.split(Attrs, ';');

  // Only consider the arch string.
  StringRef ArchStr;
  for (auto &Attr : Attrs) {
    if (Attr.starts_with("arch="))
      ArchStr = Attr;
  }

  // Extract features string.
  SmallVector<StringRef, 8> Features;
  ArchStr.consume_front("arch=");
  ArchStr.split(Features, ',');

  llvm::stable_sort(Features);

  for (auto Feat : Features) {
    Feat.consume_front("+");
    Out << "_" << Feat;
  }
}

void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyReturnType indicated the value should
  // be passed indirectly, or if the type is a scalar larger than 2*XLen bits
  // and not a complex type with elements <= FLen. E.g. fp128 is passed
  // directly in LLVM IR, relying on the backend lowering code to rewrite the
  // argument list and pass it indirectly on RV32.
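  // For example, on RV32 (XLen == 32) a 'long double' (IEEE fp128, 128 bits)
  // is returned indirectly, while with FLen == 64 a '_Complex double' (also
  // 128 bits) stays direct since each element fits in an FPR.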
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  if (!IsRetIndirect && RetTy->isScalarType() &&
      getContext().getTypeSize(RetTy) > (2 * XLen)) {
    if (RetTy->isComplexType() && FLen) {
      QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
      IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
    } else {
      // This is a normal scalar > 2*XLen, such as fp128 on RV32.
      IsRetIndirect = true;
    }
  }

  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = NumArgFPRs;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}

// Returns true if the struct is a potential candidate for the floating point
// calling convention. If this function returns true, the caller is
// responsible for checking that if there is only a single field then that
// field is a float.
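// For example (assuming FLen == 64), 'struct { double d; }' and
// 'struct { float f; int32_t i; }' flatten to eligible fp and int+fp
// layouts, while 'struct { int32_t a; int32_t b; }' (int+int) is rejected.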
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  CharUnits &Field1Off,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers. Handling of half
    // precision values has been specified in the ABI, so don't block those.
    if (IsFloat && Size > FLen)
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      Field1Off = CurOff;
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    return false;
  }

  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getZExtSize();
    QualType EltTy = ATy->getElementType();
    // Non-zero-length arrays of empty records make the struct ineligible for
    // the FP calling convention in C++.
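    // e.g. given 'struct Empty {};', a C++ struct containing members
    // 'Empty e[1]; float f;' is not eligible.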
    if (const auto *RTy = EltTy->getAs<RecordType>()) {
      if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
          isEmptyRecord(getContext(), EltTy, true, true))
        return false;
    }
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true, true))
      return true;
    const RecordDecl *RD = RTy->getDecl();
    // Unions aren't eligible unless they're empty (which is caught above).
    if (RD->isUnion())
      return false;
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const CXXBaseSpecifier &B : CXXRD->bases()) {
        const auto *BDecl =
            cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
        CharUnits BaseOff = Layout.getBaseClassOffset(BDecl);
        bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
                                                  Field1Ty, Field1Off, Field2Ty,
                                                  Field2Off);
        if (!Ret)
          return false;
      }
    }
    int ZeroWidthBitFieldCount = 0;
    for (const FieldDecl *FD : RD->fields()) {
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Allow a bitfield with a type wider than XLen as long as the
        // bit-width is XLen or less.
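        // e.g. on RV32, 'long long ll : 32' is treated as a 32-bit int
        // field, so 'struct { long long ll : 32; float f; }' can still
        // qualify as int+fp.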
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }

      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;

      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs, but are ignored for a struct with an fp field and
      // any number of zero-width bitfields.
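      // e.g. 'struct { float f; int : 0; }' remains eligible (a single fp
      // field), but 'struct { float a; int : 0; float b; }' does not.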
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;
    }
    return Field1Ty != nullptr;
  }

  return false;
}

// Determine if a struct is eligible for passing according to the floating
// point calling convention (i.e., when flattened it contains a single fp
// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
// NeededArgGPRs are incremented appropriately.
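// For example (assuming FLen == 64), 'struct { double d; int32_t i; }'
// yields NeededArgFPRs == 1 and NeededArgGPRs == 1.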
bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                            CharUnits &Field1Off,
                                            llvm::Type *&Field2Ty,
                                            CharUnits &Field2Off,
                                            int &NeededArgGPRs,
                                            int &NeededArgFPRs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededArgGPRs = 0;
  NeededArgFPRs = 0;
  bool IsCandidate = detectFPCCEligibleStructHelper(
      Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
  if (!Field1Ty)
    return false;
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (!IsCandidate)
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field1Ty)
    NeededArgGPRs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field2Ty)
    NeededArgGPRs++;
  return true;
}

// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
// appropriate coerceToType and unpaddedCoerceToType.
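// For example, flattening '_Complex double' (with FLen == 64) produces
// Field1Ty == Field2Ty == double at offsets 0 and 8, giving a coerceToType
// of '{ double, double }' with no padding elements.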
ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
  CharUnits Field1End = Field1Off +
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1End)
    Padding = Field2Off - Field1End;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}

// Fixed-length RVV vectors are represented as scalable vectors in function
// args/return and must be coerced from fixed vectors.
ABIArgInfo RISCVABIInfo::coerceVLSVector(QualType Ty) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

  auto VScale =
      getContext().getTargetInfo().getVScaleRange(getContext().getLangOpts());

  unsigned NumElts = VT->getNumElements();
  llvm::Type *EltType = llvm::Type::getInt1Ty(getVMContext());
  switch (VT->getVectorKind()) {
  case VectorKind::RVVFixedLengthMask_1:
    break;
  case VectorKind::RVVFixedLengthMask_2:
    NumElts *= 2;
    break;
  case VectorKind::RVVFixedLengthMask_4:
    NumElts *= 4;
    break;
  case VectorKind::RVVFixedLengthMask:
    NumElts *= 8;
    break;
  default:
    assert(VT->getVectorKind() == VectorKind::RVVFixedLengthData &&
           "Unexpected vector kind");
    EltType = CGT.ConvertType(VT->getElementType());
  }

  // MinNumElts is simplified from the equation:
  //   NumElts / VScale =
  //       (EltSize * NumElts / (VScale * RVVBitsPerBlock))
  //           * (RVVBitsPerBlock / EltSize)
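  // e.g. with -mrvv-vector-bits=128 (so VScale->first == 128/64 == 2), a
  // fixed-length vector of 8 x i32 is coerced to <vscale x 4 x i32>.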
  llvm::ScalableVectorType *ResType =
      llvm::ScalableVectorType::get(EltType, NumElts / VScale->first);
  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
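  // e.g. a C++ class with a user-provided copy constructor or destructor is
  // passed by address rather than in registers.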
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  uint64_t Size = getContext().getTypeSize(Ty);

  // Ignore empty structs/unions whose size is zero. In C++ the calling
  // convention requires empty structs/unions to be sized types, so they are
  // not ignored there.
  if (isEmptyRecord(getContext(), Ty, true) && Size == 0)
    return ABIArgInfo::getIgnore();

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
      FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed directly rather than
  // using CoerceAndExpand.
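  // e.g. with FLen == 64, a fixed '_Complex float' or '_Complex double'
  // argument consumes two FPRs when passed this way.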
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs = 0;
    int NeededArgFPRs = 0;
    bool IsCandidate =
        detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
                                 NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
  // TODO: To be compatible with GCC's behavior, we currently don't align
  // registers when using the ILP32E calling convention. This behavior may
  // change once RV32E/ILP32E is ratified.
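  // e.g. on RV32 (non-EABI), a variadic 'double' passed while an odd number
  // of GPRs remain skips one register so the pair starts at an even register,
  // consuming 3 GPRs in total.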
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (EABI && XLen == 32 ? 0 : (ArgGPRsLeft % 2));
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  if (NeededArgGPRs > ArgGPRsLeft) {
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width.
    if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
      return extendType(Ty, CGT.ConvertType(Ty));
    }

    if (const auto *EIT = Ty->getAs<BitIntType>()) {
      if (EIT->getNumBits() < XLen)
        return extendType(Ty, CGT.ConvertType(Ty));
      if (EIT->getNumBits() > 128 ||
          (!getContext().getTargetInfo().hasInt128Type() &&
           EIT->getNumBits() > 64))
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    }

    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = Ty->getAs<VectorType>())
    if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
        VT->getVectorKind() == VectorKind::RVVFixedLengthMask ||
        VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
        VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
        VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4)
      return coerceVLSVector(Ty);

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is required.
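    // e.g. on RV64, a 12-byte struct with 4-byte alignment is coerced to
    // '[2 x i64]', while a 16-byte struct with 16-byte alignment becomes a
    // single 'i128'.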
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}

RValue RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                               QualType Ty, AggValueSlot Slot) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true))
    return Slot.asRValue();

  auto TInfo = getContext().getTypeInfoInChars(Ty);

  // TODO: To be compatible with GCC's behaviors, we force arguments with
  // 2×XLEN-bit alignment and size at most 2×XLEN bits like `long long`,
  // `unsigned long long` and `double` to have 4-byte alignment. This
  // behavior may be changed when RV32E/ILP32E is ratified.
  if (EABI && XLen == 32)
    TInfo.Align = std::min(TInfo.Align, CharUnits::fromQuantity(4));

  // Arguments bigger than 2*XLen bits are passed indirectly.
  bool IsIndirect = TInfo.Width > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo, SlotSize,
                          /*AllowHigherAlign=*/true, Slot);
}

ABIArgInfo RISCVABIInfo::extendType(QualType Ty, llvm::Type *CoerceTy) const {
  int TySize = getContext().getTypeSize(Ty);
  // The RV64 ABI requires unsigned 32-bit integers to be sign extended.
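  // e.g. an 'unsigned int' argument on RV64 is marked 'signext', matching
  // the LP64 convention of keeping 32-bit values sign-extended in registers.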
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty, CoerceTy);
  return ABIArgInfo::getExtend(Ty, CoerceTy);
}

namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen, bool EABI)
      : TargetCodeGenInfo(
            std::make_unique<RISCVABIInfo>(CGT, XLen, FLen, EABI)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    auto *Fn = cast<llvm::Function>(GV);

    if (CGM.getCodeGenOpts().CFProtectionReturn)
      Fn->addFnAttr("hw-shadow-stack");

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine: Kind = "machine"; break;
    }

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // namespace

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen,
                                      unsigned FLen, bool EABI) {
  return std::make_unique<RISCVTargetCodeGenInfo>(CGM.getTypes(), XLen, FLen,
                                                  EABI);
}