//===- Hexagon.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class HexagonABIInfo : public DefaultABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  RValue EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                   AggValueSlot Slot) const override;
  Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
                              QualType Ty) const;
  Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
                              QualType Ty) const;
  Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
                                   QualType Ty) const;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // The stack pointer on Hexagon is r29.
    return 29;
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &GCM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;
  }
};

} // namespace

void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
  unsigned RegsLeft = 6;
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, &RegsLeft);
}

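// Track how many of the six 32-bit argument registers (r0-r5) remain. A value
// of up to 32 bits consumes one register; a 64-bit value consumes an
// even-aligned register pair, which is why RegsLeft is masked down to an even
// count below. Returns true if the candidate still fits in registers.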
static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
  assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
                       " through registers");

  if (*RegsLeft == 0)
    return false;

  if (Size <= 32) {
    (*RegsLeft)--;
    return true;
  }

  if (2 <= (*RegsLeft & (~1U))) {
    *RegsLeft = (*RegsLeft & (~1U)) - 2;
    return true;
  }

  // Next available register was r5 but candidate was greater than 32-bits so
  // it has to go on the stack. However, we still consume r5.
  if (*RegsLeft == 1)
    *RegsLeft = 0;

  return false;
}

ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
                                                unsigned *RegsLeft) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 64)
      HexagonAdjustRegsLeft(Size, RegsLeft);

    if (Size > 64 && Ty->isBitIntType())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

    return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                             : ABIArgInfo::getDirect();
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);
  unsigned Align = getContext().getTypeAlign(Ty);

  if (Size > 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

  if (HexagonAdjustRegsLeft(Size, RegsLeft))
    Align = Size <= 32 ? 32 : 64;

  if (Size <= Align) {
    // Pass in the smallest viable integer type.
    Size = llvm::bit_ceil(Size);
    return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
  }
  return DefaultABIInfo::classifyArgumentType(Ty);
}

ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const TargetInfo &T = CGT.getTarget();
  uint64_t Size = getContext().getTypeSize(RetTy);

  if (RetTy->getAs<VectorType>()) {
    // HVX vectors are returned in vector registers or register pairs.
    if (T.hasFeature("hvx")) {
      assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"));
      uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
      if (Size == VecSize || Size == 2*VecSize)
        return ABIArgInfo::getDirectInReg();
    }
    // Large vector types should be returned via memory.
    if (Size > 64)
      return getNaturalAlignIndirect(RetTy);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (Size > 64 && RetTy->isBitIntType())
      return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

    return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                                : ABIArgInfo::getDirect();
  }

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 8 bytes are returned in registers, other aggregates
  // are returned indirectly.
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    Size = llvm::bit_ceil(Size);
    return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
}

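// Helper for the Linux/musl va_arg lowering below: the argument is known to
// live in the overflow (stack) area, so align the overflow area pointer if
// needed, use it as the argument address, and advance it past the argument.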
Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
                                            Address VAListAddr,
                                            QualType Ty) const {
  // Load the overflow area pointer.
  Address __overflow_area_pointer_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
  llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
      __overflow_area_pointer_p, "__overflow_area_pointer");

  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 4) {
    // Alignment should be a power of 2.
    assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!");

    // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);

    // Add offset to the current pointer to access the argument.
    __overflow_area_pointer =
        CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer, Offset);
    llvm::Value *AsInt =
        CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);

    // Create a mask which should be "AND"ed
    // with (overflow_arg_area + align - 1).
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
    __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
        CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
        "__overflow_area_pointer.align");
  }

  // Get the type of the argument from memory and bitcast
  // the overflow area pointer to the argument type.
  llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
  Address AddrTyped =
      Address(__overflow_area_pointer, PTy, CharUnits::fromQuantity(Align));

  // Round up to the minimum stack alignment for varargs which is 4 bytes.
  uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);

  __overflow_area_pointer = CGF.Builder.CreateGEP(
      CGF.Int8Ty, __overflow_area_pointer,
      llvm::ConstantInt::get(CGF.Int32Ty, Offset),
      "__overflow_area_pointer.next");
  CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);

  return AddrTyped;
}

Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
                                            Address VAListAddr,
                                            QualType Ty) const {
  // FIXME: Need to handle alignment
  llvm::Type *BP = CGF.Int8PtrTy;
  CGBuilderTy &Builder = CGF.Builder;
  Address VAListAddrAsBPP = VAListAddr.withElementType(BP);
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  // Handle address alignment for type alignment > 32 bits.
  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
  if (TyAlign > 4) {
    assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
  }
  Address AddrTyped =
      Address(Addr, CGF.ConvertType(Ty), CharUnits::fromQuantity(TyAlign));

  uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr = Builder.CreateGEP(
      CGF.Int8Ty, Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
                                                 Address VAListAddr,
                                                 QualType Ty) const {
  int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;

  if (ArgSize > 8)
    return EmitVAArgFromMemory(CGF, VAListAddr, Ty);

  // Here we have to check whether the argument is in the register area or in
  // the overflow area. If the saved register area pointer + argsize rounded up
  // to alignment > saved register area end pointer, the argument is in the
  // overflow area.
  unsigned RegsLeft = 6;
  Ty = CGF.getContext().getCanonicalType(Ty);
  (void)classifyArgumentType(Ty, &RegsLeft);

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  // Get the rounded size of the argument. GCC does not allow a vararg of
  // size < 4 bytes. We follow the same logic here.
  ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
  int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;

  // Argument may be in the saved register area.
  CGF.EmitBlock(MaybeRegBlock);

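  // The va_list used here is a three-field struct, accessed by field index
  // below:
  //   0: __current_saved_reg_area_pointer
  //   1: __saved_reg_area_end_pointer
  //   2: __overflow_area_pointer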
  // Load the current saved register area pointer.
  Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
      VAListAddr, 0, "__current_saved_reg_area_pointer_p");
  llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
      __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");

  // Load the saved register area end pointer.
  Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
      VAListAddr, 1, "__saved_reg_area_end_pointer_p");
  llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
      __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");

  // If the size of the argument is > 4 bytes, check if the stack
  // location is aligned to 8 bytes.
  if (ArgAlign > 4) {
    llvm::Value *__current_saved_reg_area_pointer_int =
        CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
                                   CGF.Int32Ty);

    __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
        __current_saved_reg_area_pointer_int,
        llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
        "align_current_saved_reg_area_pointer");

    __current_saved_reg_area_pointer_int =
        CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
                              "align_current_saved_reg_area_pointer");

    __current_saved_reg_area_pointer =
        CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
                                   __current_saved_reg_area_pointer->getType(),
                                   "align_current_saved_reg_area_pointer");
  }

  llvm::Value *__new_saved_reg_area_pointer =
      CGF.Builder.CreateGEP(CGF.Int8Ty, __current_saved_reg_area_pointer,
                            llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
                            "__new_saved_reg_area_pointer");

  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
                                         __saved_reg_area_end_pointer);

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);

  // Argument in saved register area.
  // Implement the block where the argument is in the register saved area.
  CGF.EmitBlock(InRegBlock);

  llvm::Type *PTy = CGF.ConvertType(Ty);
  llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
      __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));

  CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
                          __current_saved_reg_area_pointer_p);

  CGF.EmitBranch(ContBlock);

  // Argument in overflow area.
  // Implement the block where the argument is in the overflow area.
  CGF.EmitBlock(OnStackBlock);

  // Load the overflow area pointer.
  Address __overflow_area_pointer_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
  llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
      __overflow_area_pointer_p, "__overflow_area_pointer");

  // Align the overflow area pointer according to the alignment of the
  // argument.
  if (ArgAlign > 4) {
    llvm::Value *__overflow_area_pointer_int =
        CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);

    __overflow_area_pointer_int =
        CGF.Builder.CreateAdd(__overflow_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
                              "align_overflow_area_pointer");

    __overflow_area_pointer_int =
        CGF.Builder.CreateAnd(__overflow_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
                              "align_overflow_area_pointer");

    __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
        __overflow_area_pointer_int, __overflow_area_pointer->getType(),
        "align_overflow_area_pointer");
  }

  // Get the pointer for the next argument in the overflow area and store it
  // to the overflow area pointer.
  llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
      CGF.Int8Ty, __overflow_area_pointer,
      llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
      "__overflow_area_pointer.next");

  CGF.Builder.CreateStore(__new_overflow_area_pointer,
                          __overflow_area_pointer_p);

  CGF.Builder.CreateStore(__new_overflow_area_pointer,
                          __current_saved_reg_area_pointer_p);

  // Bitcast the overflow area pointer to the type of argument.
  llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
      __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));

  CGF.EmitBranch(ContBlock);

  // Get the correct pointer to load the variable argument.
  // Implement the ContBlock.
  CGF.EmitBlock(ContBlock);

  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *MemPTy = llvm::PointerType::getUnqual(MemTy);
  llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
  ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
  ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);

  return Address(ArgAddr, MemTy, CharUnits::fromQuantity(ArgAlign));
}

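// Select the va_arg lowering: musl-based Hexagon targets use the register
// save area scheme above (EmitVAArgForHexagonLinux); all other Hexagon
// targets use the simple pointer-bump scheme in EmitVAArgForHexagon.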
RValue HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty, AggValueSlot Slot) const {

  if (getTarget().getTriple().isMusl())
    return CGF.EmitLoadOfAnyValue(
        CGF.MakeAddrLValue(EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty), Ty),
        Slot);

  return CGF.EmitLoadOfAnyValue(
      CGF.MakeAddrLValue(EmitVAArgForHexagon(CGF, VAListAddr, Ty), Ty), Slot);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createHexagonTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<HexagonTargetCodeGenInfo>(CGM.getTypes());
}