//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_CODEGEN_CGCALL_H
#define LLVM_CLANG_LIB_CODEGEN_CGCALL_H

#include "CGValue.h"
#include "EHScopeStack.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "llvm/IR/Value.h"

namespace clang {
class FunctionDecl;
class TargetOptions;
class VarDecl;

namespace CodeGen {

/// Abstract information about a function or function prototype.
class CGCalleeInfo {
  /// The function prototype of the callee.
  const FunctionProtoType *CalleeProtoTy;
  /// The function declaration of the callee.
  GlobalDecl CalleeDecl;

public:
  explicit CGCalleeInfo() : CalleeProtoTy(nullptr) {}
  CGCalleeInfo(const FunctionProtoType *calleeProtoTy, GlobalDecl calleeDecl)
      : CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
  CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
      : CalleeProtoTy(calleeProtoTy) {}
  CGCalleeInfo(GlobalDecl calleeDecl)
      : CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}

  const FunctionProtoType *getCalleeFunctionProtoType() const {
    return CalleeProtoTy;
  }
  const GlobalDecl getCalleeDecl() const { return CalleeDecl; }
};
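
// Illustrative sketch of how a CGCalleeInfo is typically formed; `FD` (a
// FunctionDecl *) and `ProtoTy` (a FunctionProtoType *) stand for values
// assumed to be in scope at the call site:
//
//   CGCalleeInfo FromDecl(GlobalDecl(FD));  // the declaration is known
//   CGCalleeInfo FromProto(ProtoTy);        // only the prototype is known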

/// All available information about a concrete callee.
class CGCallee {
  enum class SpecialKind : uintptr_t {
    Invalid,
    Builtin,
    PseudoDestructor,
    Virtual,

    Last = Virtual
  };

  struct BuiltinInfoStorage {
    const FunctionDecl *Decl;
    unsigned ID;
  };
  struct PseudoDestructorInfoStorage {
    const CXXPseudoDestructorExpr *Expr;
  };
  struct VirtualInfoStorage {
    const CallExpr *CE;
    GlobalDecl MD;
    Address Addr;
    llvm::FunctionType *FTy;
  };

  SpecialKind KindOrFunctionPointer;
  union {
    CGCalleeInfo AbstractInfo;
    BuiltinInfoStorage BuiltinInfo;
    PseudoDestructorInfoStorage PseudoDestructorInfo;
    VirtualInfoStorage VirtualInfo;
  };
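
  // For ordinary (non-special) callees, KindOrFunctionPointer holds the
  // function pointer itself, reinterpreted as a SpecialKind; any value
  // greater than SpecialKind::Last denotes a pointer (see isOrdinary() and
  // getFunctionPointer() below).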

  explicit CGCallee(SpecialKind kind) : KindOrFunctionPointer(kind) {}

  CGCallee(const FunctionDecl *builtinDecl, unsigned builtinID)
      : KindOrFunctionPointer(SpecialKind::Builtin) {
    BuiltinInfo.Decl = builtinDecl;
    BuiltinInfo.ID = builtinID;
  }

public:
  CGCallee() : KindOrFunctionPointer(SpecialKind::Invalid) {}

  /// Construct a callee. Call this constructor directly when this
  /// isn't a direct call.
  CGCallee(const CGCalleeInfo &abstractInfo, llvm::Value *functionPtr)
      : KindOrFunctionPointer(
            SpecialKind(reinterpret_cast<uintptr_t>(functionPtr))) {
    AbstractInfo = abstractInfo;
    assert(functionPtr && "configuring callee without function pointer");
    assert(functionPtr->getType()->isPointerTy());
  }

  static CGCallee forBuiltin(unsigned builtinID,
                             const FunctionDecl *builtinDecl) {
    CGCallee result(SpecialKind::Builtin);
    result.BuiltinInfo.Decl = builtinDecl;
    result.BuiltinInfo.ID = builtinID;
    return result;
  }

  static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E) {
    CGCallee result(SpecialKind::PseudoDestructor);
    result.PseudoDestructorInfo.Expr = E;
    return result;
  }

  static CGCallee forDirect(llvm::Constant *functionPtr,
                            const CGCalleeInfo &abstractInfo = CGCalleeInfo()) {
    return CGCallee(abstractInfo, functionPtr);
  }

  static CGCallee forDirect(llvm::FunctionCallee functionPtr,
                            const CGCalleeInfo &abstractInfo = CGCalleeInfo()) {
    return CGCallee(abstractInfo, functionPtr.getCallee());
  }

  static CGCallee forVirtual(const CallExpr *CE, GlobalDecl MD, Address Addr,
                             llvm::FunctionType *FTy) {
    CGCallee result(SpecialKind::Virtual);
    result.VirtualInfo.CE = CE;
    result.VirtualInfo.MD = MD;
    result.VirtualInfo.Addr = Addr;
    result.VirtualInfo.FTy = FTy;
    return result;
  }

  bool isBuiltin() const {
    return KindOrFunctionPointer == SpecialKind::Builtin;
  }
  const FunctionDecl *getBuiltinDecl() const {
    assert(isBuiltin());
    return BuiltinInfo.Decl;
  }
  unsigned getBuiltinID() const {
    assert(isBuiltin());
    return BuiltinInfo.ID;
  }

  bool isPseudoDestructor() const {
    return KindOrFunctionPointer == SpecialKind::PseudoDestructor;
  }
  const CXXPseudoDestructorExpr *getPseudoDestructorExpr() const {
    assert(isPseudoDestructor());
    return PseudoDestructorInfo.Expr;
  }

  bool isOrdinary() const {
    return uintptr_t(KindOrFunctionPointer) > uintptr_t(SpecialKind::Last);
  }
  CGCalleeInfo getAbstractInfo() const {
    if (isVirtual())
      return VirtualInfo.MD;
    assert(isOrdinary());
    return AbstractInfo;
  }
  llvm::Value *getFunctionPointer() const {
    assert(isOrdinary());
    return reinterpret_cast<llvm::Value *>(uintptr_t(KindOrFunctionPointer));
  }
  void setFunctionPointer(llvm::Value *functionPtr) {
    assert(isOrdinary());
    KindOrFunctionPointer =
        SpecialKind(reinterpret_cast<uintptr_t>(functionPtr));
  }

  bool isVirtual() const {
    return KindOrFunctionPointer == SpecialKind::Virtual;
  }
  const CallExpr *getVirtualCallExpr() const {
    assert(isVirtual());
    return VirtualInfo.CE;
  }
  GlobalDecl getVirtualMethodDecl() const {
    assert(isVirtual());
    return VirtualInfo.MD;
  }
  Address getThisAddress() const {
    assert(isVirtual());
    return VirtualInfo.Addr;
  }
  llvm::FunctionType *getVirtualFunctionType() const {
    assert(isVirtual());
    return VirtualInfo.FTy;
  }

  /// If this is a delayed callee computation of some sort, prepare
  /// a concrete callee.
  CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const;
};
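
// Illustrative usage sketch; `CGM`, `CGF`, `FD`, `MD`, `CE`, `ThisAddr`, and
// `FnTy` stand for values assumed to be in scope at the call site:
//
//   // Direct call to a known function.
//   llvm::FunctionCallee RawFn = CGM.CreateRuntimeFunction(FnTy, "callee");
//   CGCallee Direct = CGCallee::forDirect(RawFn, CGCalleeInfo(GlobalDecl(FD)));
//
//   // Virtual call: record the call expression, method, and `this` address so
//   // the vtable load can be emitted when the callee is made concrete.
//   CGCallee Virt = CGCallee::forVirtual(CE, GlobalDecl(MD), ThisAddr, FnTy);
//   CGCallee Concrete = Virt.prepareConcreteCallee(CGF);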

struct CallArg {
private:
  union {
    RValue RV;
    LValue LV; /// The argument is semantically a load from this l-value.
  };
  bool HasLV;

  /// A data-flow flag to make sure getRValue and/or copyInto are not
  /// called twice for duplicated IR emission.
  mutable bool IsUsed;

public:
  QualType Ty;
  CallArg(RValue rv, QualType ty)
      : RV(rv), HasLV(false), IsUsed(false), Ty(ty) {}
  CallArg(LValue lv, QualType ty)
      : LV(lv), HasLV(true), IsUsed(false), Ty(ty) {}
  bool hasLValue() const { return HasLV; }
  QualType getType() const { return Ty; }

  /// \returns an independent RValue. If the CallArg contains an LValue,
  /// a temporary copy is returned.
  RValue getRValue(CodeGenFunction &CGF) const;

  LValue getKnownLValue() const {
    assert(HasLV && !IsUsed);
    return LV;
  }
  RValue getKnownRValue() const {
    assert(!HasLV && !IsUsed);
    return RV;
  }
  void setRValue(RValue _RV) {
    assert(!HasLV);
    RV = _RV;
  }

  bool isAggregate() const { return HasLV || RV.isAggregate(); }

  void copyInto(CodeGenFunction &CGF, Address A) const;
};

/// CallArgList - Type for representing both the value and type of
/// arguments in a call.
class CallArgList : public SmallVector<CallArg, 8> {
public:
  CallArgList() = default;

  struct Writeback {
    /// The original argument. Note that the argument l-value
    /// is potentially null.
    LValue Source;

    /// The temporary alloca.
    Address Temporary;

    /// A value to "use" after the writeback, or null.
    llvm::Value *ToUse;
  };

  struct CallArgCleanup {
    EHScopeStack::stable_iterator Cleanup;

    /// The "is active" insertion point. This instruction is temporary and
    /// will be removed after insertion.
    llvm::Instruction *IsActiveIP;
  };

  void add(RValue rvalue, QualType type) { push_back(CallArg(rvalue, type)); }

  void addUncopiedAggregate(LValue LV, QualType type) {
    push_back(CallArg(LV, type));
  }

  /// Add all the arguments from another CallArgList to this one. After doing
  /// this, the old CallArgList retains its list of arguments, but must not
  /// be used to emit a call.
  void addFrom(const CallArgList &other) {
    insert(end(), other.begin(), other.end());
    Writebacks.insert(Writebacks.end(), other.Writebacks.begin(),
                      other.Writebacks.end());
    CleanupsToDeactivate.insert(CleanupsToDeactivate.end(),
                                other.CleanupsToDeactivate.begin(),
                                other.CleanupsToDeactivate.end());
    assert(!(StackBase && other.StackBase) && "can't merge stackbases");
    if (!StackBase)
      StackBase = other.StackBase;
  }

  void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse) {
    Writeback writeback = {srcLV, temporary, toUse};
    Writebacks.push_back(writeback);
  }

  bool hasWritebacks() const { return !Writebacks.empty(); }

  typedef llvm::iterator_range<SmallVectorImpl<Writeback>::const_iterator>
      writeback_const_range;

  writeback_const_range writebacks() const {
    return writeback_const_range(Writebacks.begin(), Writebacks.end());
  }

  void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup,
                                 llvm::Instruction *IsActiveIP) {
    CallArgCleanup ArgCleanup;
    ArgCleanup.Cleanup = Cleanup;
    ArgCleanup.IsActiveIP = IsActiveIP;
    CleanupsToDeactivate.push_back(ArgCleanup);
  }

  ArrayRef<CallArgCleanup> getCleanupsToDeactivate() const {
    return CleanupsToDeactivate;
  }

  void allocateArgumentMemory(CodeGenFunction &CGF);
  llvm::Instruction *getStackBase() const { return StackBase; }
  void freeArgumentMemory(CodeGenFunction &CGF) const;

  /// Returns if we're using an inalloca struct to pass arguments in
  /// memory.
  bool isUsingInAlloca() const { return StackBase; }

private:
  SmallVector<Writeback, 1> Writebacks;

  /// Deactivate these cleanups immediately before making the call. This
  /// is used to cleanup objects that are owned by the callee once the call
  /// occurs.
  SmallVector<CallArgCleanup, 1> CleanupsToDeactivate;

  /// The stacksave call. It dominates all of the argument evaluation.
  llvm::CallInst *StackBase = nullptr;
};
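
// Illustrative sketch of building an argument list; `Ctx` (an ASTContext &),
// `V` (an llvm::Value *), `AggLV`, and `AggTy` stand for values assumed to be
// in scope at the call site:
//
//   CallArgList Args;
//   Args.add(RValue::get(V), Ctx.IntTy);       // pass a scalar by value
//   Args.addUncopiedAggregate(AggLV, AggTy);   // pass an aggregate l-value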

/// FunctionArgList - Type for representing both the decl and type
/// of parameters to a function. The decl must be either a
/// ParmVarDecl or ImplicitParamDecl.
class FunctionArgList : public SmallVector<const VarDecl *, 16> {};

/// ReturnValueSlot - Contains the address where the return value of a
/// function can be stored, and whether the address is volatile or not.
class ReturnValueSlot {
  Address Addr = Address::invalid();

  // Return value slot flags
  unsigned IsVolatile : 1;
  unsigned IsUnused : 1;
  unsigned IsExternallyDestructed : 1;

public:
  ReturnValueSlot()
      : IsVolatile(false), IsUnused(false), IsExternallyDestructed(false) {}
  ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false,
                  bool IsExternallyDestructed = false)
      : Addr(Addr), IsVolatile(IsVolatile), IsUnused(IsUnused),
        IsExternallyDestructed(IsExternallyDestructed) {}

  bool isNull() const { return !Addr.isValid(); }
  bool isVolatile() const { return IsVolatile; }
  Address getValue() const { return Addr; }
  bool isUnused() const { return IsUnused; }
  bool isExternallyDestructed() const { return IsExternallyDestructed; }
};
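
// Illustrative sketch; `DestAddr` (an Address) and `RetTy` (a QualType) stand
// for values assumed to be in scope at the call site:
//
//   ReturnValueSlot Slot(DestAddr, RetTy.isVolatileQualified());
//   // A default-constructed ReturnValueSlot is "null": the call result is
//   // not stored anywhere.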

/// Adds attributes to \p F according to our \p CodeGenOpts and \p LangOpts, as
/// though we had emitted it ourselves. We remove any attributes on F that
/// conflict with the attributes we add here.
///
/// This is useful for adding attrs to bitcode modules that you want to link
/// with but don't control, such as CUDA's libdevice. When linking with such
/// a bitcode library, you might want to set e.g. its functions'
/// "unsafe-fp-math" attribute to match the attr of the functions you're
/// codegen'ing. Otherwise, LLVM will interpret the bitcode module's lack of
/// unsafe-fp-math attrs as tantamount to unsafe-fp-math=false, and then LLVM
/// will propagate unsafe-fp-math=false up to every transitive caller of a
/// function in the bitcode library!
///
/// With the exception of fast-math attrs, this will only make the attributes
/// on the function more conservative. But it's unsafe to call this on a
/// function which relies on particular fast-math attributes for correctness.
/// It's up to you to ensure that this is safe.
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F,
                                              const CodeGenOptions &CodeGenOpts,
                                              const LangOptions &LangOpts,
                                              const TargetOptions &TargetOpts,
                                              bool WillInternalize);
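
// Illustrative sketch; `F` (an llvm::Function &) comes from a linked bitcode
// module and `CGM` is a CodeGenModule, both assumed to be in scope:
//
//   mergeDefaultFunctionDefinitionAttributes(
//       F, CGM.getCodeGenOpts(), CGM.getLangOpts(),
//       CGM.getTarget().getTargetOpts(), /*WillInternalize=*/true);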

enum class FnInfoOpts {
  None = 0,
  IsInstanceMethod = 1 << 0,
  IsChainCall = 1 << 1,
  IsDelegateCall = 1 << 2,
};
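
// Illustrative example of combining the flags with the operators defined
// below; `IsInstance` is an assumed bool:
//
//   FnInfoOpts Opts = FnInfoOpts::None;
//   if (IsInstance)
//     Opts = Opts | FnInfoOpts::IsInstanceMethod;
//   bool ChainCall = (Opts & FnInfoOpts::IsChainCall) != FnInfoOpts::None;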

inline FnInfoOpts operator|(FnInfoOpts A, FnInfoOpts B) {
  return static_cast<FnInfoOpts>(
      static_cast<std::underlying_type_t<FnInfoOpts>>(A) |
      static_cast<std::underlying_type_t<FnInfoOpts>>(B));
}

inline FnInfoOpts operator&(FnInfoOpts A, FnInfoOpts B) {
  return static_cast<FnInfoOpts>(
      static_cast<std::underlying_type_t<FnInfoOpts>>(A) &
      static_cast<std::underlying_type_t<FnInfoOpts>>(B));
}

inline FnInfoOpts operator|=(FnInfoOpts A, FnInfoOpts B) {
  A = A | B;
  return A;
}

inline FnInfoOpts operator&=(FnInfoOpts A, FnInfoOpts B) {
  A = A & B;
  return A;
}

} // end namespace CodeGen
} // end namespace clang

#endif