//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_CODEGEN_CGCALL_H
#define LLVM_CLANG_LIB_CODEGEN_CGCALL_H

#include "CGValue.h"
#include "EHScopeStack.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "llvm/IR/Value.h"
namespace llvm {
class Type;
class Value;
} // namespace llvm

namespace clang {
class Decl;
class FunctionDecl;
class TargetOptions;
class VarDecl;

namespace CodeGen {
/// Abstract information about a function or function prototype.
class CGCalleeInfo {
  /// The function prototype of the callee.
  const FunctionProtoType *CalleeProtoTy;
  /// The function declaration of the callee.
  GlobalDecl CalleeDecl;

public:
  explicit CGCalleeInfo() : CalleeProtoTy(nullptr) {}
  CGCalleeInfo(const FunctionProtoType *calleeProtoTy, GlobalDecl calleeDecl)
      : CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
  CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
      : CalleeProtoTy(calleeProtoTy) {}
  CGCalleeInfo(GlobalDecl calleeDecl)
      : CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}

  const FunctionProtoType *getCalleeFunctionProtoType() const {
    return CalleeProtoTy;
  }
  const GlobalDecl getCalleeDecl() const { return CalleeDecl; }
};
/// All available information about a concrete callee.
class CGCallee {
  enum class SpecialKind : uintptr_t {
    Invalid,
    Builtin,
    PseudoDestructor,
    Virtual,

    Last = Virtual
  };

  struct BuiltinInfoStorage {
    const FunctionDecl *Decl;
    unsigned ID;
  };
  struct PseudoDestructorInfoStorage {
    const CXXPseudoDestructorExpr *Expr;
  };
  struct VirtualInfoStorage {
    const CallExpr *CE;
    GlobalDecl MD;
    Address Addr;
    llvm::FunctionType *FTy;
  };

  SpecialKind KindOrFunctionPointer;
  union {
    CGCalleeInfo AbstractInfo;
    BuiltinInfoStorage BuiltinInfo;
    PseudoDestructorInfoStorage PseudoDestructorInfo;
    VirtualInfoStorage VirtualInfo;
  };

  explicit CGCallee(SpecialKind kind) : KindOrFunctionPointer(kind) {}

  CGCallee(const FunctionDecl *builtinDecl, unsigned builtinID)
      : KindOrFunctionPointer(SpecialKind::Builtin) {
    BuiltinInfo.Decl = builtinDecl;
    BuiltinInfo.ID = builtinID;
  }

public:
  CGCallee() : KindOrFunctionPointer(SpecialKind::Invalid) {}

  /// Construct a callee. Call this constructor directly when this
  /// isn't a direct call.
  CGCallee(const CGCalleeInfo &abstractInfo, llvm::Value *functionPtr)
      : KindOrFunctionPointer(
            SpecialKind(reinterpret_cast<uintptr_t>(functionPtr))) {
    AbstractInfo = abstractInfo;
    assert(functionPtr && "configuring callee without function pointer");
    assert(functionPtr->getType()->isPointerTy());
  }

  static CGCallee forBuiltin(unsigned builtinID,
                             const FunctionDecl *builtinDecl) {
    CGCallee result(SpecialKind::Builtin);
    result.BuiltinInfo.Decl = builtinDecl;
    result.BuiltinInfo.ID = builtinID;
    return result;
  }

  static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E) {
    CGCallee result(SpecialKind::PseudoDestructor);
    result.PseudoDestructorInfo.Expr = E;
    return result;
  }

  static CGCallee forDirect(llvm::Constant *functionPtr,
                            const CGCalleeInfo &abstractInfo = CGCalleeInfo()) {
    return CGCallee(abstractInfo, functionPtr);
  }

  static CGCallee forDirect(llvm::FunctionCallee functionPtr,
                            const CGCalleeInfo &abstractInfo = CGCalleeInfo()) {
    return CGCallee(abstractInfo, functionPtr.getCallee());
  }

  static CGCallee forVirtual(const CallExpr *CE, GlobalDecl MD, Address Addr,
                             llvm::FunctionType *FTy) {
    CGCallee result(SpecialKind::Virtual);
    result.VirtualInfo.CE = CE;
    result.VirtualInfo.MD = MD;
    result.VirtualInfo.Addr = Addr;
    result.VirtualInfo.FTy = FTy;
    return result;
  }

  bool isBuiltin() const {
    return KindOrFunctionPointer == SpecialKind::Builtin;
  }
  const FunctionDecl *getBuiltinDecl() const {
    assert(isBuiltin());
    return BuiltinInfo.Decl;
  }
  unsigned getBuiltinID() const {
    assert(isBuiltin());
    return BuiltinInfo.ID;
  }

  bool isPseudoDestructor() const {
    return KindOrFunctionPointer == SpecialKind::PseudoDestructor;
  }
  const CXXPseudoDestructorExpr *getPseudoDestructorExpr() const {
    assert(isPseudoDestructor());
    return PseudoDestructorInfo.Expr;
  }

  bool isOrdinary() const {
    return uintptr_t(KindOrFunctionPointer) > uintptr_t(SpecialKind::Last);
  }
  CGCalleeInfo getAbstractInfo() const {
    if (isVirtual())
      return VirtualInfo.MD;
    assert(isOrdinary());
    return AbstractInfo;
  }
  llvm::Value *getFunctionPointer() const {
    assert(isOrdinary());
    return reinterpret_cast<llvm::Value *>(uintptr_t(KindOrFunctionPointer));
  }
  void setFunctionPointer(llvm::Value *functionPtr) {
    assert(isOrdinary());
    KindOrFunctionPointer =
        SpecialKind(reinterpret_cast<uintptr_t>(functionPtr));
  }

  bool isVirtual() const {
    return KindOrFunctionPointer == SpecialKind::Virtual;
  }
  const CallExpr *getVirtualCallExpr() const {
    assert(isVirtual());
    return VirtualInfo.CE;
  }
  GlobalDecl getVirtualMethodDecl() const {
    assert(isVirtual());
    return VirtualInfo.MD;
  }
  Address getThisAddress() const {
    assert(isVirtual());
    return VirtualInfo.Addr;
  }
  llvm::FunctionType *getVirtualFunctionType() const {
    assert(isVirtual());
    return VirtualInfo.FTy;
  }

  /// If this is a delayed callee computation of some sort, prepare
  /// a concrete callee.
  CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const;
};
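
// Illustrative sketch (not part of the original header): a direct callee is
// typically built from a function pointer plus abstract type information,
// while virtual callees carry the call site and are resolved to an ordinary
// callee later (e.g. via prepareConcreteCallee()). With placeholder values
// `Fn` (an llvm::FunctionCallee) and `GD` (a GlobalDecl), this might look
// like:
//
//   CGCallee Callee = CGCallee::forDirect(Fn, CGCalleeInfo(GD));
//   if (Callee.isOrdinary())
//     llvm::Value *FnPtr = Callee.getFunctionPointer();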

struct CallArg {
private:
  union {
    RValue RV;
    LValue LV; /// The argument is semantically a load from this l-value.
  };
  bool HasLV;

  /// A data-flow flag to make sure getRValue and/or copyInto are not
  /// called twice for duplicated IR emission.
  mutable bool IsUsed;

public:
  QualType Ty;
  CallArg(RValue rv, QualType ty)
      : RV(rv), HasLV(false), IsUsed(false), Ty(ty) {}
  CallArg(LValue lv, QualType ty)
      : LV(lv), HasLV(true), IsUsed(false), Ty(ty) {}
  bool hasLValue() const { return HasLV; }
  QualType getType() const { return Ty; }

  /// \returns an independent RValue. If the CallArg contains an LValue,
  /// a temporary copy is returned.
  RValue getRValue(CodeGenFunction &CGF) const;

  LValue getKnownLValue() const {
    assert(HasLV && !IsUsed);
    return LV;
  }
  RValue getKnownRValue() const {
    assert(!HasLV && !IsUsed);
    return RV;
  }
  void setRValue(RValue _RV) {
    assert(!HasLV);
    RV = _RV;
  }

  bool isAggregate() const { return HasLV || RV.isAggregate(); }

  void copyInto(CodeGenFunction &CGF, Address A) const;
};

/// CallArgList - Type for representing both the value and type of
/// arguments in a call.
class CallArgList : public SmallVector<CallArg, 8> {
public:
  CallArgList() = default;

  struct Writeback {
    /// The original argument. Note that the argument l-value
    /// is potentially null.
    LValue Source;

    /// The temporary alloca.
    Address Temporary;

    /// A value to "use" after the writeback, or null.
    llvm::Value *ToUse;
  };

  struct CallArgCleanup {
    EHScopeStack::stable_iterator Cleanup;

    /// The "is active" insertion point. This instruction is temporary and
    /// will be removed after insertion.
    llvm::Instruction *IsActiveIP;
  };

  void add(RValue rvalue, QualType type) { push_back(CallArg(rvalue, type)); }

  void addUncopiedAggregate(LValue LV, QualType type) {
    push_back(CallArg(LV, type));
  }

  /// Add all the arguments from another CallArgList to this one. After doing
  /// this, the old CallArgList retains its list of arguments, but must not
  /// be used to emit a call.
  void addFrom(const CallArgList &other) {
    insert(end(), other.begin(), other.end());
    Writebacks.insert(Writebacks.end(), other.Writebacks.begin(),
                      other.Writebacks.end());
    CleanupsToDeactivate.insert(CleanupsToDeactivate.end(),
                                other.CleanupsToDeactivate.begin(),
                                other.CleanupsToDeactivate.end());
    assert(!(StackBase && other.StackBase) && "can't merge stackbases");
    if (!StackBase)
      StackBase = other.StackBase;
  }

  void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse) {
    Writeback writeback = {srcLV, temporary, toUse};
    Writebacks.push_back(writeback);
  }

  bool hasWritebacks() const { return !Writebacks.empty(); }

  typedef llvm::iterator_range<SmallVectorImpl<Writeback>::const_iterator>
      writeback_const_range;

  writeback_const_range writebacks() const {
    return writeback_const_range(Writebacks.begin(), Writebacks.end());
  }

  void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup,
                                 llvm::Instruction *IsActiveIP) {
    CallArgCleanup ArgCleanup;
    ArgCleanup.Cleanup = Cleanup;
    ArgCleanup.IsActiveIP = IsActiveIP;
    CleanupsToDeactivate.push_back(ArgCleanup);
  }

  ArrayRef<CallArgCleanup> getCleanupsToDeactivate() const {
    return CleanupsToDeactivate;
  }

  void allocateArgumentMemory(CodeGenFunction &CGF);
  llvm::Instruction *getStackBase() const { return StackBase; }
  void freeArgumentMemory(CodeGenFunction &CGF) const;

  /// Returns if we're using an inalloca struct to pass arguments in
  /// memory.
  bool isUsingInAlloca() const { return StackBase; }

private:
  SmallVector<Writeback, 1> Writebacks;

  /// Deactivate these cleanups immediately before making the call. This
  /// is used to cleanup objects that are owned by the callee once the call
  /// occurs.
  SmallVector<CallArgCleanup, 1> CleanupsToDeactivate;

  /// The stacksave call. It dominates all of the argument evaluation.
  llvm::CallInst *StackBase = nullptr;
};
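
// Illustrative sketch (not part of the original header): callers usually
// populate a CallArgList argument by argument and then hand it to call
// emission. `SomeValue`, `ScalarTy`, `AggLV`, and `AggTy` are placeholders:
//
//   CallArgList Args;
//   Args.add(RValue::get(SomeValue), ScalarTy);      // scalar argument
//   Args.addUncopiedAggregate(AggLV, AggTy);         // aggregate by l-value
//   // Args is then passed along with a CGCallee when emitting the call.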

/// FunctionArgList - Type for representing both the decl and type
/// of parameters to a function. The decl must be either a
/// ParmVarDecl or ImplicitParamDecl.
class FunctionArgList : public SmallVector<const VarDecl *, 16> {};

/// ReturnValueSlot - Contains the address where the return value of a
/// function can be stored, and whether the address is volatile or not.
class ReturnValueSlot {
  Address Addr = Address::invalid();

  // Return value slot flags
  unsigned IsVolatile : 1;
  unsigned IsUnused : 1;
  unsigned IsExternallyDestructed : 1;

public:
  ReturnValueSlot()
      : IsVolatile(false), IsUnused(false), IsExternallyDestructed(false) {}
  ReturnValueSlot(Address Addr, bool IsVolatile, bool IsUnused = false,
                  bool IsExternallyDestructed = false)
      : Addr(Addr), IsVolatile(IsVolatile), IsUnused(IsUnused),
        IsExternallyDestructed(IsExternallyDestructed) {}

  bool isNull() const { return !Addr.isValid(); }
  bool isVolatile() const { return IsVolatile; }
  Address getValue() const { return Addr; }
  bool isUnused() const { return IsUnused; }
  bool isExternallyDestructed() const { return IsExternallyDestructed; }
};
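
// Illustrative sketch (not part of the original header): when a call's result
// should land in caller-provided storage, a slot can be formed as
//
//   ReturnValueSlot Slot(DestAddr, /*IsVolatile=*/false);
//
// where `DestAddr` is a placeholder Address. A default-constructed
// ReturnValueSlot (isNull() == true) means the caller supplies no slot.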

/// Adds attributes to \p F according to our \p CodeGenOpts and \p LangOpts, as
/// though we had emitted it ourselves. We remove any attributes on F that
/// conflict with the attributes we add here.
///
/// This is useful for adding attrs to bitcode modules that you want to link
/// with but don't control, such as CUDA's libdevice. When linking with such
/// a bitcode library, you might want to set e.g. its functions'
/// "unsafe-fp-math" attribute to match the attr of the functions you're
/// codegen'ing. Otherwise, LLVM will interpret the bitcode module's lack of
/// unsafe-fp-math attrs as tantamount to unsafe-fp-math=false, and then LLVM
/// will propagate unsafe-fp-math=false up to every transitive caller of a
/// function in the bitcode library!
///
/// With the exception of fast-math attrs, this will only make the attributes
/// on the function more conservative. But it's unsafe to call this on a
/// function which relies on particular fast-math attributes for correctness.
/// It's up to you to ensure that this is safe.
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F,
                                              const CodeGenOptions &CodeGenOpts,
                                              const LangOptions &LangOpts,
                                              const TargetOptions &TargetOpts,
                                              bool WillInternalize);
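
// Illustrative sketch (not part of the original header): a client linking an
// external bitcode library might apply this to each function definition in
// that library. `LibModule`, `CGOpts`, `LangOpts`, and `TgtOpts` are
// placeholders for the linked module and the current compilation's options:
//
//   for (llvm::Function &F : LibModule->functions())
//     if (!F.isDeclaration())
//       mergeDefaultFunctionDefinitionAttributes(F, CGOpts, LangOpts, TgtOpts,
//                                                /*WillInternalize=*/true);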

enum class FnInfoOpts {
  None = 0,
  IsInstanceMethod = 1 << 0,
  IsChainCall = 1 << 1,
  IsDelegateCall = 1 << 2,
};

inline FnInfoOpts operator|(FnInfoOpts A, FnInfoOpts B) {
  return static_cast<FnInfoOpts>(
      static_cast<std::underlying_type_t<FnInfoOpts>>(A) |
      static_cast<std::underlying_type_t<FnInfoOpts>>(B));
}

inline FnInfoOpts operator&(FnInfoOpts A, FnInfoOpts B) {
  return static_cast<FnInfoOpts>(
      static_cast<std::underlying_type_t<FnInfoOpts>>(A) &
      static_cast<std::underlying_type_t<FnInfoOpts>>(B));
}

inline FnInfoOpts operator|=(FnInfoOpts A, FnInfoOpts B) {
  A = A | B;
  return A;
}

inline FnInfoOpts operator&=(FnInfoOpts A, FnInfoOpts B) {
  A = A & B;
  return A;
}
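
// Illustrative sketch (not part of the original header): FnInfoOpts acts as a
// small bitmask, so the operators above let flags be combined and tested:
//
//   FnInfoOpts Opts = FnInfoOpts::IsInstanceMethod | FnInfoOpts::IsChainCall;
//   bool IsChain = (Opts & FnInfoOpts::IsChainCall) != FnInfoOpts::None;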

} // end namespace CodeGen
} // end namespace clang

#endif