//=- AArch64MachineFunctionInfo.h - AArch64 machine function info -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares AArch64-specific per-machine-function information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include <cassert>

namespace llvm {

namespace yaml {
struct AArch64FunctionInfo;
} // end namespace yaml

class MachineInstr;

/// AArch64FunctionInfo - This class is derived from MachineFunctionInfo and
/// contains private AArch64-specific information for each MachineFunction.
class AArch64FunctionInfo final : public MachineFunctionInfo {
  /// Backreference to the machine function.
  MachineFunction &MF;
  /// Number of bytes of arguments this function has on the stack. If the
  /// callee is expected to restore the argument stack this should be a
  /// multiple of 16, all usable during a tail call.
  ///
  /// The alternative would forbid tail call optimisation in some cases: if we
  /// want to transfer control from a function with 8 bytes of stack-argument
  /// space to a function with 16 bytes, then misalignment of this value would
  /// make a stack adjustment necessary, which could not be undone by the
  /// callee.
  unsigned BytesInStackArgArea = 0;
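  // For illustration (hypothetical numbers, not taken from this header): with
  // 8 bytes of incoming stack arguments, keeping this value a multiple of 16
  // means recording alignTo(8, 16) == 16, so a tail call into a callee that
  // uses the full 16 bytes of stack-argument space needs no SP adjustment
  // that the callee could never undo.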
  /// The number of bytes to restore to deallocate space for incoming
  /// arguments. Canonically 0 in the C calling convention, but non-zero when
  /// the callee is expected to pop the arguments.
  unsigned ArgumentStackToRestore = 0;

  /// Space just below the incoming stack pointer reserved for arguments being
  /// passed on the stack during a tail call. This will be the difference
  /// between the largest tail call argument space needed in this function and
  /// what is already available by reusing space from incoming arguments.
  unsigned TailCallReservedStack = 0;
  /// HasStackFrame - True if this function has a stack frame. Set by
  /// determineCalleeSaves().
  bool HasStackFrame = false;

  /// Amount of stack frame size, not including callee-saved registers.
  uint64_t LocalStackSize = 0;

  /// The start and end frame indices for the SVE callee saves.
  int MinSVECSFrameIndex = 0;
  int MaxSVECSFrameIndex = 0;

  /// Amount of stack frame size used for saving callee-saved registers.
  unsigned CalleeSavedStackSize = 0;
  unsigned SVECalleeSavedStackSize = 0;
  bool HasCalleeSavedStackSize = false;
  /// Number of TLS accesses using the special (combinable)
  /// _TLS_MODULE_BASE_ symbol.
  unsigned NumLocalDynamicTLSAccesses = 0;

  /// FrameIndex for start of varargs area for arguments passed on the
  /// stack.
  int VarArgsStackIndex = 0;

  /// FrameIndex for start of varargs area for arguments passed in
  /// general purpose registers.
  int VarArgsGPRIndex = 0;

  /// Size of the varargs area for arguments passed in general purpose
  /// registers.
  unsigned VarArgsGPRSize = 0;

  /// FrameIndex for start of varargs area for arguments passed in
  /// floating-point registers.
  int VarArgsFPRIndex = 0;

  /// Size of the varargs area for arguments passed in floating-point
  /// registers.
  unsigned VarArgsFPRSize = 0;
  /// True if this function has a subset of CSRs that is handled explicitly via
  /// copies.
  bool IsSplitCSR = false;

  /// True when the stack gets realigned dynamically because the size of the
  /// stack frame is unknown at compile time, e.g. in the case of VLAs.
  bool StackRealigned = false;

  /// True when the callee-save stack area has unused gaps that may be used for
  /// other stack allocations.
  bool CalleeSaveStackHasFreeSpace = false;

  /// SRetReturnReg - sret lowering includes returning the value of the
  /// returned struct in a register. This field holds the virtual register into
  /// which the sret argument is passed.
  unsigned SRetReturnReg = 0;

  /// The SVE stack size (for predicates and data vectors) is maintained here
  /// rather than in FrameInfo, as the placement and Stack IDs are target
  /// specific.
  uint64_t StackSizeSVE = 0;

  /// HasCalculatedStackSizeSVE indicates whether StackSizeSVE is valid.
  bool HasCalculatedStackSizeSVE = false;
  /// Has a value when it is known whether or not the function uses a
  /// redzone, and no value otherwise.
  /// Initialized during frame lowering, unless the function has the noredzone
  /// attribute, in which case it is set to false at construction.
  Optional<bool> HasRedZone;

  /// ForwardedMustTailRegParms - A list of virtual and physical registers
  /// that must be forwarded to every musttail call.
  SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;

  /// FrameIndex for the tagged base pointer.
  Optional<int> TaggedBasePointerIndex;

  /// Offset from SP-at-entry to the tagged base pointer.
  /// The tagged base pointer is set up to point to the first (lowest address)
  /// tagged stack slot.
  unsigned TaggedBasePointerOffset;

  /// OutliningStyle denotes, if a function was outlined, how it was outlined,
  /// e.g. Tail Call, Thunk, or Function if none apply.
  Optional<std::string> OutliningStyle;

  // Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus
  // CalleeSavedStackSize) to the address of the frame record.
  int CalleeSaveBaseToFrameRecordOffset = 0;
  /// SignReturnAddress is true if PAC-RET is enabled for the function, the
  /// default being to sign non-leaf functions only, with the A key.
  bool SignReturnAddress = false;

  /// SignReturnAddressAll modifies the default PAC-RET mode to signing leaf
  /// functions as well.
  bool SignReturnAddressAll = false;

  /// SignWithBKey modifies the default PAC-RET mode to signing with the B key.
  bool SignWithBKey = false;

  /// BranchTargetEnforcement enables placing BTI instructions at potential
  /// indirect branch destinations.
  bool BranchTargetEnforcement = false;
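  // Illustrative mapping (an assumption based on common Clang/LLVM usage):
  // these flags are derived from IR function attributes such as
  // "sign-return-address", "sign-return-address-key" and
  // "branch-target-enforcement". For example, a function built with
  // -mbranch-protection=pac-ret+leaf+b-key typically carries attributes along
  // the lines of
  //
  //   attributes #0 = { "sign-return-address"="all"
  //                     "sign-return-address-key"="b_key" }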
  /// Whether this function has an extended frame record [Ctx, FP, LR]. If so,
  /// bit 60 of the in-memory FP will be 1 to enable other tools to detect the
  /// extended record.
  bool HasSwiftAsyncContext = false;
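  // For illustration (hypothetical consumer code, not part of this class): a
  // stack-walking tool can detect the extended record by testing that bit,
  // e.g.
  //
  //   bool HasExtendedRecord = (InMemoryFP >> 60) & 1;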
  /// The stack slot where the Swift asynchronous context is stored.
  int SwiftAsyncContextFrameIdx = std::numeric_limits<int>::max();

  /// True if the function needs unwind information.
  mutable Optional<bool> NeedsDwarfUnwindInfo;

  /// True if the function needs asynchronous unwind information.
  mutable Optional<bool> NeedsDwarfAsyncUnwindInfo;
public:
  explicit AArch64FunctionInfo(MachineFunction &MF);

  void initializeBaseYamlFields(const yaml::AArch64FunctionInfo &YamlMFI);

  unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; }
  void setBytesInStackArgArea(unsigned bytes) { BytesInStackArgArea = bytes; }

  unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; }
  void setArgumentStackToRestore(unsigned bytes) {
    ArgumentStackToRestore = bytes;
  }

  unsigned getTailCallReservedStack() const { return TailCallReservedStack; }
  void setTailCallReservedStack(unsigned bytes) {
    TailCallReservedStack = bytes;
  }

  bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; }

  void setStackSizeSVE(uint64_t S) {
    HasCalculatedStackSizeSVE = true;
    StackSizeSVE = S;
  }

  uint64_t getStackSizeSVE() const { return StackSizeSVE; }
  bool hasStackFrame() const { return HasStackFrame; }
  void setHasStackFrame(bool s) { HasStackFrame = s; }

  bool isStackRealigned() const { return StackRealigned; }
  void setStackRealigned(bool s) { StackRealigned = s; }

  bool hasCalleeSaveStackFreeSpace() const {
    return CalleeSaveStackHasFreeSpace;
  }
  void setCalleeSaveStackHasFreeSpace(bool s) {
    CalleeSaveStackHasFreeSpace = s;
  }

  bool isSplitCSR() const { return IsSplitCSR; }
  void setIsSplitCSR(bool s) { IsSplitCSR = s; }

  void setLocalStackSize(uint64_t Size) { LocalStackSize = Size; }
  uint64_t getLocalStackSize() const { return LocalStackSize; }

  void setOutliningStyle(std::string Style) { OutliningStyle = Style; }
  Optional<std::string> getOutliningStyle() const { return OutliningStyle; }
  void setCalleeSavedStackSize(unsigned Size) {
    CalleeSavedStackSize = Size;
    HasCalleeSavedStackSize = true;
  }

  // When CalleeSavedStackSize has not been set (for example when
  // some MachineIR pass is run in isolation), then recalculate
  // the CalleeSavedStackSize directly from the CalleeSavedInfo.
  // Note: This information can only be recalculated after PEI
  // has assigned offsets to the callee save objects.
  unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const {
    bool ValidateCalleeSavedStackSize = false;

#ifndef NDEBUG
    // Make sure the calculated size derived from the CalleeSavedInfo
    // equals the cached size that was calculated elsewhere (e.g. in
    // determineCalleeSaves).
    ValidateCalleeSavedStackSize = HasCalleeSavedStackSize;
#endif

    if (!HasCalleeSavedStackSize || ValidateCalleeSavedStackSize) {
      assert(MFI.isCalleeSavedInfoValid() && "CalleeSavedInfo not calculated");
      if (MFI.getCalleeSavedInfo().empty())
        return 0;

      int64_t MinOffset = std::numeric_limits<int64_t>::max();
      int64_t MaxOffset = std::numeric_limits<int64_t>::min();
      for (const auto &Info : MFI.getCalleeSavedInfo()) {
        int FrameIdx = Info.getFrameIdx();
        if (MFI.getStackID(FrameIdx) != TargetStackID::Default)
          continue;
        int64_t Offset = MFI.getObjectOffset(FrameIdx);
        int64_t ObjSize = MFI.getObjectSize(FrameIdx);
        MinOffset = std::min<int64_t>(Offset, MinOffset);
        MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
      }

      if (SwiftAsyncContextFrameIdx != std::numeric_limits<int>::max()) {
        int64_t Offset = MFI.getObjectOffset(getSwiftAsyncContextFrameIdx());
        int64_t ObjSize = MFI.getObjectSize(getSwiftAsyncContextFrameIdx());
        MinOffset = std::min<int64_t>(Offset, MinOffset);
        MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
      }

      unsigned Size = alignTo(MaxOffset - MinOffset, 16);
      assert((!HasCalleeSavedStackSize || getCalleeSavedStackSize() == Size) &&
             "Invalid size calculated for callee saves");
      return Size;
    }

    return getCalleeSavedStackSize();
  }
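  // Worked example for the recalculation above (hypothetical frame offsets):
  // two default-stack callee-save slots at offsets -16 and -8, each 8 bytes,
  // give MinOffset = -16 and MaxOffset = 0, so Size = alignTo(16, 16) = 16;
  // a third slot at -24 makes the span 24 bytes, which rounds up to 32.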
  unsigned getCalleeSavedStackSize() const {
    assert(HasCalleeSavedStackSize &&
           "CalleeSavedStackSize has not been calculated");
    return CalleeSavedStackSize;
  }

  // Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes'.
  void setSVECalleeSavedStackSize(unsigned Size) {
    SVECalleeSavedStackSize = Size;
  }
  unsigned getSVECalleeSavedStackSize() const {
    return SVECalleeSavedStackSize;
  }

  void setMinMaxSVECSFrameIndex(int Min, int Max) {
    MinSVECSFrameIndex = Min;
    MaxSVECSFrameIndex = Max;
  }

  int getMinSVECSFrameIndex() const { return MinSVECSFrameIndex; }
  int getMaxSVECSFrameIndex() const { return MaxSVECSFrameIndex; }
  void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; }
  unsigned getNumLocalDynamicTLSAccesses() const {
    return NumLocalDynamicTLSAccesses;
  }

  Optional<bool> hasRedZone() const { return HasRedZone; }
  void setHasRedZone(bool s) { HasRedZone = s; }

  int getVarArgsStackIndex() const { return VarArgsStackIndex; }
  void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; }

  int getVarArgsGPRIndex() const { return VarArgsGPRIndex; }
  void setVarArgsGPRIndex(int Index) { VarArgsGPRIndex = Index; }

  unsigned getVarArgsGPRSize() const { return VarArgsGPRSize; }
  void setVarArgsGPRSize(unsigned Size) { VarArgsGPRSize = Size; }

  int getVarArgsFPRIndex() const { return VarArgsFPRIndex; }
  void setVarArgsFPRIndex(int Index) { VarArgsFPRIndex = Index; }

  unsigned getVarArgsFPRSize() const { return VarArgsFPRSize; }
  void setVarArgsFPRSize(unsigned Size) { VarArgsFPRSize = Size; }

  unsigned getSRetReturnReg() const { return SRetReturnReg; }
  void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
  unsigned getJumpTableEntrySize(int Idx) const {
    return JumpTableEntryInfo[Idx].first;
  }
  MCSymbol *getJumpTableEntryPCRelSymbol(int Idx) const {
    return JumpTableEntryInfo[Idx].second;
  }
  void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym) {
    if ((unsigned)Idx >= JumpTableEntryInfo.size())
      JumpTableEntryInfo.resize(Idx + 1);
    JumpTableEntryInfo[Idx] = std::make_pair(Size, PCRelSym);
  }
  using SetOfInstructions = SmallPtrSet<const MachineInstr *, 16>;

  const SetOfInstructions &getLOHRelated() const { return LOHRelated; }

  // Shortcuts for LOH related types.
  class MILOHDirective {
    MCLOHType Kind;

    /// Arguments of this directive. Order matters.
    SmallVector<const MachineInstr *, 3> Args;

  public:
    using LOHArgs = ArrayRef<const MachineInstr *>;

    MILOHDirective(MCLOHType Kind, LOHArgs Args)
        : Kind(Kind), Args(Args.begin(), Args.end()) {
      assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
    }

    MCLOHType getKind() const { return Kind; }
    LOHArgs getArgs() const { return Args; }
  };

  using MILOHArgs = MILOHDirective::LOHArgs;
  using MILOHContainer = SmallVector<MILOHDirective, 32>;

  const MILOHContainer &getLOHContainer() const { return LOHContainerSet; }

  /// Add a LOH directive of this @p Kind and these @p Args.
  void addLOHDirective(MCLOHType Kind, MILOHArgs Args) {
    LOHContainerSet.push_back(MILOHDirective(Kind, Args));
    LOHRelated.insert(Args.begin(), Args.end());
  }
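  // Usage sketch (hypothetical MachineInstr pointers; MCLOH_AdrpLdr is one of
  // the kinds declared in MCLinkerOptimizationHint.h): record an ADRP/LDR
  // pair so the linker can relax it later, keeping the ADRP first because the
  // argument order matters:
  //
  //   FuncInfo->addLOHDirective(MCLOH_AdrpLdr, {AdrpMI, LdrMI});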
  SmallVectorImpl<ForwardedRegister> &getForwardedMustTailRegParms() {
    return ForwardedMustTailRegParms;
  }

  Optional<int> getTaggedBasePointerIndex() const {
    return TaggedBasePointerIndex;
  }
  void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }

  unsigned getTaggedBasePointerOffset() const {
    return TaggedBasePointerOffset;
  }
  void setTaggedBasePointerOffset(unsigned Offset) {
    TaggedBasePointerOffset = Offset;
  }

  int getCalleeSaveBaseToFrameRecordOffset() const {
    return CalleeSaveBaseToFrameRecordOffset;
  }
  void setCalleeSaveBaseToFrameRecordOffset(int Offset) {
    CalleeSaveBaseToFrameRecordOffset = Offset;
  }

  bool shouldSignReturnAddress() const;
  bool shouldSignReturnAddress(bool SpillsLR) const;

  bool shouldSignWithBKey() const { return SignWithBKey; }

  bool branchTargetEnforcement() const { return BranchTargetEnforcement; }

  void setHasSwiftAsyncContext(bool HasContext) {
    HasSwiftAsyncContext = HasContext;
  }
  bool hasSwiftAsyncContext() const { return HasSwiftAsyncContext; }

  void setSwiftAsyncContextFrameIdx(int FI) {
    SwiftAsyncContextFrameIdx = FI;
  }
  int getSwiftAsyncContextFrameIdx() const { return SwiftAsyncContextFrameIdx; }
  bool needsDwarfUnwindInfo() const;
  bool needsAsyncDwarfUnwindInfo() const;

private:
  // Hold the lists of LOHs.
  MILOHContainer LOHContainerSet;
  SetOfInstructions LOHRelated;

  SmallVector<std::pair<unsigned, MCSymbol *>, 2> JumpTableEntryInfo;
};

namespace yaml {
struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo {
  Optional<bool> HasRedZone;

  AArch64FunctionInfo() = default;
  AArch64FunctionInfo(const llvm::AArch64FunctionInfo &MFI);

  void mappingImpl(yaml::IO &YamlIO) override;
  ~AArch64FunctionInfo() = default;
};

template <> struct MappingTraits<AArch64FunctionInfo> {
  static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) {
    YamlIO.mapOptional("hasRedZone", MFI.HasRedZone);
  }
};
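// Illustrative MIR serialization of the mapping above (assuming the standard
// machineFunctionInfo section of a MIR file; only the key mapped here is
// shown):
//
//   machineFunctionInfo:
//     hasRedZone: true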
} // end namespace yaml

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H