[Utils] Identity map module-level debug info on first use in CloneFunction* (#118627)
[llvm-project.git] / llvm / lib / Target / AArch64 / AArch64MachineFunctionInfo.h
blob427d86ee1bb8ebf4c032464688cef9a3f5b06bb2
1 //=- AArch64MachineFunctionInfo.h - AArch64 machine function info -*- C++ -*-=//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file declares AArch64-specific per-machine-function information.
11 //===----------------------------------------------------------------------===//
13 #ifndef LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
14 #define LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H
#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCSymbol.h"
#include <cassert>
#include <cstdint>
#include <limits>
#include <optional>
30 namespace llvm {
32 namespace yaml {
33 struct AArch64FunctionInfo;
34 } // end namespace yaml
36 class AArch64Subtarget;
37 class MachineInstr;
/// Frame-lowering bookkeeping for the TPIDR2 block used by SME lazy saves
/// (see the TPIDR2 member of AArch64FunctionInfo below).
struct TPIDR2Object {
  /// Frame index of the TPIDR2 block; int max means "not allocated".
  int FrameIndex = std::numeric_limits<int>::max();
  /// Number of uses recorded for the object.
  unsigned Uses = 0;
};
44 /// AArch64FunctionInfo - This class is derived from MachineFunctionInfo and
45 /// contains private AArch64-specific information for each MachineFunction.
46 class AArch64FunctionInfo final : public MachineFunctionInfo {
47 /// Number of bytes of arguments this function has on the stack. If the callee
48 /// is expected to restore the argument stack this should be a multiple of 16,
49 /// all usable during a tail call.
50 ///
51 /// The alternative would forbid tail call optimisation in some cases: if we
52 /// want to transfer control from a function with 8-bytes of stack-argument
53 /// space to a function with 16-bytes then misalignment of this value would
54 /// make a stack adjustment necessary, which could not be undone by the
55 /// callee.
56 unsigned BytesInStackArgArea = 0;
58 /// The number of bytes to restore to deallocate space for incoming
59 /// arguments. Canonically 0 in the C calling convention, but non-zero when
60 /// callee is expected to pop the args.
61 unsigned ArgumentStackToRestore = 0;
63 /// Space just below incoming stack pointer reserved for arguments being
64 /// passed on the stack during a tail call. This will be the difference
65 /// between the largest tail call argument space needed in this function and
66 /// what's already available by reusing space of incoming arguments.
67 unsigned TailCallReservedStack = 0;
69 /// HasStackFrame - True if this function has a stack frame. Set by
70 /// determineCalleeSaves().
71 bool HasStackFrame = false;
73 /// Amount of stack frame size, not including callee-saved registers.
74 uint64_t LocalStackSize = 0;
76 /// The start and end frame indices for the SVE callee saves.
77 int MinSVECSFrameIndex = 0;
78 int MaxSVECSFrameIndex = 0;
80 /// Amount of stack frame size used for saving callee-saved registers.
81 unsigned CalleeSavedStackSize = 0;
82 unsigned SVECalleeSavedStackSize = 0;
83 bool HasCalleeSavedStackSize = false;
85 /// Number of TLS accesses using the special (combinable)
86 /// _TLS_MODULE_BASE_ symbol.
87 unsigned NumLocalDynamicTLSAccesses = 0;
89 /// FrameIndex for start of varargs area for arguments passed on the
90 /// stack.
91 int VarArgsStackIndex = 0;
93 /// Offset of start of varargs area for arguments passed on the stack.
94 unsigned VarArgsStackOffset = 0;
96 /// FrameIndex for start of varargs area for arguments passed in
97 /// general purpose registers.
98 int VarArgsGPRIndex = 0;
100 /// Size of the varargs area for arguments passed in general purpose
101 /// registers.
102 unsigned VarArgsGPRSize = 0;
104 /// FrameIndex for start of varargs area for arguments passed in
105 /// floating-point registers.
106 int VarArgsFPRIndex = 0;
108 /// Size of the varargs area for arguments passed in floating-point
109 /// registers.
110 unsigned VarArgsFPRSize = 0;
112 /// The stack slots used to add space between FPR and GPR accesses when using
113 /// hazard padding. StackHazardCSRSlotIndex is added between GPR and FPR CSRs.
114 /// StackHazardSlotIndex is added between (sorted) stack objects.
115 int StackHazardSlotIndex = std::numeric_limits<int>::max();
116 int StackHazardCSRSlotIndex = std::numeric_limits<int>::max();
118 /// True if this function has a subset of CSRs that is handled explicitly via
119 /// copies.
120 bool IsSplitCSR = false;
122 /// True when the stack gets realigned dynamically because the size of stack
123 /// frame is unknown at compile time. e.g., in case of VLAs.
124 bool StackRealigned = false;
126 /// True when the callee-save stack area has unused gaps that may be used for
127 /// other stack allocations.
128 bool CalleeSaveStackHasFreeSpace = false;
130 /// SRetReturnReg - sret lowering includes returning the value of the
131 /// returned struct in a register. This field holds the virtual register into
132 /// which the sret argument is passed.
133 Register SRetReturnReg;
135 /// SVE stack size (for predicates and data vectors) are maintained here
136 /// rather than in FrameInfo, as the placement and Stack IDs are target
137 /// specific.
138 uint64_t StackSizeSVE = 0;
140 /// HasCalculatedStackSizeSVE indicates whether StackSizeSVE is valid.
141 bool HasCalculatedStackSizeSVE = false;
143 /// Has a value when it is known whether or not the function uses a
144 /// redzone, and no value otherwise.
145 /// Initialized during frame lowering, unless the function has the noredzone
146 /// attribute, in which case it is set to false at construction.
147 std::optional<bool> HasRedZone;
149 /// ForwardedMustTailRegParms - A list of virtual and physical registers
150 /// that must be forwarded to every musttail call.
151 SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;
153 /// FrameIndex for the tagged base pointer.
154 std::optional<int> TaggedBasePointerIndex;
156 /// Offset from SP-at-entry to the tagged base pointer.
157 /// Tagged base pointer is set up to point to the first (lowest address)
158 /// tagged stack slot.
159 unsigned TaggedBasePointerOffset;
161 /// OutliningStyle denotes, if a function was outined, how it was outlined,
162 /// e.g. Tail Call, Thunk, or Function if none apply.
163 std::optional<std::string> OutliningStyle;
165 // Offset from SP-after-callee-saved-spills (i.e. SP-at-entry minus
166 // CalleeSavedStackSize) to the address of the frame record.
167 int CalleeSaveBaseToFrameRecordOffset = 0;
169 /// SignReturnAddress is true if PAC-RET is enabled for the function with
170 /// defaults being sign non-leaf functions only, with the B key.
171 bool SignReturnAddress = false;
173 /// SignReturnAddressAll modifies the default PAC-RET mode to signing leaf
174 /// functions as well.
175 bool SignReturnAddressAll = false;
177 /// SignWithBKey modifies the default PAC-RET mode to signing with the B key.
178 bool SignWithBKey = false;
180 /// HasELFSignedGOT is true if the target binary format is ELF and the IR
181 /// module containing the corresponding function has "ptrauth-elf-got" flag
182 /// set to 1.
183 bool HasELFSignedGOT = false;
185 /// SigningInstrOffset captures the offset of the PAC-RET signing instruction
186 /// within the prologue, so it can be re-used for authentication in the
187 /// epilogue when using PC as a second salt (FEAT_PAuth_LR)
188 MCSymbol *SignInstrLabel = nullptr;
190 /// BranchTargetEnforcement enables placing BTI instructions at potential
191 /// indirect branch destinations.
192 bool BranchTargetEnforcement = false;
194 /// Indicates that SP signing should be diversified with PC as-per PAuthLR.
195 /// This is set by -mbranch-protection and will emit NOP instructions unless
196 /// the subtarget feature +pauthlr is also used (in which case non-NOP
197 /// instructions are emitted).
198 bool BranchProtectionPAuthLR = false;
200 /// Whether this function has an extended frame record [Ctx, FP, LR]. If so,
201 /// bit 60 of the in-memory FP will be 1 to enable other tools to detect the
202 /// extended record.
203 bool HasSwiftAsyncContext = false;
205 /// The stack slot where the Swift asynchronous context is stored.
206 int SwiftAsyncContextFrameIdx = std::numeric_limits<int>::max();
208 bool IsMTETagged = false;
210 /// The function has Scalable Vector or Scalable Predicate register argument
211 /// or return type
212 bool IsSVECC = false;
214 /// The frame-index for the TPIDR2 object used for lazy saves.
215 TPIDR2Object TPIDR2;
217 /// Whether this function changes streaming mode within the function.
218 bool HasStreamingModeChanges = false;
220 /// True if the function need unwind information.
221 mutable std::optional<bool> NeedsDwarfUnwindInfo;
223 /// True if the function need asynchronous unwind information.
224 mutable std::optional<bool> NeedsAsyncDwarfUnwindInfo;
226 int64_t StackProbeSize = 0;
228 // Holds a register containing pstate.sm. This is set
229 // on function entry to record the initial pstate of a function.
230 Register PStateSMReg = MCRegister::NoRegister;
232 // Holds a pointer to a buffer that is large enough to represent
233 // all SME ZA state and any additional state required by the
234 // __arm_sme_save/restore support routines.
235 Register SMESaveBufferAddr = MCRegister::NoRegister;
237 // true if SMESaveBufferAddr is used.
238 bool SMESaveBufferUsed = false;
240 // Has the PNReg used to build PTRUE instruction.
241 // The PTRUE is used for the LD/ST of ZReg pairs in save and restore.
242 unsigned PredicateRegForFillSpill = 0;
244 // The stack slots where VG values are stored to.
245 int64_t VGIdx = std::numeric_limits<int>::max();
246 int64_t StreamingVGIdx = std::numeric_limits<int>::max();
248 public:
249 AArch64FunctionInfo(const Function &F, const AArch64Subtarget *STI);
251 MachineFunctionInfo *
252 clone(BumpPtrAllocator &Allocator, MachineFunction &DestMF,
253 const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
254 const override;
256 void setPredicateRegForFillSpill(unsigned Reg) {
257 PredicateRegForFillSpill = Reg;
259 unsigned getPredicateRegForFillSpill() const {
260 return PredicateRegForFillSpill;
263 Register getSMESaveBufferAddr() const { return SMESaveBufferAddr; };
264 void setSMESaveBufferAddr(Register Reg) { SMESaveBufferAddr = Reg; };
266 unsigned isSMESaveBufferUsed() const { return SMESaveBufferUsed; };
267 void setSMESaveBufferUsed(bool Used = true) { SMESaveBufferUsed = Used; };
269 Register getPStateSMReg() const { return PStateSMReg; };
270 void setPStateSMReg(Register Reg) { PStateSMReg = Reg; };
272 int64_t getVGIdx() const { return VGIdx; };
273 void setVGIdx(unsigned Idx) { VGIdx = Idx; };
275 int64_t getStreamingVGIdx() const { return StreamingVGIdx; };
276 void setStreamingVGIdx(unsigned FrameIdx) { StreamingVGIdx = FrameIdx; };
278 bool isSVECC() const { return IsSVECC; };
279 void setIsSVECC(bool s) { IsSVECC = s; };
281 TPIDR2Object &getTPIDR2Obj() { return TPIDR2; }
283 void initializeBaseYamlFields(const yaml::AArch64FunctionInfo &YamlMFI);
285 unsigned getBytesInStackArgArea() const { return BytesInStackArgArea; }
286 void setBytesInStackArgArea(unsigned bytes) { BytesInStackArgArea = bytes; }
288 unsigned getArgumentStackToRestore() const { return ArgumentStackToRestore; }
289 void setArgumentStackToRestore(unsigned bytes) {
290 ArgumentStackToRestore = bytes;
293 unsigned getTailCallReservedStack() const { return TailCallReservedStack; }
294 void setTailCallReservedStack(unsigned bytes) {
295 TailCallReservedStack = bytes;
298 bool hasCalculatedStackSizeSVE() const { return HasCalculatedStackSizeSVE; }
300 void setStackSizeSVE(uint64_t S) {
301 HasCalculatedStackSizeSVE = true;
302 StackSizeSVE = S;
305 uint64_t getStackSizeSVE() const { return StackSizeSVE; }
307 bool hasStackFrame() const { return HasStackFrame; }
308 void setHasStackFrame(bool s) { HasStackFrame = s; }
310 bool isStackRealigned() const { return StackRealigned; }
311 void setStackRealigned(bool s) { StackRealigned = s; }
313 bool hasCalleeSaveStackFreeSpace() const {
314 return CalleeSaveStackHasFreeSpace;
316 void setCalleeSaveStackHasFreeSpace(bool s) {
317 CalleeSaveStackHasFreeSpace = s;
319 bool isSplitCSR() const { return IsSplitCSR; }
320 void setIsSplitCSR(bool s) { IsSplitCSR = s; }
322 void setLocalStackSize(uint64_t Size) { LocalStackSize = Size; }
323 uint64_t getLocalStackSize() const { return LocalStackSize; }
325 void setOutliningStyle(const std::string &Style) { OutliningStyle = Style; }
326 std::optional<std::string> getOutliningStyle() const {
327 return OutliningStyle;
330 void setCalleeSavedStackSize(unsigned Size) {
331 CalleeSavedStackSize = Size;
332 HasCalleeSavedStackSize = true;
335 // When CalleeSavedStackSize has not been set (for example when
336 // some MachineIR pass is run in isolation), then recalculate
337 // the CalleeSavedStackSize directly from the CalleeSavedInfo.
338 // Note: This information can only be recalculated after PEI
339 // has assigned offsets to the callee save objects.
340 unsigned getCalleeSavedStackSize(const MachineFrameInfo &MFI) const {
341 bool ValidateCalleeSavedStackSize = false;
343 #ifndef NDEBUG
344 // Make sure the calculated size derived from the CalleeSavedInfo
345 // equals the cached size that was calculated elsewhere (e.g. in
346 // determineCalleeSaves).
347 ValidateCalleeSavedStackSize = HasCalleeSavedStackSize;
348 #endif
350 if (!HasCalleeSavedStackSize || ValidateCalleeSavedStackSize) {
351 assert(MFI.isCalleeSavedInfoValid() && "CalleeSavedInfo not calculated");
352 if (MFI.getCalleeSavedInfo().empty())
353 return 0;
355 int64_t MinOffset = std::numeric_limits<int64_t>::max();
356 int64_t MaxOffset = std::numeric_limits<int64_t>::min();
357 for (const auto &Info : MFI.getCalleeSavedInfo()) {
358 int FrameIdx = Info.getFrameIdx();
359 if (MFI.getStackID(FrameIdx) != TargetStackID::Default)
360 continue;
361 int64_t Offset = MFI.getObjectOffset(FrameIdx);
362 int64_t ObjSize = MFI.getObjectSize(FrameIdx);
363 MinOffset = std::min<int64_t>(Offset, MinOffset);
364 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
367 if (SwiftAsyncContextFrameIdx != std::numeric_limits<int>::max()) {
368 int64_t Offset = MFI.getObjectOffset(getSwiftAsyncContextFrameIdx());
369 int64_t ObjSize = MFI.getObjectSize(getSwiftAsyncContextFrameIdx());
370 MinOffset = std::min<int64_t>(Offset, MinOffset);
371 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
374 if (StackHazardCSRSlotIndex != std::numeric_limits<int>::max()) {
375 int64_t Offset = MFI.getObjectOffset(StackHazardCSRSlotIndex);
376 int64_t ObjSize = MFI.getObjectSize(StackHazardCSRSlotIndex);
377 MinOffset = std::min<int64_t>(Offset, MinOffset);
378 MaxOffset = std::max<int64_t>(Offset + ObjSize, MaxOffset);
381 unsigned Size = alignTo(MaxOffset - MinOffset, 16);
382 assert((!HasCalleeSavedStackSize || getCalleeSavedStackSize() == Size) &&
383 "Invalid size calculated for callee saves");
384 return Size;
387 return getCalleeSavedStackSize();
390 unsigned getCalleeSavedStackSize() const {
391 assert(HasCalleeSavedStackSize &&
392 "CalleeSavedStackSize has not been calculated");
393 return CalleeSavedStackSize;
396 // Saves the CalleeSavedStackSize for SVE vectors in 'scalable bytes'
397 void setSVECalleeSavedStackSize(unsigned Size) {
398 SVECalleeSavedStackSize = Size;
400 unsigned getSVECalleeSavedStackSize() const {
401 return SVECalleeSavedStackSize;
404 void setMinMaxSVECSFrameIndex(int Min, int Max) {
405 MinSVECSFrameIndex = Min;
406 MaxSVECSFrameIndex = Max;
409 int getMinSVECSFrameIndex() const { return MinSVECSFrameIndex; }
410 int getMaxSVECSFrameIndex() const { return MaxSVECSFrameIndex; }
412 void incNumLocalDynamicTLSAccesses() { ++NumLocalDynamicTLSAccesses; }
413 unsigned getNumLocalDynamicTLSAccesses() const {
414 return NumLocalDynamicTLSAccesses;
417 std::optional<bool> hasRedZone() const { return HasRedZone; }
418 void setHasRedZone(bool s) { HasRedZone = s; }
420 int getVarArgsStackIndex() const { return VarArgsStackIndex; }
421 void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; }
423 unsigned getVarArgsStackOffset() const { return VarArgsStackOffset; }
424 void setVarArgsStackOffset(unsigned Offset) { VarArgsStackOffset = Offset; }
426 int getVarArgsGPRIndex() const { return VarArgsGPRIndex; }
427 void setVarArgsGPRIndex(int Index) { VarArgsGPRIndex = Index; }
429 unsigned getVarArgsGPRSize() const { return VarArgsGPRSize; }
430 void setVarArgsGPRSize(unsigned Size) { VarArgsGPRSize = Size; }
432 int getVarArgsFPRIndex() const { return VarArgsFPRIndex; }
433 void setVarArgsFPRIndex(int Index) { VarArgsFPRIndex = Index; }
435 unsigned getVarArgsFPRSize() const { return VarArgsFPRSize; }
436 void setVarArgsFPRSize(unsigned Size) { VarArgsFPRSize = Size; }
438 bool hasStackHazardSlotIndex() const {
439 return StackHazardSlotIndex != std::numeric_limits<int>::max();
441 int getStackHazardSlotIndex() const { return StackHazardSlotIndex; }
442 void setStackHazardSlotIndex(int Index) {
443 assert(StackHazardSlotIndex == std::numeric_limits<int>::max());
444 StackHazardSlotIndex = Index;
446 int getStackHazardCSRSlotIndex() const { return StackHazardCSRSlotIndex; }
447 void setStackHazardCSRSlotIndex(int Index) {
448 assert(StackHazardCSRSlotIndex == std::numeric_limits<int>::max());
449 StackHazardCSRSlotIndex = Index;
452 unsigned getSRetReturnReg() const { return SRetReturnReg; }
453 void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
455 unsigned getJumpTableEntrySize(int Idx) const {
456 return JumpTableEntryInfo[Idx].first;
458 MCSymbol *getJumpTableEntryPCRelSymbol(int Idx) const {
459 return JumpTableEntryInfo[Idx].second;
461 void setJumpTableEntryInfo(int Idx, unsigned Size, MCSymbol *PCRelSym) {
462 if ((unsigned)Idx >= JumpTableEntryInfo.size())
463 JumpTableEntryInfo.resize(Idx+1);
464 JumpTableEntryInfo[Idx] = std::make_pair(Size, PCRelSym);
467 using SetOfInstructions = SmallPtrSet<const MachineInstr *, 16>;
469 const SetOfInstructions &getLOHRelated() const { return LOHRelated; }
471 // Shortcuts for LOH related types.
472 class MILOHDirective {
473 MCLOHType Kind;
475 /// Arguments of this directive. Order matters.
476 SmallVector<const MachineInstr *, 3> Args;
478 public:
479 using LOHArgs = ArrayRef<const MachineInstr *>;
481 MILOHDirective(MCLOHType Kind, LOHArgs Args)
482 : Kind(Kind), Args(Args.begin(), Args.end()) {
483 assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
486 MCLOHType getKind() const { return Kind; }
487 LOHArgs getArgs() const { return Args; }
490 using MILOHArgs = MILOHDirective::LOHArgs;
491 using MILOHContainer = SmallVector<MILOHDirective, 32>;
493 const MILOHContainer &getLOHContainer() const { return LOHContainerSet; }
495 /// Add a LOH directive of this @p Kind and this @p Args.
496 void addLOHDirective(MCLOHType Kind, MILOHArgs Args) {
497 LOHContainerSet.push_back(MILOHDirective(Kind, Args));
498 LOHRelated.insert(Args.begin(), Args.end());
501 SmallVectorImpl<ForwardedRegister> &getForwardedMustTailRegParms() {
502 return ForwardedMustTailRegParms;
505 std::optional<int> getTaggedBasePointerIndex() const {
506 return TaggedBasePointerIndex;
508 void setTaggedBasePointerIndex(int Index) { TaggedBasePointerIndex = Index; }
510 unsigned getTaggedBasePointerOffset() const {
511 return TaggedBasePointerOffset;
513 void setTaggedBasePointerOffset(unsigned Offset) {
514 TaggedBasePointerOffset = Offset;
517 int getCalleeSaveBaseToFrameRecordOffset() const {
518 return CalleeSaveBaseToFrameRecordOffset;
520 void setCalleeSaveBaseToFrameRecordOffset(int Offset) {
521 CalleeSaveBaseToFrameRecordOffset = Offset;
524 bool shouldSignReturnAddress(const MachineFunction &MF) const;
525 bool shouldSignReturnAddress(bool SpillsLR) const;
527 bool needsShadowCallStackPrologueEpilogue(MachineFunction &MF) const;
529 bool shouldSignWithBKey() const { return SignWithBKey; }
531 bool hasELFSignedGOT() const { return HasELFSignedGOT; }
533 MCSymbol *getSigningInstrLabel() const { return SignInstrLabel; }
534 void setSigningInstrLabel(MCSymbol *Label) { SignInstrLabel = Label; }
536 bool isMTETagged() const { return IsMTETagged; }
538 bool branchTargetEnforcement() const { return BranchTargetEnforcement; }
540 bool branchProtectionPAuthLR() const { return BranchProtectionPAuthLR; }
542 void setHasSwiftAsyncContext(bool HasContext) {
543 HasSwiftAsyncContext = HasContext;
545 bool hasSwiftAsyncContext() const { return HasSwiftAsyncContext; }
547 void setSwiftAsyncContextFrameIdx(int FI) {
548 SwiftAsyncContextFrameIdx = FI;
550 int getSwiftAsyncContextFrameIdx() const { return SwiftAsyncContextFrameIdx; }
552 bool needsDwarfUnwindInfo(const MachineFunction &MF) const;
553 bool needsAsyncDwarfUnwindInfo(const MachineFunction &MF) const;
555 bool hasStreamingModeChanges() const { return HasStreamingModeChanges; }
556 void setHasStreamingModeChanges(bool HasChanges) {
557 HasStreamingModeChanges = HasChanges;
560 bool hasStackProbing() const { return StackProbeSize != 0; }
562 int64_t getStackProbeSize() const { return StackProbeSize; }
564 private:
565 // Hold the lists of LOHs.
566 MILOHContainer LOHContainerSet;
567 SetOfInstructions LOHRelated;
569 SmallVector<std::pair<unsigned, MCSymbol *>, 2> JumpTableEntryInfo;
572 namespace yaml {
573 struct AArch64FunctionInfo final : public yaml::MachineFunctionInfo {
574 std::optional<bool> HasRedZone;
576 AArch64FunctionInfo() = default;
577 AArch64FunctionInfo(const llvm::AArch64FunctionInfo &MFI);
579 void mappingImpl(yaml::IO &YamlIO) override;
580 ~AArch64FunctionInfo() = default;
583 template <> struct MappingTraits<AArch64FunctionInfo> {
584 static void mapping(IO &YamlIO, AArch64FunctionInfo &MFI) {
585 YamlIO.mapOptional("hasRedZone", MFI.HasRedZone);
589 } // end namespace yaml
591 } // end namespace llvm
593 #endif // LLVM_LIB_TARGET_AARCH64_AARCH64MACHINEFUNCTIONINFO_H