//==- TargetRegisterInfo.cpp - Target Register Information Implementation --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetRegisterInfo interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <utility>

#define DEBUG_TYPE "target-reg-info"

using namespace llvm;

static cl::opt<unsigned>
    HugeSizeForSplit("huge-size-for-split", cl::Hidden,
                     cl::desc("A threshold of live range size which may cause "
                              "high compile time cost in global splitting."),
                     cl::init(5000));
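// Note: as a cl::opt this threshold is settable from the command line
// (cl::Hidden only hides it from -help). An illustrative override:
//   llc -huge-size-for-split=1000 test.ll
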
TargetRegisterInfo::TargetRegisterInfo(
    const TargetRegisterInfoDesc *ID, regclass_iterator RCB,
    regclass_iterator RCE, const char *const *SRINames,
    const SubRegCoveredBits *SubIdxRanges, const LaneBitmask *SRILaneMasks,
    LaneBitmask SRICoveringLanes, const RegClassInfo *const RCIs,
    const MVT::SimpleValueType *const RCVTLists, unsigned Mode)
    : InfoDesc(ID), SubRegIndexNames(SRINames), SubRegIdxRanges(SubIdxRanges),
      SubRegIndexLaneMasks(SRILaneMasks), RegClassBegin(RCB), RegClassEnd(RCE),
      CoveringLanes(SRICoveringLanes), RCInfos(RCIs), RCVTLists(RCVTLists),
      HwMode(Mode) {}

TargetRegisterInfo::~TargetRegisterInfo() = default;

bool TargetRegisterInfo::shouldRegionSplitForVirtReg(
    const MachineFunction &MF, const LiveInterval &VirtReg) const {
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineInstr *MI = MRI.getUniqueVRegDef(VirtReg.reg());
  // Don't region-split a huge live range whose unique def is trivially
  // rematerializable; global splitting of such ranges is a compile-time sink.
  if (MI && TII->isTriviallyReMaterializable(*MI) &&
      VirtReg.size() > HugeSizeForSplit)
    return false;
  return true;
}

void TargetRegisterInfo::markSuperRegs(BitVector &RegisterSet,
                                       MCRegister Reg) const {
  for (MCPhysReg SR : superregs_inclusive(Reg))
    RegisterSet.set(SR);
}

bool TargetRegisterInfo::checkAllSuperRegsMarked(
    const BitVector &RegisterSet, ArrayRef<MCPhysReg> Exceptions) const {
  // Check that all super registers of reserved regs are reserved as well.
  BitVector Checked(getNumRegs());
  for (unsigned Reg : RegisterSet.set_bits()) {
    if (Checked[Reg])
      continue;
    for (MCPhysReg SR : superregs(Reg)) {
      if (!RegisterSet[SR] && !is_contained(Exceptions, Reg)) {
        dbgs() << "Error: Super register " << printReg(SR, this)
               << " of reserved register " << printReg(Reg, this)
               << " is not reserved.\n";
        return false;
      }

      // We transitively check superregs. So we can remember this for later
      // to avoid compile-time explosion in deep register hierarchies.
      Checked.set(SR);
    }
  }
  return true;
}

namespace llvm {

Printable printReg(Register Reg, const TargetRegisterInfo *TRI,
                   unsigned SubIdx, const MachineRegisterInfo *MRI) {
  return Printable([Reg, TRI, SubIdx, MRI](raw_ostream &OS) {
    if (!Reg)
      OS << "$noreg";
    else if (Register::isStackSlot(Reg))
      OS << "SS#" << Register::stackSlot2Index(Reg);
    else if (Reg.isVirtual()) {
      StringRef Name = MRI ? MRI->getVRegName(Reg) : "";
      if (Name != "") {
        OS << '%' << Name;
      } else {
        OS << '%' << Register::virtReg2Index(Reg);
      }
    } else if (!TRI)
      OS << '$' << "physreg" << Reg.id();
    else if (Reg < TRI->getNumRegs()) {
      OS << '$';
      printLowerCase(TRI->getName(Reg), OS);
    } else
      llvm_unreachable("Register kind is unsupported.");

    // Print the sub-register index, by name when TRI is available.
    if (SubIdx) {
      if (TRI)
        OS << ':' << TRI->getSubRegIndexName(SubIdx);
      else
        OS << ":sub(" << SubIdx << ')';
    }
  });
}

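// Illustrative usage (register names are target-dependent):
//   dbgs() << printReg(Reg, TRI) << '\n';
// prints e.g. "%0" for a virtual register, "$eax" for a physical one, and
// "$eax:sub_8bit" when a sub-register index is also passed.
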
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
  return Printable([Unit, TRI](raw_ostream &OS) {
    // Generic printout when TRI is missing.
    if (!TRI) {
      OS << "Unit~" << Unit;
      return;
    }

    // Check for invalid register units.
    if (Unit >= TRI->getNumRegUnits()) {
      OS << "BadUnit~" << Unit;
      return;
    }

    // Normal units have at least one root.
    MCRegUnitRootIterator Roots(Unit, TRI);
    assert(Roots.isValid() && "Unit has no roots.");
    OS << TRI->getName(*Roots);
    for (++Roots; Roots.isValid(); ++Roots)
      OS << '~' << TRI->getName(*Roots);
  });
}

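// Illustrative: a unit whose single root is AL prints as "AL"; a unit shared
// by two roots (hypothetical names R0_HI, R1_LO) prints them joined by '~',
// i.e. "R0_HI~R1_LO".
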
Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
  return Printable([Unit, TRI](raw_ostream &OS) {
    if (Register::isVirtualRegister(Unit)) {
      OS << '%' << Register::virtReg2Index(Unit);
    } else {
      OS << printRegUnit(Unit, TRI);
    }
  });
}

Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
                              const TargetRegisterInfo *TRI) {
  return Printable([Reg, &RegInfo, TRI](raw_ostream &OS) {
    if (RegInfo.getRegClassOrNull(Reg))
      OS << StringRef(TRI->getRegClassName(RegInfo.getRegClass(Reg))).lower();
    else if (RegInfo.getRegBankOrNull(Reg))
      OS << StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
    else {
      OS << "_";
      assert((RegInfo.def_empty(Reg) || RegInfo.getType(Reg).isValid()) &&
             "Generic registers must have a valid type");
    }
  });
}

} // end namespace llvm

/// getAllocatableClass - Return the maximal subclass of the given register
/// class that is allocatable, or null if there is none.
const TargetRegisterClass *
TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
  if (!RC || RC->isAllocatable())
    return RC;

  for (BitMaskClassIterator It(RC->getSubClassMask(), *this); It.isValid();
       ++It) {
    const TargetRegisterClass *SubRC = getRegClass(It.getID());
    if (SubRC->isAllocatable())
      return SubRC;
  }
  return nullptr;
}

/// getMinimalPhysRegClass - Returns the Register Class of a physical
/// register of the given type, picking the most specific sub register class
/// of the right type that contains this physreg.
const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClass(MCRegister reg, MVT VT) const {
  assert(Register::isPhysicalRegister(reg) &&
         "reg must be a physical register");

  // Pick the most specific sub register class of the right type that contains
  // this physreg.
  const TargetRegisterClass *BestRC = nullptr;
  for (const TargetRegisterClass *RC : regclasses()) {
    if ((VT == MVT::Other || isTypeLegalForClass(*RC, VT)) &&
        RC->contains(reg) && (!BestRC || BestRC->hasSubClass(RC)))
      BestRC = RC;
  }

  assert(BestRC && "Couldn't find the register class");
  return BestRC;
}

const TargetRegisterClass *
TargetRegisterInfo::getMinimalPhysRegClassLLT(MCRegister reg, LLT Ty) const {
  assert(Register::isPhysicalRegister(reg) &&
         "reg must be a physical register");

  // Pick the most specific sub register class of the right type that contains
  // this physreg.
  const TargetRegisterClass *BestRC = nullptr;
  for (const TargetRegisterClass *RC : regclasses()) {
    if ((!Ty.isValid() || isTypeLegalForClass(*RC, Ty)) && RC->contains(reg) &&
        (!BestRC || BestRC->hasSubClass(RC)))
      BestRC = RC;
  }

  return BestRC;
}

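// Note: unlike the MVT overload above, this LLT variant does not assert when
// no register class matches; it simply returns nullptr.
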
/// getAllocatableSetForRC - Toggle the bits that represent allocatable
/// registers for the specific register class.
static void getAllocatableSetForRC(const MachineFunction &MF,
                                   const TargetRegisterClass *RC,
                                   BitVector &R) {
  assert(RC->isAllocatable() && "invalid for nonallocatable sets");
  ArrayRef<MCPhysReg> Order = RC->getRawAllocationOrder(MF);
  for (MCPhysReg PR : Order)
    R.set(PR);
}

BitVector
TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
                                      const TargetRegisterClass *RC) const {
  BitVector Allocatable(getNumRegs());
  if (RC) {
    // A register class with no allocatable subclass returns an empty set.
    const TargetRegisterClass *SubClass = getAllocatableClass(RC);
    if (SubClass)
      getAllocatableSetForRC(MF, SubClass, Allocatable);
  } else {
    for (const TargetRegisterClass *C : regclasses())
      if (C->isAllocatable())
        getAllocatableSetForRC(MF, C, Allocatable);
  }

  // Mask out the reserved registers.
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const BitVector &Reserved = MRI.getReservedRegs();
  Allocatable.reset(Reserved);

  return Allocatable;
}

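// Helper for the queries below: scan two register-class bit masks word by
// word. Each 32-bit word covers 32 register class IDs, so the first common
// set bit identifies the common class with the smallest ID.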
static inline const TargetRegisterClass *
firstCommonClass(const uint32_t *A, const uint32_t *B,
                 const TargetRegisterInfo *TRI) {
  for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
    if (unsigned Common = *A++ & *B++)
      return TRI->getRegClass(I + llvm::countr_zero(Common));
  return nullptr;
}

const TargetRegisterClass *
TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
                                      const TargetRegisterClass *B) const {
  // First take care of the trivial cases.
  if (A == B)
    return A;
  if (!A || !B)
    return nullptr;

  // Register classes are ordered topologically, so the largest common
  // sub-class is the common sub-class with the smallest ID.
  return firstCommonClass(A->getSubClassMask(), B->getSubClassMask(), this);
}

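// Illustrative: on X86, getCommonSubClass(GR32, GR32_ABCD) yields GR32_ABCD,
// the largest class contained in both.
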
const TargetRegisterClass *
TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                             const TargetRegisterClass *B,
                                             unsigned Idx) const {
  assert(A && B && "Missing register class");
  assert(Idx && "Bad sub-register index");

  // Find Idx in the list of super-register indices.
  for (SuperRegClassIterator RCI(B, this); RCI.isValid(); ++RCI)
    if (RCI.getSubReg() == Idx)
      // The bit mask contains all register classes that are projected into B
      // by Idx. Find a class that is also a sub-class of A.
      return firstCommonClass(RCI.getMask(), A->getSubClassMask(), this);
  return nullptr;
}

const TargetRegisterClass *TargetRegisterInfo::
getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                       const TargetRegisterClass *RCB, unsigned SubB,
                       unsigned &PreA, unsigned &PreB) const {
  assert(RCA && SubA && RCB && SubB && "Invalid arguments");

  // Search all pairs of sub-register indices that project into RCA and RCB
  // respectively. This is quadratic, but usually the sets are very small. On
  // most targets like X86, there will only be a single sub-register index
  // (e.g., sub_16bit projecting into GR16).
  //
  // The worst case is a register class like DPR on ARM.
  // We have indices dsub_0..dsub_7 projecting into that class.
  //
  // It is very common that one register class is a sub-register of the other.
  // Arrange for RCA to be the larger register class so the answer will be
  // found in the first iteration. This makes the search linear for the most
  // common case.
  const TargetRegisterClass *BestRC = nullptr;
  unsigned *BestPreA = &PreA;
  unsigned *BestPreB = &PreB;
  if (getRegSizeInBits(*RCA) < getRegSizeInBits(*RCB)) {
    std::swap(RCA, RCB);
    std::swap(SubA, SubB);
    std::swap(BestPreA, BestPreB);
  }

  // Also terminate the search once we have found a register class as small as
  // RCA.
  unsigned MinSize = getRegSizeInBits(*RCA);

  for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
    unsigned FinalA = composeSubRegIndices(IA.getSubReg(), SubA);
    for (SuperRegClassIterator IB(RCB, this, true); IB.isValid(); ++IB) {
      // Check if a common super-register class exists for this index pair.
      const TargetRegisterClass *RC =
          firstCommonClass(IA.getMask(), IB.getMask(), this);
      if (!RC || getRegSizeInBits(*RC) < MinSize)
        continue;

      // The indexes must compose identically: PreA+SubA == PreB+SubB.
      unsigned FinalB = composeSubRegIndices(IB.getSubReg(), SubB);
      if (FinalA != FinalB)
        continue;

      // Is RC a better candidate than BestRC?
      if (BestRC && getRegSizeInBits(*RC) >= getRegSizeInBits(*BestRC))
        continue;

      // Yes, RC is the smallest super-register class seen so far.
      BestRC = RC;
      *BestPreA = IA.getSubReg();
      *BestPreB = IB.getSubReg();

      // Bail early if we reached MinSize. We won't find a better candidate.
      if (getRegSizeInBits(*BestRC) == MinSize)
        return BestRC;
    }
  }
  return BestRC;
}

/// Check if the registers defined by the pair (RegisterClass, SubReg)
/// share the same register file.
static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
                                  const TargetRegisterClass *DefRC,
                                  unsigned DefSubReg,
                                  const TargetRegisterClass *SrcRC,
                                  unsigned SrcSubReg) {
  // Same register class.
  if (DefRC == SrcRC)
    return true;

  // Both operands are sub registers. Check if they share a register class.
  unsigned SrcIdx, DefIdx;
  if (SrcSubReg && DefSubReg) {
    return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
                                      SrcIdx, DefIdx) != nullptr;
  }

  // At most one of the registers is a sub register; make it Src to avoid
  // duplicating the test.
  if (!SrcSubReg) {
    std::swap(DefSubReg, SrcSubReg);
    std::swap(DefRC, SrcRC);
  }

  // One of the registers is a sub register; check if we can get a superclass.
  if (SrcSubReg)
    return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != nullptr;

  // Plain copy.
  return TRI.getCommonSubClass(DefRC, SrcRC) != nullptr;
}

bool TargetRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                              unsigned DefSubReg,
                                              const TargetRegisterClass *SrcRC,
                                              unsigned SrcSubReg) const {
  // If this source does not incur a cross register bank copy, use it.
  return shareSameRegisterFile(*this, DefRC, DefSubReg, SrcRC, SrcSubReg);
}

// Compute target-independent register allocator hints to help eliminate
// copies.
bool TargetRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const std::pair<unsigned, SmallVector<Register, 4>> *Hints_MRI =
      MRI.getRegAllocationHints(VirtReg);

  if (!Hints_MRI)
    return false;

  SmallSet<Register, 32> HintedRegs;
  // First hint may be a target hint.
  bool Skip = (Hints_MRI->first != 0);
  for (auto Reg : Hints_MRI->second) {
    if (Skip) {
      Skip = false;
      continue;
    }

    // Target-independent hints are either a physical or a virtual register.
    Register Phys = Reg;
    if (VRM && Phys.isVirtual())
      Phys = VRM->getPhys(Phys);

    // Don't add the same reg twice (Hints_MRI may contain multiple virtual
    // registers allocated to the same physreg).
    if (!HintedRegs.insert(Phys).second)
      continue;
    // Check that Phys is a valid hint in VirtReg's register class.
    if (!Phys.isPhysical())
      continue;
    if (MRI.isReserved(Phys))
      continue;
    // Check that Phys is in the allocation order. We shouldn't heed hints
    // from VirtReg's register class if they aren't in the allocation order.
    // The target probably has a reason for removing the register.
    if (!is_contained(Order, Phys))
      continue;

    // All clear, tell the register allocator to prefer this register.
    Hints.push_back(Phys);
  }
  return false;
}

bool TargetRegisterInfo::isCalleeSavedPhysReg(
    MCRegister PhysReg, const MachineFunction &MF) const {
  if (PhysReg == 0)
    return false;
  const uint32_t *callerPreservedRegs =
      getCallPreservedMask(MF, MF.getFunction().getCallingConv());
  if (callerPreservedRegs) {
    assert(Register::isPhysicalRegister(PhysReg) &&
           "Expected physical register");
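    // Call-preserved masks pack one bit per register into 32-bit words; a set
    // bit means the register is preserved across the call.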
    return (callerPreservedRegs[PhysReg / 32] >> PhysReg % 32) & 1;
  }
  return false;
}

bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  return MF.getFrameInfo().isStackRealignable();
}

bool TargetRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
  return MF.getFrameInfo().shouldRealignStack();
}

// Returns true if every bit set in mask0 is also set in mask1, i.e. mask0 is
// a subset of mask1.
bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0,
                                            const uint32_t *mask1) const {
  unsigned N = (getNumRegs() + 31) / 32;
  for (unsigned I = 0; I < N; ++I)
    if ((mask0[I] & mask1[I]) != mask0[I])
      return false;
  return true;
}

TypeSize
TargetRegisterInfo::getRegSizeInBits(Register Reg,
                                     const MachineRegisterInfo &MRI) const {
  const TargetRegisterClass *RC{};
  if (Reg.isPhysical()) {
    // The size is not directly available for physical registers.
    // Instead, we need to access a register class that contains Reg and
    // get the size of that register class.
    RC = getMinimalPhysRegClass(Reg);
    assert(RC && "Unable to deduce the register class");
    return getRegSizeInBits(*RC);
  }
  LLT Ty = MRI.getType(Reg);
  if (Ty.isValid())
    return Ty.getSizeInBits();

  // Since Reg is not a generic register, it may have a register class.
  RC = MRI.getRegClass(Reg);
  assert(RC && "Unable to deduce the register class");
  return getRegSizeInBits(*RC);
}

bool TargetRegisterInfo::getCoveringSubRegIndexes(
    const MachineRegisterInfo &MRI, const TargetRegisterClass *RC,
    LaneBitmask LaneMask, SmallVectorImpl<unsigned> &NeededIndexes) const {
  SmallVector<unsigned, 8> PossibleIndexes;
  unsigned BestIdx = 0;
  unsigned BestCover = 0;

  for (unsigned Idx = 1, E = getNumSubRegIndices(); Idx < E; ++Idx) {
    // Is this index even compatible with the given class?
    if (getSubClassWithSubReg(RC, Idx) != RC)
      continue;
    LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
    // Early exit if we found a perfect match.
    if (SubRegMask == LaneMask) {
      BestIdx = Idx;
      break;
    }

    // The index must not cover any lanes outside \p LaneMask.
    if ((SubRegMask & ~LaneMask).any())
      continue;

    unsigned PopCount = SubRegMask.getNumLanes();
    PossibleIndexes.push_back(Idx);
    if (PopCount > BestCover) {
      BestCover = PopCount;
      BestIdx = Idx;
    }
  }

  // Abort if we cannot possibly implement the COPY with the given indexes.
  if (BestIdx == 0)
    return false;

  NeededIndexes.push_back(BestIdx);

  // Greedy heuristic: Keep iterating, taking the best covering subreg index
  // each time.
  LaneBitmask LanesLeft = LaneMask & ~getSubRegIndexLaneMask(BestIdx);
  while (LanesLeft.any()) {
    unsigned BestIdx = 0;
    int BestCover = std::numeric_limits<int>::min();
    for (unsigned Idx : PossibleIndexes) {
      LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
      // Early exit if we found a perfect match.
      if (SubRegMask == LanesLeft) {
        BestIdx = Idx;
        break;
      }

      // Do not cover already-covered lanes to avoid creating cycles
      // in copy bundles (= bundle contains copies that write to the
      // same lanes).
      if ((SubRegMask & ~LanesLeft).any())
        continue;

      // Try to cover as many of the remaining lanes as possible.
      const int Cover = (SubRegMask & LanesLeft).getNumLanes();
      if (Cover > BestCover) {
        BestCover = Cover;
        BestIdx = Idx;
      }
    }

    if (BestIdx == 0)
      return false; // Impossible to handle

    NeededIndexes.push_back(BestIdx);

    LanesLeft &= ~getSubRegIndexLaneMask(BestIdx);
  }

  return true;
}

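// Illustrative (hypothetical index names): covering the low 64 bits of a
// 128-bit class whose 32-bit lanes map to indexes ssub_0 and ssub_1 would
// populate NeededIndexes with {ssub_0, ssub_1} and return true.
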
unsigned TargetRegisterInfo::getSubRegIdxSize(unsigned Idx) const {
  assert(Idx && Idx < getNumSubRegIndices() &&
         "This is not a subregister index");
  return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Size;
}

unsigned TargetRegisterInfo::getSubRegIdxOffset(unsigned Idx) const {
  assert(Idx && Idx < getNumSubRegIndices() &&
         "This is not a subregister index");
  return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Offset;
}

Register
TargetRegisterInfo::lookThruCopyLike(Register SrcReg,
                                     const MachineRegisterInfo *MRI) const {
  while (true) {
    const MachineInstr *MI = MRI->getVRegDef(SrcReg);
    if (!MI->isCopyLike())
      return SrcReg;

    Register CopySrcReg;
    if (MI->isCopy())
      CopySrcReg = MI->getOperand(1).getReg();
    else {
      assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
      CopySrcReg = MI->getOperand(2).getReg();
    }

    if (!CopySrcReg.isVirtual())
      return CopySrcReg;

    SrcReg = CopySrcReg;
  }
}

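// Illustrative: given %2 = COPY %1 and %1 = SUBREG_TO_REG 0, %0, idx, calling
// lookThruCopyLike(%2) walks both copy-like defs and returns %0.
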
Register TargetRegisterInfo::lookThruSingleUseCopyChain(
    Register SrcReg, const MachineRegisterInfo *MRI) const {
  while (true) {
    const MachineInstr *MI = MRI->getVRegDef(SrcReg);
    // Found the real definition, return it if it has a single use.
    if (!MI->isCopyLike())
      return MRI->hasOneNonDBGUse(SrcReg) ? SrcReg : Register();

    Register CopySrcReg;
    if (MI->isCopy())
      CopySrcReg = MI->getOperand(1).getReg();
    else {
      assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
      CopySrcReg = MI->getOperand(2).getReg();
    }

    // Continue only if the next definition in the chain is for a virtual
    // register that has a single use.
    if (!CopySrcReg.isVirtual() || !MRI->hasOneNonDBGUse(CopySrcReg))
      return Register();

    SrcReg = CopySrcReg;
  }
}

void TargetRegisterInfo::getOffsetOpcodes(
    const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
  assert(!Offset.getScalable() && "Scalable offsets are not handled");
  DIExpression::appendOffset(Ops, Offset.getFixed());
}

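// Targets with scalable stack offsets (e.g. AArch64 with SVE) override
// getOffsetOpcodes; the default above only handles fixed offsets.
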
DIExpression *
TargetRegisterInfo::prependOffsetExpression(const DIExpression *Expr,
                                            unsigned PrependFlags,
                                            const StackOffset &Offset) const {
  assert((PrependFlags &
          ~(DIExpression::DerefBefore | DIExpression::DerefAfter |
            DIExpression::StackValue | DIExpression::EntryValue)) == 0 &&
         "Unsupported prepend flag");
  SmallVector<uint64_t, 16> OffsetExpr;
  if (PrependFlags & DIExpression::DerefBefore)
    OffsetExpr.push_back(dwarf::DW_OP_deref);
  getOffsetOpcodes(Offset, OffsetExpr);
  if (PrependFlags & DIExpression::DerefAfter)
    OffsetExpr.push_back(dwarf::DW_OP_deref);
  return DIExpression::prependOpcodes(Expr, OffsetExpr,
                                      PrependFlags & DIExpression::StackValue,
                                      PrependFlags & DIExpression::EntryValue);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
void TargetRegisterInfo::dumpReg(Register Reg, unsigned SubRegIndex,
                                 const TargetRegisterInfo *TRI) {
  dbgs() << printReg(Reg, TRI, SubRegIndex) << "\n";
}
#endif