//==- TargetRegisterInfo.cpp - Target Register Information Implementation --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetRegisterInfo interface.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
43 #define DEBUG_TYPE "target-reg-info"
47 static cl::opt
<unsigned>
48 HugeSizeForSplit("huge-size-for-split", cl::Hidden
,
49 cl::desc("A threshold of live range size which may cause "
50 "high compile time cost in global splitting."),
53 TargetRegisterInfo::TargetRegisterInfo(
54 const TargetRegisterInfoDesc
*ID
, regclass_iterator RCB
,
55 regclass_iterator RCE
, const char *const *SRINames
,
56 const SubRegCoveredBits
*SubIdxRanges
, const LaneBitmask
*SRILaneMasks
,
57 LaneBitmask SRICoveringLanes
, const RegClassInfo
*const RCIs
,
58 const MVT::SimpleValueType
*const RCVTLists
, unsigned Mode
)
59 : InfoDesc(ID
), SubRegIndexNames(SRINames
), SubRegIdxRanges(SubIdxRanges
),
60 SubRegIndexLaneMasks(SRILaneMasks
), RegClassBegin(RCB
), RegClassEnd(RCE
),
61 CoveringLanes(SRICoveringLanes
), RCInfos(RCIs
), RCVTLists(RCVTLists
),
64 TargetRegisterInfo::~TargetRegisterInfo() = default;
66 bool TargetRegisterInfo::shouldRegionSplitForVirtReg(
67 const MachineFunction
&MF
, const LiveInterval
&VirtReg
) const {
68 const TargetInstrInfo
*TII
= MF
.getSubtarget().getInstrInfo();
69 const MachineRegisterInfo
&MRI
= MF
.getRegInfo();
70 MachineInstr
*MI
= MRI
.getUniqueVRegDef(VirtReg
.reg());
71 if (MI
&& TII
->isTriviallyReMaterializable(*MI
) &&
72 VirtReg
.size() > HugeSizeForSplit
)
77 void TargetRegisterInfo::markSuperRegs(BitVector
&RegisterSet
,
78 MCRegister Reg
) const {
79 for (MCPhysReg SR
: superregs_inclusive(Reg
))
83 bool TargetRegisterInfo::checkAllSuperRegsMarked(const BitVector
&RegisterSet
,
84 ArrayRef
<MCPhysReg
> Exceptions
) const {
85 // Check that all super registers of reserved regs are reserved as well.
86 BitVector
Checked(getNumRegs());
87 for (unsigned Reg
: RegisterSet
.set_bits()) {
90 for (MCPhysReg SR
: superregs(Reg
)) {
91 if (!RegisterSet
[SR
] && !is_contained(Exceptions
, Reg
)) {
92 dbgs() << "Error: Super register " << printReg(SR
, this)
93 << " of reserved register " << printReg(Reg
, this)
94 << " is not reserved.\n";
98 // We transitively check superregs. So we can remember this for later
99 // to avoid compiletime explosion in deep register hierarchies.
108 Printable
printReg(Register Reg
, const TargetRegisterInfo
*TRI
,
109 unsigned SubIdx
, const MachineRegisterInfo
*MRI
) {
110 return Printable([Reg
, TRI
, SubIdx
, MRI
](raw_ostream
&OS
) {
113 else if (Register::isStackSlot(Reg
))
114 OS
<< "SS#" << Register::stackSlot2Index(Reg
);
115 else if (Reg
.isVirtual()) {
116 StringRef Name
= MRI
? MRI
->getVRegName(Reg
) : "";
120 OS
<< '%' << Register::virtReg2Index(Reg
);
123 OS
<< '$' << "physreg" << Reg
;
124 else if (Reg
< TRI
->getNumRegs()) {
126 printLowerCase(TRI
->getName(Reg
), OS
);
128 llvm_unreachable("Register kind is unsupported.");
132 OS
<< ':' << TRI
->getSubRegIndexName(SubIdx
);
134 OS
<< ":sub(" << SubIdx
<< ')';
139 Printable
printRegUnit(unsigned Unit
, const TargetRegisterInfo
*TRI
) {
140 return Printable([Unit
, TRI
](raw_ostream
&OS
) {
141 // Generic printout when TRI is missing.
143 OS
<< "Unit~" << Unit
;
147 // Check for invalid register units.
148 if (Unit
>= TRI
->getNumRegUnits()) {
149 OS
<< "BadUnit~" << Unit
;
153 // Normal units have at least one root.
154 MCRegUnitRootIterator
Roots(Unit
, TRI
);
155 assert(Roots
.isValid() && "Unit has no roots.");
156 OS
<< TRI
->getName(*Roots
);
157 for (++Roots
; Roots
.isValid(); ++Roots
)
158 OS
<< '~' << TRI
->getName(*Roots
);
162 Printable
printVRegOrUnit(unsigned Unit
, const TargetRegisterInfo
*TRI
) {
163 return Printable([Unit
, TRI
](raw_ostream
&OS
) {
164 if (Register::isVirtualRegister(Unit
)) {
165 OS
<< '%' << Register::virtReg2Index(Unit
);
167 OS
<< printRegUnit(Unit
, TRI
);
172 Printable
printRegClassOrBank(Register Reg
, const MachineRegisterInfo
&RegInfo
,
173 const TargetRegisterInfo
*TRI
) {
174 return Printable([Reg
, &RegInfo
, TRI
](raw_ostream
&OS
) {
175 if (RegInfo
.getRegClassOrNull(Reg
))
176 OS
<< StringRef(TRI
->getRegClassName(RegInfo
.getRegClass(Reg
))).lower();
177 else if (RegInfo
.getRegBankOrNull(Reg
))
178 OS
<< StringRef(RegInfo
.getRegBankOrNull(Reg
)->getName()).lower();
181 assert((RegInfo
.def_empty(Reg
) || RegInfo
.getType(Reg
).isValid()) &&
182 "Generic registers must have a valid type");
187 } // end namespace llvm
189 /// getAllocatableClass - Return the maximal subclass of the given register
190 /// class that is alloctable, or NULL.
191 const TargetRegisterClass
*
192 TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass
*RC
) const {
193 if (!RC
|| RC
->isAllocatable())
196 for (BitMaskClassIterator
It(RC
->getSubClassMask(), *this); It
.isValid();
198 const TargetRegisterClass
*SubRC
= getRegClass(It
.getID());
199 if (SubRC
->isAllocatable())
205 /// getMinimalPhysRegClass - Returns the Register Class of a physical
206 /// register of the given type, picking the most sub register class of
207 /// the right type that contains this physreg.
208 const TargetRegisterClass
*
209 TargetRegisterInfo::getMinimalPhysRegClass(MCRegister reg
, MVT VT
) const {
210 assert(Register::isPhysicalRegister(reg
) &&
211 "reg must be a physical register");
213 // Pick the most sub register class of the right type that contains
215 const TargetRegisterClass
* BestRC
= nullptr;
216 for (const TargetRegisterClass
* RC
: regclasses()) {
217 if ((VT
== MVT::Other
|| isTypeLegalForClass(*RC
, VT
)) &&
218 RC
->contains(reg
) && (!BestRC
|| BestRC
->hasSubClass(RC
)))
222 assert(BestRC
&& "Couldn't find the register class");
226 const TargetRegisterClass
*
227 TargetRegisterInfo::getMinimalPhysRegClassLLT(MCRegister reg
, LLT Ty
) const {
228 assert(Register::isPhysicalRegister(reg
) &&
229 "reg must be a physical register");
231 // Pick the most sub register class of the right type that contains
233 const TargetRegisterClass
*BestRC
= nullptr;
234 for (const TargetRegisterClass
*RC
: regclasses()) {
235 if ((!Ty
.isValid() || isTypeLegalForClass(*RC
, Ty
)) && RC
->contains(reg
) &&
236 (!BestRC
|| BestRC
->hasSubClass(RC
)))
243 /// getAllocatableSetForRC - Toggle the bits that represent allocatable
244 /// registers for the specific register class.
245 static void getAllocatableSetForRC(const MachineFunction
&MF
,
246 const TargetRegisterClass
*RC
, BitVector
&R
){
247 assert(RC
->isAllocatable() && "invalid for nonallocatable sets");
248 ArrayRef
<MCPhysReg
> Order
= RC
->getRawAllocationOrder(MF
);
249 for (MCPhysReg PR
: Order
)
253 BitVector
TargetRegisterInfo::getAllocatableSet(const MachineFunction
&MF
,
254 const TargetRegisterClass
*RC
) const {
255 BitVector
Allocatable(getNumRegs());
257 // A register class with no allocatable subclass returns an empty set.
258 const TargetRegisterClass
*SubClass
= getAllocatableClass(RC
);
260 getAllocatableSetForRC(MF
, SubClass
, Allocatable
);
262 for (const TargetRegisterClass
*C
: regclasses())
263 if (C
->isAllocatable())
264 getAllocatableSetForRC(MF
, C
, Allocatable
);
267 // Mask out the reserved registers
268 const MachineRegisterInfo
&MRI
= MF
.getRegInfo();
269 const BitVector
&Reserved
= MRI
.getReservedRegs();
270 Allocatable
.reset(Reserved
);
276 const TargetRegisterClass
*firstCommonClass(const uint32_t *A
,
278 const TargetRegisterInfo
*TRI
) {
279 for (unsigned I
= 0, E
= TRI
->getNumRegClasses(); I
< E
; I
+= 32)
280 if (unsigned Common
= *A
++ & *B
++)
281 return TRI
->getRegClass(I
+ llvm::countr_zero(Common
));
285 const TargetRegisterClass
*
286 TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass
*A
,
287 const TargetRegisterClass
*B
) const {
288 // First take care of the trivial cases.
294 // Register classes are ordered topologically, so the largest common
295 // sub-class it the common sub-class with the smallest ID.
296 return firstCommonClass(A
->getSubClassMask(), B
->getSubClassMask(), this);
299 const TargetRegisterClass
*
300 TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass
*A
,
301 const TargetRegisterClass
*B
,
302 unsigned Idx
) const {
303 assert(A
&& B
&& "Missing register class");
304 assert(Idx
&& "Bad sub-register index");
306 // Find Idx in the list of super-register indices.
307 for (SuperRegClassIterator
RCI(B
, this); RCI
.isValid(); ++RCI
)
308 if (RCI
.getSubReg() == Idx
)
309 // The bit mask contains all register classes that are projected into B
310 // by Idx. Find a class that is also a sub-class of A.
311 return firstCommonClass(RCI
.getMask(), A
->getSubClassMask(), this);
315 const TargetRegisterClass
*TargetRegisterInfo::
316 getCommonSuperRegClass(const TargetRegisterClass
*RCA
, unsigned SubA
,
317 const TargetRegisterClass
*RCB
, unsigned SubB
,
318 unsigned &PreA
, unsigned &PreB
) const {
319 assert(RCA
&& SubA
&& RCB
&& SubB
&& "Invalid arguments");
321 // Search all pairs of sub-register indices that project into RCA and RCB
322 // respectively. This is quadratic, but usually the sets are very small. On
323 // most targets like X86, there will only be a single sub-register index
324 // (e.g., sub_16bit projecting into GR16).
326 // The worst case is a register class like DPR on ARM.
327 // We have indices dsub_0..dsub_7 projecting into that class.
329 // It is very common that one register class is a sub-register of the other.
330 // Arrange for RCA to be the larger register so the answer will be found in
331 // the first iteration. This makes the search linear for the most common
333 const TargetRegisterClass
*BestRC
= nullptr;
334 unsigned *BestPreA
= &PreA
;
335 unsigned *BestPreB
= &PreB
;
336 if (getRegSizeInBits(*RCA
) < getRegSizeInBits(*RCB
)) {
338 std::swap(SubA
, SubB
);
339 std::swap(BestPreA
, BestPreB
);
342 // Also terminate the search one we have found a register class as small as
344 unsigned MinSize
= getRegSizeInBits(*RCA
);
346 for (SuperRegClassIterator
IA(RCA
, this, true); IA
.isValid(); ++IA
) {
347 unsigned FinalA
= composeSubRegIndices(IA
.getSubReg(), SubA
);
348 for (SuperRegClassIterator
IB(RCB
, this, true); IB
.isValid(); ++IB
) {
349 // Check if a common super-register class exists for this index pair.
350 const TargetRegisterClass
*RC
=
351 firstCommonClass(IA
.getMask(), IB
.getMask(), this);
352 if (!RC
|| getRegSizeInBits(*RC
) < MinSize
)
355 // The indexes must compose identically: PreA+SubA == PreB+SubB.
356 unsigned FinalB
= composeSubRegIndices(IB
.getSubReg(), SubB
);
357 if (FinalA
!= FinalB
)
360 // Is RC a better candidate than BestRC?
361 if (BestRC
&& getRegSizeInBits(*RC
) >= getRegSizeInBits(*BestRC
))
364 // Yes, RC is the smallest super-register seen so far.
366 *BestPreA
= IA
.getSubReg();
367 *BestPreB
= IB
.getSubReg();
369 // Bail early if we reached MinSize. We won't find a better candidate.
370 if (getRegSizeInBits(*BestRC
) == MinSize
)
377 /// Check if the registers defined by the pair (RegisterClass, SubReg)
378 /// share the same register file.
379 static bool shareSameRegisterFile(const TargetRegisterInfo
&TRI
,
380 const TargetRegisterClass
*DefRC
,
382 const TargetRegisterClass
*SrcRC
,
383 unsigned SrcSubReg
) {
384 // Same register class.
388 // Both operands are sub registers. Check if they share a register class.
389 unsigned SrcIdx
, DefIdx
;
390 if (SrcSubReg
&& DefSubReg
) {
391 return TRI
.getCommonSuperRegClass(SrcRC
, SrcSubReg
, DefRC
, DefSubReg
,
392 SrcIdx
, DefIdx
) != nullptr;
395 // At most one of the register is a sub register, make it Src to avoid
396 // duplicating the test.
398 std::swap(DefSubReg
, SrcSubReg
);
399 std::swap(DefRC
, SrcRC
);
402 // One of the register is a sub register, check if we can get a superclass.
404 return TRI
.getMatchingSuperRegClass(SrcRC
, DefRC
, SrcSubReg
) != nullptr;
407 return TRI
.getCommonSubClass(DefRC
, SrcRC
) != nullptr;
410 bool TargetRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass
*DefRC
,
412 const TargetRegisterClass
*SrcRC
,
413 unsigned SrcSubReg
) const {
414 // If this source does not incur a cross register bank copy, use it.
415 return shareSameRegisterFile(*this, DefRC
, DefSubReg
, SrcRC
, SrcSubReg
);
418 // Compute target-independent register allocator hints to help eliminate copies.
419 bool TargetRegisterInfo::getRegAllocationHints(
420 Register VirtReg
, ArrayRef
<MCPhysReg
> Order
,
421 SmallVectorImpl
<MCPhysReg
> &Hints
, const MachineFunction
&MF
,
422 const VirtRegMap
*VRM
, const LiveRegMatrix
*Matrix
) const {
423 const MachineRegisterInfo
&MRI
= MF
.getRegInfo();
424 const std::pair
<unsigned, SmallVector
<Register
, 4>> &Hints_MRI
=
425 MRI
.getRegAllocationHints(VirtReg
);
427 SmallSet
<Register
, 32> HintedRegs
;
428 // First hint may be a target hint.
429 bool Skip
= (Hints_MRI
.first
!= 0);
430 for (auto Reg
: Hints_MRI
.second
) {
436 // Target-independent hints are either a physical or a virtual register.
438 if (VRM
&& Phys
.isVirtual())
439 Phys
= VRM
->getPhys(Phys
);
441 // Don't add the same reg twice (Hints_MRI may contain multiple virtual
442 // registers allocated to the same physreg).
443 if (!HintedRegs
.insert(Phys
).second
)
445 // Check that Phys is a valid hint in VirtReg's register class.
446 if (!Phys
.isPhysical())
448 if (MRI
.isReserved(Phys
))
450 // Check that Phys is in the allocation order. We shouldn't heed hints
451 // from VirtReg's register class if they aren't in the allocation order. The
452 // target probably has a reason for removing the register.
453 if (!is_contained(Order
, Phys
))
456 // All clear, tell the register allocator to prefer this register.
457 Hints
.push_back(Phys
);
462 bool TargetRegisterInfo::isCalleeSavedPhysReg(
463 MCRegister PhysReg
, const MachineFunction
&MF
) const {
466 const uint32_t *callerPreservedRegs
=
467 getCallPreservedMask(MF
, MF
.getFunction().getCallingConv());
468 if (callerPreservedRegs
) {
469 assert(Register::isPhysicalRegister(PhysReg
) &&
470 "Expected physical register");
471 return (callerPreservedRegs
[PhysReg
/ 32] >> PhysReg
% 32) & 1;
476 bool TargetRegisterInfo::canRealignStack(const MachineFunction
&MF
) const {
477 return MF
.getFrameInfo().isStackRealignable();
480 bool TargetRegisterInfo::shouldRealignStack(const MachineFunction
&MF
) const {
481 return MF
.getFrameInfo().shouldRealignStack();
484 bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0
,
485 const uint32_t *mask1
) const {
486 unsigned N
= (getNumRegs()+31) / 32;
487 for (unsigned I
= 0; I
< N
; ++I
)
488 if ((mask0
[I
] & mask1
[I
]) != mask0
[I
])
494 TargetRegisterInfo::getRegSizeInBits(Register Reg
,
495 const MachineRegisterInfo
&MRI
) const {
496 const TargetRegisterClass
*RC
{};
497 if (Reg
.isPhysical()) {
498 // The size is not directly available for physical registers.
499 // Instead, we need to access a register class that contains Reg and
500 // get the size of that register class.
501 RC
= getMinimalPhysRegClass(Reg
);
502 assert(RC
&& "Unable to deduce the register class");
503 return getRegSizeInBits(*RC
);
505 LLT Ty
= MRI
.getType(Reg
);
507 return Ty
.getSizeInBits();
509 // Since Reg is not a generic register, it may have a register class.
510 RC
= MRI
.getRegClass(Reg
);
511 assert(RC
&& "Unable to deduce the register class");
512 return getRegSizeInBits(*RC
);
515 bool TargetRegisterInfo::getCoveringSubRegIndexes(
516 const MachineRegisterInfo
&MRI
, const TargetRegisterClass
*RC
,
517 LaneBitmask LaneMask
, SmallVectorImpl
<unsigned> &NeededIndexes
) const {
518 SmallVector
<unsigned, 8> PossibleIndexes
;
519 unsigned BestIdx
= 0;
520 unsigned BestCover
= 0;
522 for (unsigned Idx
= 1, E
= getNumSubRegIndices(); Idx
< E
; ++Idx
) {
523 // Is this index even compatible with the given class?
524 if (getSubClassWithSubReg(RC
, Idx
) != RC
)
526 LaneBitmask SubRegMask
= getSubRegIndexLaneMask(Idx
);
527 // Early exit if we found a perfect match.
528 if (SubRegMask
== LaneMask
) {
533 // The index must not cover any lanes outside \p LaneMask.
534 if ((SubRegMask
& ~LaneMask
).any())
537 unsigned PopCount
= SubRegMask
.getNumLanes();
538 PossibleIndexes
.push_back(Idx
);
539 if (PopCount
> BestCover
) {
540 BestCover
= PopCount
;
545 // Abort if we cannot possibly implement the COPY with the given indexes.
549 NeededIndexes
.push_back(BestIdx
);
551 // Greedy heuristic: Keep iterating keeping the best covering subreg index
553 LaneBitmask LanesLeft
= LaneMask
& ~getSubRegIndexLaneMask(BestIdx
);
554 while (LanesLeft
.any()) {
555 unsigned BestIdx
= 0;
556 int BestCover
= std::numeric_limits
<int>::min();
557 for (unsigned Idx
: PossibleIndexes
) {
558 LaneBitmask SubRegMask
= getSubRegIndexLaneMask(Idx
);
559 // Early exit if we found a perfect match.
560 if (SubRegMask
== LanesLeft
) {
565 // Do not cover already-covered lanes to avoid creating cycles
566 // in copy bundles (= bundle contains copies that write to the
568 if ((SubRegMask
& ~LanesLeft
).any())
571 // Try to cover as many of the remaining lanes as possible.
572 const int Cover
= (SubRegMask
& LanesLeft
).getNumLanes();
573 if (Cover
> BestCover
) {
580 return false; // Impossible to handle
582 NeededIndexes
.push_back(BestIdx
);
584 LanesLeft
&= ~getSubRegIndexLaneMask(BestIdx
);
590 unsigned TargetRegisterInfo::getSubRegIdxSize(unsigned Idx
) const {
591 assert(Idx
&& Idx
< getNumSubRegIndices() &&
592 "This is not a subregister index");
593 return SubRegIdxRanges
[HwMode
* getNumSubRegIndices() + Idx
].Size
;
596 unsigned TargetRegisterInfo::getSubRegIdxOffset(unsigned Idx
) const {
597 assert(Idx
&& Idx
< getNumSubRegIndices() &&
598 "This is not a subregister index");
599 return SubRegIdxRanges
[HwMode
* getNumSubRegIndices() + Idx
].Offset
;
603 TargetRegisterInfo::lookThruCopyLike(Register SrcReg
,
604 const MachineRegisterInfo
*MRI
) const {
606 const MachineInstr
*MI
= MRI
->getVRegDef(SrcReg
);
607 if (!MI
->isCopyLike())
612 CopySrcReg
= MI
->getOperand(1).getReg();
614 assert(MI
->isSubregToReg() && "Bad opcode for lookThruCopyLike");
615 CopySrcReg
= MI
->getOperand(2).getReg();
618 if (!CopySrcReg
.isVirtual())
625 Register
TargetRegisterInfo::lookThruSingleUseCopyChain(
626 Register SrcReg
, const MachineRegisterInfo
*MRI
) const {
628 const MachineInstr
*MI
= MRI
->getVRegDef(SrcReg
);
629 // Found the real definition, return it if it has a single use.
630 if (!MI
->isCopyLike())
631 return MRI
->hasOneNonDBGUse(SrcReg
) ? SrcReg
: Register();
635 CopySrcReg
= MI
->getOperand(1).getReg();
637 assert(MI
->isSubregToReg() && "Bad opcode for lookThruCopyLike");
638 CopySrcReg
= MI
->getOperand(2).getReg();
641 // Continue only if the next definition in the chain is for a virtual
642 // register that has a single use.
643 if (!CopySrcReg
.isVirtual() || !MRI
->hasOneNonDBGUse(CopySrcReg
))
650 void TargetRegisterInfo::getOffsetOpcodes(
651 const StackOffset
&Offset
, SmallVectorImpl
<uint64_t> &Ops
) const {
652 assert(!Offset
.getScalable() && "Scalable offsets are not handled");
653 DIExpression::appendOffset(Ops
, Offset
.getFixed());
657 TargetRegisterInfo::prependOffsetExpression(const DIExpression
*Expr
,
658 unsigned PrependFlags
,
659 const StackOffset
&Offset
) const {
660 assert((PrependFlags
&
661 ~(DIExpression::DerefBefore
| DIExpression::DerefAfter
|
662 DIExpression::StackValue
| DIExpression::EntryValue
)) == 0 &&
663 "Unsupported prepend flag");
664 SmallVector
<uint64_t, 16> OffsetExpr
;
665 if (PrependFlags
& DIExpression::DerefBefore
)
666 OffsetExpr
.push_back(dwarf::DW_OP_deref
);
667 getOffsetOpcodes(Offset
, OffsetExpr
);
668 if (PrependFlags
& DIExpression::DerefAfter
)
669 OffsetExpr
.push_back(dwarf::DW_OP_deref
);
670 return DIExpression::prependOpcodes(Expr
, OffsetExpr
,
671 PrependFlags
& DIExpression::StackValue
,
672 PrependFlags
& DIExpression::EntryValue
);
675 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
677 void TargetRegisterInfo::dumpReg(Register Reg
, unsigned SubRegIndex
,
678 const TargetRegisterInfo
*TRI
) {
679 dbgs() << printReg(Reg
, TRI
, SubRegIndex
) << "\n";