1 //==- TargetRegisterInfo.cpp - Target Register Information Implementation --==//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements the TargetRegisterInfo interface.
11 //===----------------------------------------------------------------------===//
13 #include "llvm/CodeGen/TargetRegisterInfo.h"
14 #include "llvm/ADT/ArrayRef.h"
15 #include "llvm/ADT/BitVector.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/ADT/StringExtras.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineFunction.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/LiveInterval.h"
23 #include "llvm/CodeGen/TargetFrameLowering.h"
24 #include "llvm/CodeGen/TargetInstrInfo.h"
25 #include "llvm/CodeGen/TargetSubtargetInfo.h"
26 #include "llvm/CodeGen/VirtRegMap.h"
27 #include "llvm/Config/llvm-config.h"
28 #include "llvm/IR/Attributes.h"
29 #include "llvm/IR/DebugInfoMetadata.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/MC/MCRegisterInfo.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Support/Compiler.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/MachineValueType.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/Printable.h"
38 #include "llvm/Support/raw_ostream.h"
42 #define DEBUG_TYPE "target-reg-info"
46 static cl::opt
<unsigned>
47 HugeSizeForSplit("huge-size-for-split", cl::Hidden
,
48 cl::desc("A threshold of live range size which may cause "
49 "high compile time cost in global splitting."),
52 TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterInfoDesc
*ID
,
53 regclass_iterator RCB
, regclass_iterator RCE
,
54 const char *const *SRINames
,
55 const LaneBitmask
*SRILaneMasks
,
56 LaneBitmask SRICoveringLanes
,
57 const RegClassInfo
*const RCIs
,
59 : InfoDesc(ID
), SubRegIndexNames(SRINames
),
60 SubRegIndexLaneMasks(SRILaneMasks
),
61 RegClassBegin(RCB
), RegClassEnd(RCE
),
62 CoveringLanes(SRICoveringLanes
),
63 RCInfos(RCIs
), HwMode(Mode
) {
// Defaulted out of line so the destructor (and the class's key functions)
// are emitted in this translation unit.
TargetRegisterInfo::~TargetRegisterInfo() = default;
68 bool TargetRegisterInfo::shouldRegionSplitForVirtReg(
69 const MachineFunction
&MF
, const LiveInterval
&VirtReg
) const {
70 const TargetInstrInfo
*TII
= MF
.getSubtarget().getInstrInfo();
71 const MachineRegisterInfo
&MRI
= MF
.getRegInfo();
72 MachineInstr
*MI
= MRI
.getUniqueVRegDef(VirtReg
.reg());
73 if (MI
&& TII
->isTriviallyReMaterializable(*MI
) &&
74 VirtReg
.size() > HugeSizeForSplit
)
79 void TargetRegisterInfo::markSuperRegs(BitVector
&RegisterSet
,
80 MCRegister Reg
) const {
81 for (MCSuperRegIterator
AI(Reg
, this, true); AI
.isValid(); ++AI
)
85 bool TargetRegisterInfo::checkAllSuperRegsMarked(const BitVector
&RegisterSet
,
86 ArrayRef
<MCPhysReg
> Exceptions
) const {
87 // Check that all super registers of reserved regs are reserved as well.
88 BitVector
Checked(getNumRegs());
89 for (unsigned Reg
: RegisterSet
.set_bits()) {
92 for (MCSuperRegIterator
SR(Reg
, this); SR
.isValid(); ++SR
) {
93 if (!RegisterSet
[*SR
] && !is_contained(Exceptions
, Reg
)) {
94 dbgs() << "Error: Super register " << printReg(*SR
, this)
95 << " of reserved register " << printReg(Reg
, this)
96 << " is not reserved.\n";
100 // We transitively check superregs. So we can remember this for later
101 // to avoid compiletime explosion in deep register hierarchies.
110 Printable
printReg(Register Reg
, const TargetRegisterInfo
*TRI
,
111 unsigned SubIdx
, const MachineRegisterInfo
*MRI
) {
112 return Printable([Reg
, TRI
, SubIdx
, MRI
](raw_ostream
&OS
) {
115 else if (Register::isStackSlot(Reg
))
116 OS
<< "SS#" << Register::stackSlot2Index(Reg
);
117 else if (Register::isVirtualRegister(Reg
)) {
118 StringRef Name
= MRI
? MRI
->getVRegName(Reg
) : "";
122 OS
<< '%' << Register::virtReg2Index(Reg
);
125 OS
<< '$' << "physreg" << Reg
;
126 else if (Reg
< TRI
->getNumRegs()) {
128 printLowerCase(TRI
->getName(Reg
), OS
);
130 llvm_unreachable("Register kind is unsupported.");
134 OS
<< ':' << TRI
->getSubRegIndexName(SubIdx
);
136 OS
<< ":sub(" << SubIdx
<< ')';
141 Printable
printRegUnit(unsigned Unit
, const TargetRegisterInfo
*TRI
) {
142 return Printable([Unit
, TRI
](raw_ostream
&OS
) {
143 // Generic printout when TRI is missing.
145 OS
<< "Unit~" << Unit
;
149 // Check for invalid register units.
150 if (Unit
>= TRI
->getNumRegUnits()) {
151 OS
<< "BadUnit~" << Unit
;
155 // Normal units have at least one root.
156 MCRegUnitRootIterator
Roots(Unit
, TRI
);
157 assert(Roots
.isValid() && "Unit has no roots.");
158 OS
<< TRI
->getName(*Roots
);
159 for (++Roots
; Roots
.isValid(); ++Roots
)
160 OS
<< '~' << TRI
->getName(*Roots
);
164 Printable
printVRegOrUnit(unsigned Unit
, const TargetRegisterInfo
*TRI
) {
165 return Printable([Unit
, TRI
](raw_ostream
&OS
) {
166 if (Register::isVirtualRegister(Unit
)) {
167 OS
<< '%' << Register::virtReg2Index(Unit
);
169 OS
<< printRegUnit(Unit
, TRI
);
174 Printable
printRegClassOrBank(Register Reg
, const MachineRegisterInfo
&RegInfo
,
175 const TargetRegisterInfo
*TRI
) {
176 return Printable([Reg
, &RegInfo
, TRI
](raw_ostream
&OS
) {
177 if (RegInfo
.getRegClassOrNull(Reg
))
178 OS
<< StringRef(TRI
->getRegClassName(RegInfo
.getRegClass(Reg
))).lower();
179 else if (RegInfo
.getRegBankOrNull(Reg
))
180 OS
<< StringRef(RegInfo
.getRegBankOrNull(Reg
)->getName()).lower();
183 assert((RegInfo
.def_empty(Reg
) || RegInfo
.getType(Reg
).isValid()) &&
184 "Generic registers must have a valid type");
189 } // end namespace llvm
191 /// getAllocatableClass - Return the maximal subclass of the given register
192 /// class that is alloctable, or NULL.
193 const TargetRegisterClass
*
194 TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass
*RC
) const {
195 if (!RC
|| RC
->isAllocatable())
198 for (BitMaskClassIterator
It(RC
->getSubClassMask(), *this); It
.isValid();
200 const TargetRegisterClass
*SubRC
= getRegClass(It
.getID());
201 if (SubRC
->isAllocatable())
207 /// getMinimalPhysRegClass - Returns the Register Class of a physical
208 /// register of the given type, picking the most sub register class of
209 /// the right type that contains this physreg.
210 const TargetRegisterClass
*
211 TargetRegisterInfo::getMinimalPhysRegClass(MCRegister reg
, MVT VT
) const {
212 assert(Register::isPhysicalRegister(reg
) &&
213 "reg must be a physical register");
215 // Pick the most sub register class of the right type that contains
217 const TargetRegisterClass
* BestRC
= nullptr;
218 for (const TargetRegisterClass
* RC
: regclasses()) {
219 if ((VT
== MVT::Other
|| isTypeLegalForClass(*RC
, VT
)) &&
220 RC
->contains(reg
) && (!BestRC
|| BestRC
->hasSubClass(RC
)))
224 assert(BestRC
&& "Couldn't find the register class");
228 const TargetRegisterClass
*
229 TargetRegisterInfo::getMinimalPhysRegClassLLT(MCRegister reg
, LLT Ty
) const {
230 assert(Register::isPhysicalRegister(reg
) &&
231 "reg must be a physical register");
233 // Pick the most sub register class of the right type that contains
235 const TargetRegisterClass
*BestRC
= nullptr;
236 for (const TargetRegisterClass
*RC
: regclasses()) {
237 if ((!Ty
.isValid() || isTypeLegalForClass(*RC
, Ty
)) && RC
->contains(reg
) &&
238 (!BestRC
|| BestRC
->hasSubClass(RC
)))
245 /// getAllocatableSetForRC - Toggle the bits that represent allocatable
246 /// registers for the specific register class.
247 static void getAllocatableSetForRC(const MachineFunction
&MF
,
248 const TargetRegisterClass
*RC
, BitVector
&R
){
249 assert(RC
->isAllocatable() && "invalid for nonallocatable sets");
250 ArrayRef
<MCPhysReg
> Order
= RC
->getRawAllocationOrder(MF
);
251 for (unsigned i
= 0; i
!= Order
.size(); ++i
)
255 BitVector
TargetRegisterInfo::getAllocatableSet(const MachineFunction
&MF
,
256 const TargetRegisterClass
*RC
) const {
257 BitVector
Allocatable(getNumRegs());
259 // A register class with no allocatable subclass returns an empty set.
260 const TargetRegisterClass
*SubClass
= getAllocatableClass(RC
);
262 getAllocatableSetForRC(MF
, SubClass
, Allocatable
);
264 for (const TargetRegisterClass
*C
: regclasses())
265 if (C
->isAllocatable())
266 getAllocatableSetForRC(MF
, C
, Allocatable
);
269 // Mask out the reserved registers
270 const MachineRegisterInfo
&MRI
= MF
.getRegInfo();
271 const BitVector
&Reserved
= MRI
.getReservedRegs();
272 Allocatable
.reset(Reserved
);
278 const TargetRegisterClass
*firstCommonClass(const uint32_t *A
,
280 const TargetRegisterInfo
*TRI
) {
281 for (unsigned I
= 0, E
= TRI
->getNumRegClasses(); I
< E
; I
+= 32)
282 if (unsigned Common
= *A
++ & *B
++)
283 return TRI
->getRegClass(I
+ countTrailingZeros(Common
));
287 const TargetRegisterClass
*
288 TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass
*A
,
289 const TargetRegisterClass
*B
) const {
290 // First take care of the trivial cases.
296 // Register classes are ordered topologically, so the largest common
297 // sub-class it the common sub-class with the smallest ID.
298 return firstCommonClass(A
->getSubClassMask(), B
->getSubClassMask(), this);
301 const TargetRegisterClass
*
302 TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass
*A
,
303 const TargetRegisterClass
*B
,
304 unsigned Idx
) const {
305 assert(A
&& B
&& "Missing register class");
306 assert(Idx
&& "Bad sub-register index");
308 // Find Idx in the list of super-register indices.
309 for (SuperRegClassIterator
RCI(B
, this); RCI
.isValid(); ++RCI
)
310 if (RCI
.getSubReg() == Idx
)
311 // The bit mask contains all register classes that are projected into B
312 // by Idx. Find a class that is also a sub-class of A.
313 return firstCommonClass(RCI
.getMask(), A
->getSubClassMask(), this);
317 const TargetRegisterClass
*TargetRegisterInfo::
318 getCommonSuperRegClass(const TargetRegisterClass
*RCA
, unsigned SubA
,
319 const TargetRegisterClass
*RCB
, unsigned SubB
,
320 unsigned &PreA
, unsigned &PreB
) const {
321 assert(RCA
&& SubA
&& RCB
&& SubB
&& "Invalid arguments");
323 // Search all pairs of sub-register indices that project into RCA and RCB
324 // respectively. This is quadratic, but usually the sets are very small. On
325 // most targets like X86, there will only be a single sub-register index
326 // (e.g., sub_16bit projecting into GR16).
328 // The worst case is a register class like DPR on ARM.
329 // We have indices dsub_0..dsub_7 projecting into that class.
331 // It is very common that one register class is a sub-register of the other.
332 // Arrange for RCA to be the larger register so the answer will be found in
333 // the first iteration. This makes the search linear for the most common
335 const TargetRegisterClass
*BestRC
= nullptr;
336 unsigned *BestPreA
= &PreA
;
337 unsigned *BestPreB
= &PreB
;
338 if (getRegSizeInBits(*RCA
) < getRegSizeInBits(*RCB
)) {
340 std::swap(SubA
, SubB
);
341 std::swap(BestPreA
, BestPreB
);
344 // Also terminate the search one we have found a register class as small as
346 unsigned MinSize
= getRegSizeInBits(*RCA
);
348 for (SuperRegClassIterator
IA(RCA
, this, true); IA
.isValid(); ++IA
) {
349 unsigned FinalA
= composeSubRegIndices(IA
.getSubReg(), SubA
);
350 for (SuperRegClassIterator
IB(RCB
, this, true); IB
.isValid(); ++IB
) {
351 // Check if a common super-register class exists for this index pair.
352 const TargetRegisterClass
*RC
=
353 firstCommonClass(IA
.getMask(), IB
.getMask(), this);
354 if (!RC
|| getRegSizeInBits(*RC
) < MinSize
)
357 // The indexes must compose identically: PreA+SubA == PreB+SubB.
358 unsigned FinalB
= composeSubRegIndices(IB
.getSubReg(), SubB
);
359 if (FinalA
!= FinalB
)
362 // Is RC a better candidate than BestRC?
363 if (BestRC
&& getRegSizeInBits(*RC
) >= getRegSizeInBits(*BestRC
))
366 // Yes, RC is the smallest super-register seen so far.
368 *BestPreA
= IA
.getSubReg();
369 *BestPreB
= IB
.getSubReg();
371 // Bail early if we reached MinSize. We won't find a better candidate.
372 if (getRegSizeInBits(*BestRC
) == MinSize
)
379 /// Check if the registers defined by the pair (RegisterClass, SubReg)
380 /// share the same register file.
381 static bool shareSameRegisterFile(const TargetRegisterInfo
&TRI
,
382 const TargetRegisterClass
*DefRC
,
384 const TargetRegisterClass
*SrcRC
,
385 unsigned SrcSubReg
) {
386 // Same register class.
390 // Both operands are sub registers. Check if they share a register class.
391 unsigned SrcIdx
, DefIdx
;
392 if (SrcSubReg
&& DefSubReg
) {
393 return TRI
.getCommonSuperRegClass(SrcRC
, SrcSubReg
, DefRC
, DefSubReg
,
394 SrcIdx
, DefIdx
) != nullptr;
397 // At most one of the register is a sub register, make it Src to avoid
398 // duplicating the test.
400 std::swap(DefSubReg
, SrcSubReg
);
401 std::swap(DefRC
, SrcRC
);
404 // One of the register is a sub register, check if we can get a superclass.
406 return TRI
.getMatchingSuperRegClass(SrcRC
, DefRC
, SrcSubReg
) != nullptr;
409 return TRI
.getCommonSubClass(DefRC
, SrcRC
) != nullptr;
412 bool TargetRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass
*DefRC
,
414 const TargetRegisterClass
*SrcRC
,
415 unsigned SrcSubReg
) const {
416 // If this source does not incur a cross register bank copy, use it.
417 return shareSameRegisterFile(*this, DefRC
, DefSubReg
, SrcRC
, SrcSubReg
);
420 // Compute target-independent register allocator hints to help eliminate copies.
421 bool TargetRegisterInfo::getRegAllocationHints(
422 Register VirtReg
, ArrayRef
<MCPhysReg
> Order
,
423 SmallVectorImpl
<MCPhysReg
> &Hints
, const MachineFunction
&MF
,
424 const VirtRegMap
*VRM
, const LiveRegMatrix
*Matrix
) const {
425 const MachineRegisterInfo
&MRI
= MF
.getRegInfo();
426 const std::pair
<Register
, SmallVector
<Register
, 4>> &Hints_MRI
=
427 MRI
.getRegAllocationHints(VirtReg
);
429 SmallSet
<Register
, 32> HintedRegs
;
430 // First hint may be a target hint.
431 bool Skip
= (Hints_MRI
.first
!= 0);
432 for (auto Reg
: Hints_MRI
.second
) {
438 // Target-independent hints are either a physical or a virtual register.
440 if (VRM
&& Phys
.isVirtual())
441 Phys
= VRM
->getPhys(Phys
);
443 // Don't add the same reg twice (Hints_MRI may contain multiple virtual
444 // registers allocated to the same physreg).
445 if (!HintedRegs
.insert(Phys
).second
)
447 // Check that Phys is a valid hint in VirtReg's register class.
448 if (!Phys
.isPhysical())
450 if (MRI
.isReserved(Phys
))
452 // Check that Phys is in the allocation order. We shouldn't heed hints
453 // from VirtReg's register class if they aren't in the allocation order. The
454 // target probably has a reason for removing the register.
455 if (!is_contained(Order
, Phys
))
458 // All clear, tell the register allocator to prefer this register.
459 Hints
.push_back(Phys
);
464 bool TargetRegisterInfo::isCalleeSavedPhysReg(
465 MCRegister PhysReg
, const MachineFunction
&MF
) const {
468 const uint32_t *callerPreservedRegs
=
469 getCallPreservedMask(MF
, MF
.getFunction().getCallingConv());
470 if (callerPreservedRegs
) {
471 assert(Register::isPhysicalRegister(PhysReg
) &&
472 "Expected physical register");
473 return (callerPreservedRegs
[PhysReg
/ 32] >> PhysReg
% 32) & 1;
478 bool TargetRegisterInfo::canRealignStack(const MachineFunction
&MF
) const {
479 return !MF
.getFunction().hasFnAttribute("no-realign-stack");
482 bool TargetRegisterInfo::shouldRealignStack(const MachineFunction
&MF
) const {
483 const MachineFrameInfo
&MFI
= MF
.getFrameInfo();
484 const TargetFrameLowering
*TFI
= MF
.getSubtarget().getFrameLowering();
485 const Function
&F
= MF
.getFunction();
486 return F
.hasFnAttribute("stackrealign") ||
487 (MFI
.getMaxAlign() > TFI
->getStackAlign()) ||
488 F
.hasFnAttribute(Attribute::StackAlignment
);
491 bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0
,
492 const uint32_t *mask1
) const {
493 unsigned N
= (getNumRegs()+31) / 32;
494 for (unsigned I
= 0; I
< N
; ++I
)
495 if ((mask0
[I
] & mask1
[I
]) != mask0
[I
])
501 TargetRegisterInfo::getRegSizeInBits(Register Reg
,
502 const MachineRegisterInfo
&MRI
) const {
503 const TargetRegisterClass
*RC
{};
504 if (Reg
.isPhysical()) {
505 // The size is not directly available for physical registers.
506 // Instead, we need to access a register class that contains Reg and
507 // get the size of that register class.
508 RC
= getMinimalPhysRegClass(Reg
);
510 LLT Ty
= MRI
.getType(Reg
);
511 unsigned RegSize
= Ty
.isValid() ? Ty
.getSizeInBits() : 0;
512 // If Reg is not a generic register, query the register class to
516 // Since Reg is not a generic register, it must have a register class.
517 RC
= MRI
.getRegClass(Reg
);
519 assert(RC
&& "Unable to deduce the register class");
520 return getRegSizeInBits(*RC
);
523 bool TargetRegisterInfo::getCoveringSubRegIndexes(
524 const MachineRegisterInfo
&MRI
, const TargetRegisterClass
*RC
,
525 LaneBitmask LaneMask
, SmallVectorImpl
<unsigned> &NeededIndexes
) const {
526 SmallVector
<unsigned, 8> PossibleIndexes
;
527 unsigned BestIdx
= 0;
528 unsigned BestCover
= 0;
530 for (unsigned Idx
= 1, E
= getNumSubRegIndices(); Idx
< E
; ++Idx
) {
531 // Is this index even compatible with the given class?
532 if (getSubClassWithSubReg(RC
, Idx
) != RC
)
534 LaneBitmask SubRegMask
= getSubRegIndexLaneMask(Idx
);
535 // Early exit if we found a perfect match.
536 if (SubRegMask
== LaneMask
) {
541 // The index must not cover any lanes outside \p LaneMask.
542 if ((SubRegMask
& ~LaneMask
).any())
545 unsigned PopCount
= SubRegMask
.getNumLanes();
546 PossibleIndexes
.push_back(Idx
);
547 if (PopCount
> BestCover
) {
548 BestCover
= PopCount
;
553 // Abort if we cannot possibly implement the COPY with the given indexes.
557 NeededIndexes
.push_back(BestIdx
);
559 // Greedy heuristic: Keep iterating keeping the best covering subreg index
561 LaneBitmask LanesLeft
= LaneMask
& ~getSubRegIndexLaneMask(BestIdx
);
562 while (LanesLeft
.any()) {
563 unsigned BestIdx
= 0;
564 int BestCover
= std::numeric_limits
<int>::min();
565 for (unsigned Idx
: PossibleIndexes
) {
566 LaneBitmask SubRegMask
= getSubRegIndexLaneMask(Idx
);
567 // Early exit if we found a perfect match.
568 if (SubRegMask
== LanesLeft
) {
573 // Try to cover as much of the remaining lanes as possible but
574 // as few of the already covered lanes as possible.
575 int Cover
= (SubRegMask
& LanesLeft
).getNumLanes() -
576 (SubRegMask
& ~LanesLeft
).getNumLanes();
577 if (Cover
> BestCover
) {
584 return 0; // Impossible to handle
586 NeededIndexes
.push_back(BestIdx
);
588 LanesLeft
&= ~getSubRegIndexLaneMask(BestIdx
);
595 TargetRegisterInfo::lookThruCopyLike(Register SrcReg
,
596 const MachineRegisterInfo
*MRI
) const {
598 const MachineInstr
*MI
= MRI
->getVRegDef(SrcReg
);
599 if (!MI
->isCopyLike())
604 CopySrcReg
= MI
->getOperand(1).getReg();
606 assert(MI
->isSubregToReg() && "Bad opcode for lookThruCopyLike");
607 CopySrcReg
= MI
->getOperand(2).getReg();
610 if (!CopySrcReg
.isVirtual())
617 Register
TargetRegisterInfo::lookThruSingleUseCopyChain(
618 Register SrcReg
, const MachineRegisterInfo
*MRI
) const {
620 const MachineInstr
*MI
= MRI
->getVRegDef(SrcReg
);
621 // Found the real definition, return it if it has a single use.
622 if (!MI
->isCopyLike())
623 return MRI
->hasOneNonDBGUse(SrcReg
) ? SrcReg
: Register();
627 CopySrcReg
= MI
->getOperand(1).getReg();
629 assert(MI
->isSubregToReg() && "Bad opcode for lookThruCopyLike");
630 CopySrcReg
= MI
->getOperand(2).getReg();
633 // Continue only if the next definition in the chain is for a virtual
634 // register that has a single use.
635 if (!CopySrcReg
.isVirtual() || !MRI
->hasOneNonDBGUse(CopySrcReg
))
642 void TargetRegisterInfo::getOffsetOpcodes(
643 const StackOffset
&Offset
, SmallVectorImpl
<uint64_t> &Ops
) const {
644 assert(!Offset
.getScalable() && "Scalable offsets are not handled");
645 DIExpression::appendOffset(Ops
, Offset
.getFixed());
649 TargetRegisterInfo::prependOffsetExpression(const DIExpression
*Expr
,
650 unsigned PrependFlags
,
651 const StackOffset
&Offset
) const {
652 assert((PrependFlags
&
653 ~(DIExpression::DerefBefore
| DIExpression::DerefAfter
|
654 DIExpression::StackValue
| DIExpression::EntryValue
)) == 0 &&
655 "Unsupported prepend flag");
656 SmallVector
<uint64_t, 16> OffsetExpr
;
657 if (PrependFlags
& DIExpression::DerefBefore
)
658 OffsetExpr
.push_back(dwarf::DW_OP_deref
);
659 getOffsetOpcodes(Offset
, OffsetExpr
);
660 if (PrependFlags
& DIExpression::DerefAfter
)
661 OffsetExpr
.push_back(dwarf::DW_OP_deref
);
662 return DIExpression::prependOpcodes(Expr
, OffsetExpr
,
663 PrependFlags
& DIExpression::StackValue
,
664 PrependFlags
& DIExpression::EntryValue
);
667 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
669 void TargetRegisterInfo::dumpReg(Register Reg
, unsigned SubRegIndex
,
670 const TargetRegisterInfo
*TRI
) {
671 dbgs() << printReg(Reg
, TRI
, SubRegIndex
) << "\n";