// NOTE: web-scrape residue preserved as comments (unrelated commit subject and
// git blob header that were fused into the file by extraction):
//   [clang][NFC] simplify the unset check in `ParseLabeledStatement` (#117430)
//   llvm-project.git / llvm/lib/CodeGen/TargetRegisterInfo.cpp
//   blob 032f1a33e75c43b3a7acbc454dcdfec21882274f
//==- TargetRegisterInfo.cpp - Target Register Information Implementation --==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetRegisterInfo interface.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <limits>
#include <utility>

#define DEBUG_TYPE "target-reg-info"

using namespace llvm;
46 static cl::opt<unsigned>
47 HugeSizeForSplit("huge-size-for-split", cl::Hidden,
48 cl::desc("A threshold of live range size which may cause "
49 "high compile time cost in global splitting."),
50 cl::init(5000));
52 TargetRegisterInfo::TargetRegisterInfo(
53 const TargetRegisterInfoDesc *ID, regclass_iterator RCB,
54 regclass_iterator RCE, const char *const *SRINames,
55 const SubRegCoveredBits *SubIdxRanges, const LaneBitmask *SRILaneMasks,
56 LaneBitmask SRICoveringLanes, const RegClassInfo *const RCIs,
57 const MVT::SimpleValueType *const RCVTLists, unsigned Mode)
58 : InfoDesc(ID), SubRegIndexNames(SRINames), SubRegIdxRanges(SubIdxRanges),
59 SubRegIndexLaneMasks(SRILaneMasks), RegClassBegin(RCB), RegClassEnd(RCE),
60 CoveringLanes(SRICoveringLanes), RCInfos(RCIs), RCVTLists(RCVTLists),
61 HwMode(Mode) {}
63 TargetRegisterInfo::~TargetRegisterInfo() = default;
65 bool TargetRegisterInfo::shouldRegionSplitForVirtReg(
66 const MachineFunction &MF, const LiveInterval &VirtReg) const {
67 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
68 const MachineRegisterInfo &MRI = MF.getRegInfo();
69 MachineInstr *MI = MRI.getUniqueVRegDef(VirtReg.reg());
70 if (MI && TII->isTriviallyReMaterializable(*MI) &&
71 VirtReg.size() > HugeSizeForSplit)
72 return false;
73 return true;
76 void TargetRegisterInfo::markSuperRegs(BitVector &RegisterSet,
77 MCRegister Reg) const {
78 for (MCPhysReg SR : superregs_inclusive(Reg))
79 RegisterSet.set(SR);
82 bool TargetRegisterInfo::checkAllSuperRegsMarked(const BitVector &RegisterSet,
83 ArrayRef<MCPhysReg> Exceptions) const {
84 // Check that all super registers of reserved regs are reserved as well.
85 BitVector Checked(getNumRegs());
86 for (unsigned Reg : RegisterSet.set_bits()) {
87 if (Checked[Reg])
88 continue;
89 for (MCPhysReg SR : superregs(Reg)) {
90 if (!RegisterSet[SR] && !is_contained(Exceptions, Reg)) {
91 dbgs() << "Error: Super register " << printReg(SR, this)
92 << " of reserved register " << printReg(Reg, this)
93 << " is not reserved.\n";
94 return false;
97 // We transitively check superregs. So we can remember this for later
98 // to avoid compiletime explosion in deep register hierarchies.
99 Checked.set(SR);
102 return true;
105 namespace llvm {
107 Printable printReg(Register Reg, const TargetRegisterInfo *TRI,
108 unsigned SubIdx, const MachineRegisterInfo *MRI) {
109 return Printable([Reg, TRI, SubIdx, MRI](raw_ostream &OS) {
110 if (!Reg)
111 OS << "$noreg";
112 else if (Register::isStackSlot(Reg))
113 OS << "SS#" << Register::stackSlot2Index(Reg);
114 else if (Reg.isVirtual()) {
115 StringRef Name = MRI ? MRI->getVRegName(Reg) : "";
116 if (Name != "") {
117 OS << '%' << Name;
118 } else {
119 OS << '%' << Register::virtReg2Index(Reg);
121 } else if (!TRI)
122 OS << '$' << "physreg" << Reg.id();
123 else if (Reg < TRI->getNumRegs()) {
124 OS << '$';
125 printLowerCase(TRI->getName(Reg), OS);
126 } else
127 llvm_unreachable("Register kind is unsupported.");
129 if (SubIdx) {
130 if (TRI)
131 OS << ':' << TRI->getSubRegIndexName(SubIdx);
132 else
133 OS << ":sub(" << SubIdx << ')';
138 Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
139 return Printable([Unit, TRI](raw_ostream &OS) {
140 // Generic printout when TRI is missing.
141 if (!TRI) {
142 OS << "Unit~" << Unit;
143 return;
146 // Check for invalid register units.
147 if (Unit >= TRI->getNumRegUnits()) {
148 OS << "BadUnit~" << Unit;
149 return;
152 // Normal units have at least one root.
153 MCRegUnitRootIterator Roots(Unit, TRI);
154 assert(Roots.isValid() && "Unit has no roots.");
155 OS << TRI->getName(*Roots);
156 for (++Roots; Roots.isValid(); ++Roots)
157 OS << '~' << TRI->getName(*Roots);
161 Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
162 return Printable([Unit, TRI](raw_ostream &OS) {
163 if (Register::isVirtualRegister(Unit)) {
164 OS << '%' << Register::virtReg2Index(Unit);
165 } else {
166 OS << printRegUnit(Unit, TRI);
171 Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
172 const TargetRegisterInfo *TRI) {
173 return Printable([Reg, &RegInfo, TRI](raw_ostream &OS) {
174 if (RegInfo.getRegClassOrNull(Reg))
175 OS << StringRef(TRI->getRegClassName(RegInfo.getRegClass(Reg))).lower();
176 else if (RegInfo.getRegBankOrNull(Reg))
177 OS << StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
178 else {
179 OS << "_";
180 assert((RegInfo.def_empty(Reg) || RegInfo.getType(Reg).isValid()) &&
181 "Generic registers must have a valid type");
186 } // end namespace llvm
188 /// getAllocatableClass - Return the maximal subclass of the given register
189 /// class that is alloctable, or NULL.
190 const TargetRegisterClass *
191 TargetRegisterInfo::getAllocatableClass(const TargetRegisterClass *RC) const {
192 if (!RC || RC->isAllocatable())
193 return RC;
195 for (BitMaskClassIterator It(RC->getSubClassMask(), *this); It.isValid();
196 ++It) {
197 const TargetRegisterClass *SubRC = getRegClass(It.getID());
198 if (SubRC->isAllocatable())
199 return SubRC;
201 return nullptr;
204 /// getMinimalPhysRegClass - Returns the Register Class of a physical
205 /// register of the given type, picking the most sub register class of
206 /// the right type that contains this physreg.
207 const TargetRegisterClass *
208 TargetRegisterInfo::getMinimalPhysRegClass(MCRegister reg, MVT VT) const {
209 assert(Register::isPhysicalRegister(reg) &&
210 "reg must be a physical register");
212 // Pick the most sub register class of the right type that contains
213 // this physreg.
214 const TargetRegisterClass* BestRC = nullptr;
215 for (const TargetRegisterClass* RC : regclasses()) {
216 if ((VT == MVT::Other || isTypeLegalForClass(*RC, VT)) &&
217 RC->contains(reg) && (!BestRC || BestRC->hasSubClass(RC)))
218 BestRC = RC;
221 assert(BestRC && "Couldn't find the register class");
222 return BestRC;
225 const TargetRegisterClass *
226 TargetRegisterInfo::getMinimalPhysRegClassLLT(MCRegister reg, LLT Ty) const {
227 assert(Register::isPhysicalRegister(reg) &&
228 "reg must be a physical register");
230 // Pick the most sub register class of the right type that contains
231 // this physreg.
232 const TargetRegisterClass *BestRC = nullptr;
233 for (const TargetRegisterClass *RC : regclasses()) {
234 if ((!Ty.isValid() || isTypeLegalForClass(*RC, Ty)) && RC->contains(reg) &&
235 (!BestRC || BestRC->hasSubClass(RC)))
236 BestRC = RC;
239 return BestRC;
242 /// getAllocatableSetForRC - Toggle the bits that represent allocatable
243 /// registers for the specific register class.
244 static void getAllocatableSetForRC(const MachineFunction &MF,
245 const TargetRegisterClass *RC, BitVector &R){
246 assert(RC->isAllocatable() && "invalid for nonallocatable sets");
247 ArrayRef<MCPhysReg> Order = RC->getRawAllocationOrder(MF);
248 for (MCPhysReg PR : Order)
249 R.set(PR);
252 BitVector TargetRegisterInfo::getAllocatableSet(const MachineFunction &MF,
253 const TargetRegisterClass *RC) const {
254 BitVector Allocatable(getNumRegs());
255 if (RC) {
256 // A register class with no allocatable subclass returns an empty set.
257 const TargetRegisterClass *SubClass = getAllocatableClass(RC);
258 if (SubClass)
259 getAllocatableSetForRC(MF, SubClass, Allocatable);
260 } else {
261 for (const TargetRegisterClass *C : regclasses())
262 if (C->isAllocatable())
263 getAllocatableSetForRC(MF, C, Allocatable);
266 // Mask out the reserved registers
267 const MachineRegisterInfo &MRI = MF.getRegInfo();
268 const BitVector &Reserved = MRI.getReservedRegs();
269 Allocatable.reset(Reserved);
271 return Allocatable;
274 static inline
275 const TargetRegisterClass *firstCommonClass(const uint32_t *A,
276 const uint32_t *B,
277 const TargetRegisterInfo *TRI) {
278 for (unsigned I = 0, E = TRI->getNumRegClasses(); I < E; I += 32)
279 if (unsigned Common = *A++ & *B++)
280 return TRI->getRegClass(I + llvm::countr_zero(Common));
281 return nullptr;
284 const TargetRegisterClass *
285 TargetRegisterInfo::getCommonSubClass(const TargetRegisterClass *A,
286 const TargetRegisterClass *B) const {
287 // First take care of the trivial cases.
288 if (A == B)
289 return A;
290 if (!A || !B)
291 return nullptr;
293 // Register classes are ordered topologically, so the largest common
294 // sub-class it the common sub-class with the smallest ID.
295 return firstCommonClass(A->getSubClassMask(), B->getSubClassMask(), this);
298 const TargetRegisterClass *
299 TargetRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
300 const TargetRegisterClass *B,
301 unsigned Idx) const {
302 assert(A && B && "Missing register class");
303 assert(Idx && "Bad sub-register index");
305 // Find Idx in the list of super-register indices.
306 for (SuperRegClassIterator RCI(B, this); RCI.isValid(); ++RCI)
307 if (RCI.getSubReg() == Idx)
308 // The bit mask contains all register classes that are projected into B
309 // by Idx. Find a class that is also a sub-class of A.
310 return firstCommonClass(RCI.getMask(), A->getSubClassMask(), this);
311 return nullptr;
314 const TargetRegisterClass *TargetRegisterInfo::
315 getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
316 const TargetRegisterClass *RCB, unsigned SubB,
317 unsigned &PreA, unsigned &PreB) const {
318 assert(RCA && SubA && RCB && SubB && "Invalid arguments");
320 // Search all pairs of sub-register indices that project into RCA and RCB
321 // respectively. This is quadratic, but usually the sets are very small. On
322 // most targets like X86, there will only be a single sub-register index
323 // (e.g., sub_16bit projecting into GR16).
325 // The worst case is a register class like DPR on ARM.
326 // We have indices dsub_0..dsub_7 projecting into that class.
328 // It is very common that one register class is a sub-register of the other.
329 // Arrange for RCA to be the larger register so the answer will be found in
330 // the first iteration. This makes the search linear for the most common
331 // case.
332 const TargetRegisterClass *BestRC = nullptr;
333 unsigned *BestPreA = &PreA;
334 unsigned *BestPreB = &PreB;
335 if (getRegSizeInBits(*RCA) < getRegSizeInBits(*RCB)) {
336 std::swap(RCA, RCB);
337 std::swap(SubA, SubB);
338 std::swap(BestPreA, BestPreB);
341 // Also terminate the search one we have found a register class as small as
342 // RCA.
343 unsigned MinSize = getRegSizeInBits(*RCA);
345 for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
346 unsigned FinalA = composeSubRegIndices(IA.getSubReg(), SubA);
347 for (SuperRegClassIterator IB(RCB, this, true); IB.isValid(); ++IB) {
348 // Check if a common super-register class exists for this index pair.
349 const TargetRegisterClass *RC =
350 firstCommonClass(IA.getMask(), IB.getMask(), this);
351 if (!RC || getRegSizeInBits(*RC) < MinSize)
352 continue;
354 // The indexes must compose identically: PreA+SubA == PreB+SubB.
355 unsigned FinalB = composeSubRegIndices(IB.getSubReg(), SubB);
356 if (FinalA != FinalB)
357 continue;
359 // Is RC a better candidate than BestRC?
360 if (BestRC && getRegSizeInBits(*RC) >= getRegSizeInBits(*BestRC))
361 continue;
363 // Yes, RC is the smallest super-register seen so far.
364 BestRC = RC;
365 *BestPreA = IA.getSubReg();
366 *BestPreB = IB.getSubReg();
368 // Bail early if we reached MinSize. We won't find a better candidate.
369 if (getRegSizeInBits(*BestRC) == MinSize)
370 return BestRC;
373 return BestRC;
376 /// Check if the registers defined by the pair (RegisterClass, SubReg)
377 /// share the same register file.
378 static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
379 const TargetRegisterClass *DefRC,
380 unsigned DefSubReg,
381 const TargetRegisterClass *SrcRC,
382 unsigned SrcSubReg) {
383 // Same register class.
384 if (DefRC == SrcRC)
385 return true;
387 // Both operands are sub registers. Check if they share a register class.
388 unsigned SrcIdx, DefIdx;
389 if (SrcSubReg && DefSubReg) {
390 return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
391 SrcIdx, DefIdx) != nullptr;
394 // At most one of the register is a sub register, make it Src to avoid
395 // duplicating the test.
396 if (!SrcSubReg) {
397 std::swap(DefSubReg, SrcSubReg);
398 std::swap(DefRC, SrcRC);
401 // One of the register is a sub register, check if we can get a superclass.
402 if (SrcSubReg)
403 return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != nullptr;
405 // Plain copy.
406 return TRI.getCommonSubClass(DefRC, SrcRC) != nullptr;
409 bool TargetRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
410 unsigned DefSubReg,
411 const TargetRegisterClass *SrcRC,
412 unsigned SrcSubReg) const {
413 // If this source does not incur a cross register bank copy, use it.
414 return shareSameRegisterFile(*this, DefRC, DefSubReg, SrcRC, SrcSubReg);
417 // Compute target-independent register allocator hints to help eliminate copies.
418 bool TargetRegisterInfo::getRegAllocationHints(
419 Register VirtReg, ArrayRef<MCPhysReg> Order,
420 SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
421 const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
422 const MachineRegisterInfo &MRI = MF.getRegInfo();
423 const std::pair<unsigned, SmallVector<Register, 4>> *Hints_MRI =
424 MRI.getRegAllocationHints(VirtReg);
426 if (!Hints_MRI)
427 return false;
429 SmallSet<Register, 32> HintedRegs;
430 // First hint may be a target hint.
431 bool Skip = (Hints_MRI->first != 0);
432 for (auto Reg : Hints_MRI->second) {
433 if (Skip) {
434 Skip = false;
435 continue;
438 // Target-independent hints are either a physical or a virtual register.
439 Register Phys = Reg;
440 if (VRM && Phys.isVirtual())
441 Phys = VRM->getPhys(Phys);
443 // Don't add the same reg twice (Hints_MRI may contain multiple virtual
444 // registers allocated to the same physreg).
445 if (!HintedRegs.insert(Phys).second)
446 continue;
447 // Check that Phys is a valid hint in VirtReg's register class.
448 if (!Phys.isPhysical())
449 continue;
450 if (MRI.isReserved(Phys))
451 continue;
452 // Check that Phys is in the allocation order. We shouldn't heed hints
453 // from VirtReg's register class if they aren't in the allocation order. The
454 // target probably has a reason for removing the register.
455 if (!is_contained(Order, Phys))
456 continue;
458 // All clear, tell the register allocator to prefer this register.
459 Hints.push_back(Phys);
461 return false;
464 bool TargetRegisterInfo::isCalleeSavedPhysReg(
465 MCRegister PhysReg, const MachineFunction &MF) const {
466 if (PhysReg == 0)
467 return false;
468 const uint32_t *callerPreservedRegs =
469 getCallPreservedMask(MF, MF.getFunction().getCallingConv());
470 if (callerPreservedRegs) {
471 assert(Register::isPhysicalRegister(PhysReg) &&
472 "Expected physical register");
473 return (callerPreservedRegs[PhysReg / 32] >> PhysReg % 32) & 1;
475 return false;
478 bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const {
479 return MF.getFrameInfo().isStackRealignable();
482 bool TargetRegisterInfo::shouldRealignStack(const MachineFunction &MF) const {
483 return MF.getFrameInfo().shouldRealignStack();
486 bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0,
487 const uint32_t *mask1) const {
488 unsigned N = (getNumRegs()+31) / 32;
489 for (unsigned I = 0; I < N; ++I)
490 if ((mask0[I] & mask1[I]) != mask0[I])
491 return false;
492 return true;
495 TypeSize
496 TargetRegisterInfo::getRegSizeInBits(Register Reg,
497 const MachineRegisterInfo &MRI) const {
498 const TargetRegisterClass *RC{};
499 if (Reg.isPhysical()) {
500 // The size is not directly available for physical registers.
501 // Instead, we need to access a register class that contains Reg and
502 // get the size of that register class.
503 RC = getMinimalPhysRegClass(Reg);
504 assert(RC && "Unable to deduce the register class");
505 return getRegSizeInBits(*RC);
507 LLT Ty = MRI.getType(Reg);
508 if (Ty.isValid())
509 return Ty.getSizeInBits();
511 // Since Reg is not a generic register, it may have a register class.
512 RC = MRI.getRegClass(Reg);
513 assert(RC && "Unable to deduce the register class");
514 return getRegSizeInBits(*RC);
517 bool TargetRegisterInfo::getCoveringSubRegIndexes(
518 const MachineRegisterInfo &MRI, const TargetRegisterClass *RC,
519 LaneBitmask LaneMask, SmallVectorImpl<unsigned> &NeededIndexes) const {
520 SmallVector<unsigned, 8> PossibleIndexes;
521 unsigned BestIdx = 0;
522 unsigned BestCover = 0;
524 for (unsigned Idx = 1, E = getNumSubRegIndices(); Idx < E; ++Idx) {
525 // Is this index even compatible with the given class?
526 if (getSubClassWithSubReg(RC, Idx) != RC)
527 continue;
528 LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
529 // Early exit if we found a perfect match.
530 if (SubRegMask == LaneMask) {
531 BestIdx = Idx;
532 break;
535 // The index must not cover any lanes outside \p LaneMask.
536 if ((SubRegMask & ~LaneMask).any())
537 continue;
539 unsigned PopCount = SubRegMask.getNumLanes();
540 PossibleIndexes.push_back(Idx);
541 if (PopCount > BestCover) {
542 BestCover = PopCount;
543 BestIdx = Idx;
547 // Abort if we cannot possibly implement the COPY with the given indexes.
548 if (BestIdx == 0)
549 return false;
551 NeededIndexes.push_back(BestIdx);
553 // Greedy heuristic: Keep iterating keeping the best covering subreg index
554 // each time.
555 LaneBitmask LanesLeft = LaneMask & ~getSubRegIndexLaneMask(BestIdx);
556 while (LanesLeft.any()) {
557 unsigned BestIdx = 0;
558 int BestCover = std::numeric_limits<int>::min();
559 for (unsigned Idx : PossibleIndexes) {
560 LaneBitmask SubRegMask = getSubRegIndexLaneMask(Idx);
561 // Early exit if we found a perfect match.
562 if (SubRegMask == LanesLeft) {
563 BestIdx = Idx;
564 break;
567 // Do not cover already-covered lanes to avoid creating cycles
568 // in copy bundles (= bundle contains copies that write to the
569 // registers).
570 if ((SubRegMask & ~LanesLeft).any())
571 continue;
573 // Try to cover as many of the remaining lanes as possible.
574 const int Cover = (SubRegMask & LanesLeft).getNumLanes();
575 if (Cover > BestCover) {
576 BestCover = Cover;
577 BestIdx = Idx;
581 if (BestIdx == 0)
582 return false; // Impossible to handle
584 NeededIndexes.push_back(BestIdx);
586 LanesLeft &= ~getSubRegIndexLaneMask(BestIdx);
589 return BestIdx;
592 unsigned TargetRegisterInfo::getSubRegIdxSize(unsigned Idx) const {
593 assert(Idx && Idx < getNumSubRegIndices() &&
594 "This is not a subregister index");
595 return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Size;
598 unsigned TargetRegisterInfo::getSubRegIdxOffset(unsigned Idx) const {
599 assert(Idx && Idx < getNumSubRegIndices() &&
600 "This is not a subregister index");
601 return SubRegIdxRanges[HwMode * getNumSubRegIndices() + Idx].Offset;
604 Register
605 TargetRegisterInfo::lookThruCopyLike(Register SrcReg,
606 const MachineRegisterInfo *MRI) const {
607 while (true) {
608 const MachineInstr *MI = MRI->getVRegDef(SrcReg);
609 if (!MI->isCopyLike())
610 return SrcReg;
612 Register CopySrcReg;
613 if (MI->isCopy())
614 CopySrcReg = MI->getOperand(1).getReg();
615 else {
616 assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
617 CopySrcReg = MI->getOperand(2).getReg();
620 if (!CopySrcReg.isVirtual())
621 return CopySrcReg;
623 SrcReg = CopySrcReg;
627 Register TargetRegisterInfo::lookThruSingleUseCopyChain(
628 Register SrcReg, const MachineRegisterInfo *MRI) const {
629 while (true) {
630 const MachineInstr *MI = MRI->getVRegDef(SrcReg);
631 // Found the real definition, return it if it has a single use.
632 if (!MI->isCopyLike())
633 return MRI->hasOneNonDBGUse(SrcReg) ? SrcReg : Register();
635 Register CopySrcReg;
636 if (MI->isCopy())
637 CopySrcReg = MI->getOperand(1).getReg();
638 else {
639 assert(MI->isSubregToReg() && "Bad opcode for lookThruCopyLike");
640 CopySrcReg = MI->getOperand(2).getReg();
643 // Continue only if the next definition in the chain is for a virtual
644 // register that has a single use.
645 if (!CopySrcReg.isVirtual() || !MRI->hasOneNonDBGUse(CopySrcReg))
646 return Register();
648 SrcReg = CopySrcReg;
652 void TargetRegisterInfo::getOffsetOpcodes(
653 const StackOffset &Offset, SmallVectorImpl<uint64_t> &Ops) const {
654 assert(!Offset.getScalable() && "Scalable offsets are not handled");
655 DIExpression::appendOffset(Ops, Offset.getFixed());
658 DIExpression *
659 TargetRegisterInfo::prependOffsetExpression(const DIExpression *Expr,
660 unsigned PrependFlags,
661 const StackOffset &Offset) const {
662 assert((PrependFlags &
663 ~(DIExpression::DerefBefore | DIExpression::DerefAfter |
664 DIExpression::StackValue | DIExpression::EntryValue)) == 0 &&
665 "Unsupported prepend flag");
666 SmallVector<uint64_t, 16> OffsetExpr;
667 if (PrependFlags & DIExpression::DerefBefore)
668 OffsetExpr.push_back(dwarf::DW_OP_deref);
669 getOffsetOpcodes(Offset, OffsetExpr);
670 if (PrependFlags & DIExpression::DerefAfter)
671 OffsetExpr.push_back(dwarf::DW_OP_deref);
672 return DIExpression::prependOpcodes(Expr, OffsetExpr,
673 PrependFlags & DIExpression::StackValue,
674 PrependFlags & DIExpression::EntryValue);
677 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
678 LLVM_DUMP_METHOD
679 void TargetRegisterInfo::dumpReg(Register Reg, unsigned SubRegIndex,
680 const TargetRegisterInfo *TRI) {
681 dbgs() << printReg(Reg, TRI, SubRegIndex) << "\n";
683 #endif