//===- CriticalAntiDepBreaker.cpp - Anti-dep breaker ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the CriticalAntiDepBreaker class, which
// implements register anti-dependence breaking along a blocks
// critical path during post-RA scheduler.
//
//===----------------------------------------------------------------------===//
#include "CriticalAntiDepBreaker.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <map>
#include <utility>
#include <vector>
43 #define DEBUG_TYPE "post-RA-sched"
45 CriticalAntiDepBreaker::CriticalAntiDepBreaker(MachineFunction
&MFi
,
46 const RegisterClassInfo
&RCI
)
47 : AntiDepBreaker(), MF(MFi
), MRI(MF
.getRegInfo()),
48 TII(MF
.getSubtarget().getInstrInfo()),
49 TRI(MF
.getSubtarget().getRegisterInfo()), RegClassInfo(RCI
),
50 Classes(TRI
->getNumRegs(), nullptr), KillIndices(TRI
->getNumRegs(), 0),
51 DefIndices(TRI
->getNumRegs(), 0), KeepRegs(TRI
->getNumRegs(), false) {}
53 CriticalAntiDepBreaker::~CriticalAntiDepBreaker() = default;
55 void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock
*BB
) {
56 const unsigned BBSize
= BB
->size();
57 for (unsigned i
= 0, e
= TRI
->getNumRegs(); i
!= e
; ++i
) {
58 // Clear out the register class data.
61 // Initialize the indices to indicate that no registers are live.
63 DefIndices
[i
] = BBSize
;
66 // Clear "do not change" set.
69 bool IsReturnBlock
= BB
->isReturnBlock();
71 // Examine the live-in regs of all successors.
72 for (MachineBasicBlock::succ_iterator SI
= BB
->succ_begin(),
73 SE
= BB
->succ_end(); SI
!= SE
; ++SI
)
74 for (const auto &LI
: (*SI
)->liveins()) {
75 for (MCRegAliasIterator
AI(LI
.PhysReg
, TRI
, true); AI
.isValid(); ++AI
) {
77 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
78 KillIndices
[Reg
] = BBSize
;
79 DefIndices
[Reg
] = ~0u;
83 // Mark live-out callee-saved registers. In a return block this is
84 // all callee-saved registers. In non-return this is any
85 // callee-saved register that is not saved in the prolog.
86 const MachineFrameInfo
&MFI
= MF
.getFrameInfo();
87 BitVector Pristine
= MFI
.getPristineRegs(MF
);
88 for (const MCPhysReg
*I
= MF
.getRegInfo().getCalleeSavedRegs(); *I
;
91 if (!IsReturnBlock
&& !Pristine
.test(Reg
))
93 for (MCRegAliasIterator
AI(*I
, TRI
, true); AI
.isValid(); ++AI
) {
95 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
96 KillIndices
[Reg
] = BBSize
;
97 DefIndices
[Reg
] = ~0u;
102 void CriticalAntiDepBreaker::FinishBlock() {
107 void CriticalAntiDepBreaker::Observe(MachineInstr
&MI
, unsigned Count
,
108 unsigned InsertPosIndex
) {
109 // Kill instructions can define registers but are really nops, and there might
110 // be a real definition earlier that needs to be paired with uses dominated by
113 // FIXME: It may be possible to remove the isKill() restriction once PR18663
114 // has been properly fixed. There can be value in processing kills as seen in
115 // the AggressiveAntiDepBreaker class.
116 if (MI
.isDebugInstr() || MI
.isKill())
118 assert(Count
< InsertPosIndex
&& "Instruction index out of expected range!");
120 for (unsigned Reg
= 0; Reg
!= TRI
->getNumRegs(); ++Reg
) {
121 if (KillIndices
[Reg
] != ~0u) {
122 // If Reg is currently live, then mark that it can't be renamed as
123 // we don't know the extent of its live-range anymore (now that it
124 // has been scheduled).
125 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
126 KillIndices
[Reg
] = Count
;
127 } else if (DefIndices
[Reg
] < InsertPosIndex
&& DefIndices
[Reg
] >= Count
) {
128 // Any register which was defined within the previous scheduling region
129 // may have been rescheduled and its lifetime may overlap with registers
130 // in ways not reflected in our current liveness state. For each such
131 // register, adjust the liveness state to be conservatively correct.
132 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
134 // Move the def index to the end of the previous region, to reflect
135 // that the def could theoretically have been scheduled at the end.
136 DefIndices
[Reg
] = InsertPosIndex
;
140 PrescanInstruction(MI
);
141 ScanInstruction(MI
, Count
);
144 /// CriticalPathStep - Return the next SUnit after SU on the bottom-up
146 static const SDep
*CriticalPathStep(const SUnit
*SU
) {
147 const SDep
*Next
= nullptr;
148 unsigned NextDepth
= 0;
149 // Find the predecessor edge with the greatest depth.
150 for (SUnit::const_pred_iterator P
= SU
->Preds
.begin(), PE
= SU
->Preds
.end();
152 const SUnit
*PredSU
= P
->getSUnit();
153 unsigned PredLatency
= P
->getLatency();
154 unsigned PredTotalLatency
= PredSU
->getDepth() + PredLatency
;
155 // In the case of a latency tie, prefer an anti-dependency edge over
156 // other types of edges.
157 if (NextDepth
< PredTotalLatency
||
158 (NextDepth
== PredTotalLatency
&& P
->getKind() == SDep::Anti
)) {
159 NextDepth
= PredTotalLatency
;
166 void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr
&MI
) {
167 // It's not safe to change register allocation for source operands of
168 // instructions that have special allocation requirements. Also assume all
169 // registers used in a call must not be changed (ABI).
170 // FIXME: The issue with predicated instruction is more complex. We are being
171 // conservative here because the kill markers cannot be trusted after
173 // %r6 = LDR %sp, %reg0, 92, 14, %reg0; mem:LD4[FixedStack14]
175 // STR %r0, killed %r6, %reg0, 0, 0, %cpsr; mem:ST4[%395]
176 // %r6 = LDR %sp, %reg0, 100, 0, %cpsr; mem:LD4[FixedStack12]
177 // STR %r0, killed %r6, %reg0, 0, 14, %reg0; mem:ST4[%396](align=8)
179 // The first R6 kill is not really a kill since it's killed by a predicated
180 // instruction which may not be executed. The second R6 def may or may not
181 // re-define R6 so it's not safe to change it since the last R6 use cannot be
184 MI
.isCall() || MI
.hasExtraSrcRegAllocReq() || TII
->isPredicated(MI
);
186 // Scan the register operands for this instruction and update
187 // Classes and RegRefs.
188 for (unsigned i
= 0, e
= MI
.getNumOperands(); i
!= e
; ++i
) {
189 MachineOperand
&MO
= MI
.getOperand(i
);
190 if (!MO
.isReg()) continue;
191 unsigned Reg
= MO
.getReg();
192 if (Reg
== 0) continue;
193 const TargetRegisterClass
*NewRC
= nullptr;
195 if (i
< MI
.getDesc().getNumOperands())
196 NewRC
= TII
->getRegClass(MI
.getDesc(), i
, TRI
, MF
);
198 // For now, only allow the register to be changed if its register
199 // class is consistent across all uses.
200 if (!Classes
[Reg
] && NewRC
)
201 Classes
[Reg
] = NewRC
;
202 else if (!NewRC
|| Classes
[Reg
] != NewRC
)
203 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
205 // Now check for aliases.
206 for (MCRegAliasIterator
AI(Reg
, TRI
, false); AI
.isValid(); ++AI
) {
207 // If an alias of the reg is used during the live range, give up.
208 // Note that this allows us to skip checking if AntiDepReg
209 // overlaps with any of the aliases, among other things.
210 unsigned AliasReg
= *AI
;
211 if (Classes
[AliasReg
]) {
212 Classes
[AliasReg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
213 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
217 // If we're still willing to consider this register, note the reference.
218 if (Classes
[Reg
] != reinterpret_cast<TargetRegisterClass
*>(-1))
219 RegRefs
.insert(std::make_pair(Reg
, &MO
));
221 // If this reg is tied and live (Classes[Reg] is set to -1), we can't change
222 // it or any of its sub or super regs. We need to use KeepRegs to mark the
223 // reg because not all uses of the same reg within an instruction are
224 // necessarily tagged as tied.
225 // Example: an x86 "xor %eax, %eax" will have one source operand tied to the
226 // def register but not the second (see PR20020 for details).
227 // FIXME: can this check be relaxed to account for undef uses
228 // of a register? In the above 'xor' example, the uses of %eax are undef, so
229 // earlier instructions could still replace %eax even though the 'xor'
230 // itself can't be changed.
231 if (MI
.isRegTiedToUseOperand(i
) &&
232 Classes
[Reg
] == reinterpret_cast<TargetRegisterClass
*>(-1)) {
233 for (MCSubRegIterator
SubRegs(Reg
, TRI
, /*IncludeSelf=*/true);
234 SubRegs
.isValid(); ++SubRegs
) {
235 KeepRegs
.set(*SubRegs
);
237 for (MCSuperRegIterator
SuperRegs(Reg
, TRI
);
238 SuperRegs
.isValid(); ++SuperRegs
) {
239 KeepRegs
.set(*SuperRegs
);
243 if (MO
.isUse() && Special
) {
244 if (!KeepRegs
.test(Reg
)) {
245 for (MCSubRegIterator
SubRegs(Reg
, TRI
, /*IncludeSelf=*/true);
246 SubRegs
.isValid(); ++SubRegs
)
247 KeepRegs
.set(*SubRegs
);
253 void CriticalAntiDepBreaker::ScanInstruction(MachineInstr
&MI
, unsigned Count
) {
255 // Proceeding upwards, registers that are defed but not used in this
256 // instruction are now dead.
257 assert(!MI
.isKill() && "Attempting to scan a kill instruction");
259 if (!TII
->isPredicated(MI
)) {
260 // Predicated defs are modeled as read + write, i.e. similar to two
262 for (unsigned i
= 0, e
= MI
.getNumOperands(); i
!= e
; ++i
) {
263 MachineOperand
&MO
= MI
.getOperand(i
);
266 for (unsigned i
= 0, e
= TRI
->getNumRegs(); i
!= e
; ++i
)
267 if (MO
.clobbersPhysReg(i
)) {
268 DefIndices
[i
] = Count
;
269 KillIndices
[i
] = ~0u;
271 Classes
[i
] = nullptr;
275 if (!MO
.isReg()) continue;
276 unsigned Reg
= MO
.getReg();
277 if (Reg
== 0) continue;
278 if (!MO
.isDef()) continue;
280 // Ignore two-addr defs.
281 if (MI
.isRegTiedToUseOperand(i
))
284 // If we've already marked this reg as unchangeable, don't remove
285 // it or any of its subregs from KeepRegs.
286 bool Keep
= KeepRegs
.test(Reg
);
288 // For the reg itself and all subregs: update the def to current;
289 // reset the kill state, any restrictions, and references.
290 for (MCSubRegIterator
SRI(Reg
, TRI
, true); SRI
.isValid(); ++SRI
) {
291 unsigned SubregReg
= *SRI
;
292 DefIndices
[SubregReg
] = Count
;
293 KillIndices
[SubregReg
] = ~0u;
294 Classes
[SubregReg
] = nullptr;
295 RegRefs
.erase(SubregReg
);
297 KeepRegs
.reset(SubregReg
);
299 // Conservatively mark super-registers as unusable.
300 for (MCSuperRegIterator
SR(Reg
, TRI
); SR
.isValid(); ++SR
)
301 Classes
[*SR
] = reinterpret_cast<TargetRegisterClass
*>(-1);
304 for (unsigned i
= 0, e
= MI
.getNumOperands(); i
!= e
; ++i
) {
305 MachineOperand
&MO
= MI
.getOperand(i
);
306 if (!MO
.isReg()) continue;
307 unsigned Reg
= MO
.getReg();
308 if (Reg
== 0) continue;
309 if (!MO
.isUse()) continue;
311 const TargetRegisterClass
*NewRC
= nullptr;
312 if (i
< MI
.getDesc().getNumOperands())
313 NewRC
= TII
->getRegClass(MI
.getDesc(), i
, TRI
, MF
);
315 // For now, only allow the register to be changed if its register
316 // class is consistent across all uses.
317 if (!Classes
[Reg
] && NewRC
)
318 Classes
[Reg
] = NewRC
;
319 else if (!NewRC
|| Classes
[Reg
] != NewRC
)
320 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
322 RegRefs
.insert(std::make_pair(Reg
, &MO
));
324 // It wasn't previously live but now it is, this is a kill.
325 // Repeat for all aliases.
326 for (MCRegAliasIterator
AI(Reg
, TRI
, true); AI
.isValid(); ++AI
) {
327 unsigned AliasReg
= *AI
;
328 if (KillIndices
[AliasReg
] == ~0u) {
329 KillIndices
[AliasReg
] = Count
;
330 DefIndices
[AliasReg
] = ~0u;
336 // Check all machine operands that reference the antidependent register and must
337 // be replaced by NewReg. Return true if any of their parent instructions may
338 // clobber the new register.
340 // Note: AntiDepReg may be referenced by a two-address instruction such that
341 // it's use operand is tied to a def operand. We guard against the case in which
342 // the two-address instruction also defines NewReg, as may happen with
343 // pre/postincrement loads. In this case, both the use and def operands are in
344 // RegRefs because the def is inserted by PrescanInstruction and not erased
345 // during ScanInstruction. So checking for an instruction with definitions of
346 // both NewReg and AntiDepReg covers it.
348 CriticalAntiDepBreaker::isNewRegClobberedByRefs(RegRefIter RegRefBegin
,
349 RegRefIter RegRefEnd
,
351 for (RegRefIter I
= RegRefBegin
; I
!= RegRefEnd
; ++I
) {
352 MachineOperand
*RefOper
= I
->second
;
354 // Don't allow the instruction defining AntiDepReg to earlyclobber its
355 // operands, in case they may be assigned to NewReg. In this case antidep
356 // breaking must fail, but it's too rare to bother optimizing.
357 if (RefOper
->isDef() && RefOper
->isEarlyClobber())
360 // Handle cases in which this instruction defines NewReg.
361 MachineInstr
*MI
= RefOper
->getParent();
362 for (unsigned i
= 0, e
= MI
->getNumOperands(); i
!= e
; ++i
) {
363 const MachineOperand
&CheckOper
= MI
->getOperand(i
);
365 if (CheckOper
.isRegMask() && CheckOper
.clobbersPhysReg(NewReg
))
368 if (!CheckOper
.isReg() || !CheckOper
.isDef() ||
369 CheckOper
.getReg() != NewReg
)
372 // Don't allow the instruction to define NewReg and AntiDepReg.
373 // When AntiDepReg is renamed it will be an illegal op.
374 if (RefOper
->isDef())
377 // Don't allow an instruction using AntiDepReg to be earlyclobbered by
379 if (CheckOper
.isEarlyClobber())
382 // Don't allow inline asm to define NewReg at all. Who knows what it's
384 if (MI
->isInlineAsm())
391 unsigned CriticalAntiDepBreaker::
392 findSuitableFreeRegister(RegRefIter RegRefBegin
,
393 RegRefIter RegRefEnd
,
396 const TargetRegisterClass
*RC
,
397 SmallVectorImpl
<unsigned> &Forbid
) {
398 ArrayRef
<MCPhysReg
> Order
= RegClassInfo
.getOrder(RC
);
399 for (unsigned i
= 0; i
!= Order
.size(); ++i
) {
400 unsigned NewReg
= Order
[i
];
401 // Don't replace a register with itself.
402 if (NewReg
== AntiDepReg
) continue;
403 // Don't replace a register with one that was recently used to repair
404 // an anti-dependence with this AntiDepReg, because that would
405 // re-introduce that anti-dependence.
406 if (NewReg
== LastNewReg
) continue;
407 // If any instructions that define AntiDepReg also define the NewReg, it's
408 // not suitable. For example, Instruction with multiple definitions can
409 // result in this condition.
410 if (isNewRegClobberedByRefs(RegRefBegin
, RegRefEnd
, NewReg
)) continue;
411 // If NewReg is dead and NewReg's most recent def is not before
412 // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
413 assert(((KillIndices
[AntiDepReg
] == ~0u) != (DefIndices
[AntiDepReg
] == ~0u))
414 && "Kill and Def maps aren't consistent for AntiDepReg!");
415 assert(((KillIndices
[NewReg
] == ~0u) != (DefIndices
[NewReg
] == ~0u))
416 && "Kill and Def maps aren't consistent for NewReg!");
417 if (KillIndices
[NewReg
] != ~0u ||
418 Classes
[NewReg
] == reinterpret_cast<TargetRegisterClass
*>(-1) ||
419 KillIndices
[AntiDepReg
] > DefIndices
[NewReg
])
421 // If NewReg overlaps any of the forbidden registers, we can't use it.
422 bool Forbidden
= false;
423 for (SmallVectorImpl
<unsigned>::iterator it
= Forbid
.begin(),
424 ite
= Forbid
.end(); it
!= ite
; ++it
)
425 if (TRI
->regsOverlap(NewReg
, *it
)) {
429 if (Forbidden
) continue;
433 // No registers are free and available!
437 unsigned CriticalAntiDepBreaker::
438 BreakAntiDependencies(const std::vector
<SUnit
> &SUnits
,
439 MachineBasicBlock::iterator Begin
,
440 MachineBasicBlock::iterator End
,
441 unsigned InsertPosIndex
,
442 DbgValueVector
&DbgValues
) {
443 // The code below assumes that there is at least one instruction,
444 // so just duck out immediately if the block is empty.
445 if (SUnits
.empty()) return 0;
447 // Keep a map of the MachineInstr*'s back to the SUnit representing them.
448 // This is used for updating debug information.
450 // FIXME: Replace this with the existing map in ScheduleDAGInstrs::MISUnitMap
451 DenseMap
<MachineInstr
*, const SUnit
*> MISUnitMap
;
453 // Find the node at the bottom of the critical path.
454 const SUnit
*Max
= nullptr;
455 for (unsigned i
= 0, e
= SUnits
.size(); i
!= e
; ++i
) {
456 const SUnit
*SU
= &SUnits
[i
];
457 MISUnitMap
[SU
->getInstr()] = SU
;
458 if (!Max
|| SU
->getDepth() + SU
->Latency
> Max
->getDepth() + Max
->Latency
)
464 LLVM_DEBUG(dbgs() << "Critical path has total latency "
465 << (Max
->getDepth() + Max
->Latency
) << "\n");
466 LLVM_DEBUG(dbgs() << "Available regs:");
467 for (unsigned Reg
= 0; Reg
< TRI
->getNumRegs(); ++Reg
) {
468 if (KillIndices
[Reg
] == ~0u)
469 LLVM_DEBUG(dbgs() << " " << printReg(Reg
, TRI
));
471 LLVM_DEBUG(dbgs() << '\n');
475 // Track progress along the critical path through the SUnit graph as we walk
477 const SUnit
*CriticalPathSU
= Max
;
478 MachineInstr
*CriticalPathMI
= CriticalPathSU
->getInstr();
480 // Consider this pattern:
489 // There are three anti-dependencies here, and without special care,
490 // we'd break all of them using the same register:
499 // because at each anti-dependence, B is the first register that
500 // isn't A which is free. This re-introduces anti-dependencies
501 // at all but one of the original anti-dependencies that we were
502 // trying to break. To avoid this, keep track of the most recent
503 // register that each register was replaced with, avoid
504 // using it to repair an anti-dependence on the same register.
505 // This lets us produce this:
514 // This still has an anti-dependence on B, but at least it isn't on the
515 // original critical path.
517 // TODO: If we tracked more than one register here, we could potentially
518 // fix that remaining critical edge too. This is a little more involved,
519 // because unlike the most recent register, less recent registers should
520 // still be considered, though only if no other registers are available.
521 std::vector
<unsigned> LastNewReg(TRI
->getNumRegs(), 0);
523 // Attempt to break anti-dependence edges on the critical path. Walk the
524 // instructions from the bottom up, tracking information about liveness
525 // as we go to help determine which registers are available.
527 unsigned Count
= InsertPosIndex
- 1;
528 for (MachineBasicBlock::iterator I
= End
, E
= Begin
; I
!= E
; --Count
) {
529 MachineInstr
&MI
= *--I
;
530 // Kill instructions can define registers but are really nops, and there
531 // might be a real definition earlier that needs to be paired with uses
532 // dominated by this kill.
534 // FIXME: It may be possible to remove the isKill() restriction once PR18663
535 // has been properly fixed. There can be value in processing kills as seen
536 // in the AggressiveAntiDepBreaker class.
537 if (MI
.isDebugInstr() || MI
.isKill())
540 // Check if this instruction has a dependence on the critical path that
541 // is an anti-dependence that we may be able to break. If it is, set
542 // AntiDepReg to the non-zero register associated with the anti-dependence.
544 // We limit our attention to the critical path as a heuristic to avoid
545 // breaking anti-dependence edges that aren't going to significantly
546 // impact the overall schedule. There are a limited number of registers
547 // and we want to save them for the important edges.
549 // TODO: Instructions with multiple defs could have multiple
550 // anti-dependencies. The current code here only knows how to break one
551 // edge per instruction. Note that we'd have to be able to break all of
552 // the anti-dependencies in an instruction in order to be effective.
553 unsigned AntiDepReg
= 0;
554 if (&MI
== CriticalPathMI
) {
555 if (const SDep
*Edge
= CriticalPathStep(CriticalPathSU
)) {
556 const SUnit
*NextSU
= Edge
->getSUnit();
558 // Only consider anti-dependence edges.
559 if (Edge
->getKind() == SDep::Anti
) {
560 AntiDepReg
= Edge
->getReg();
561 assert(AntiDepReg
!= 0 && "Anti-dependence on reg0?");
562 if (!MRI
.isAllocatable(AntiDepReg
))
563 // Don't break anti-dependencies on non-allocatable registers.
565 else if (KeepRegs
.test(AntiDepReg
))
566 // Don't break anti-dependencies if a use down below requires
567 // this exact register.
570 // If the SUnit has other dependencies on the SUnit that it
571 // anti-depends on, don't bother breaking the anti-dependency
572 // since those edges would prevent such units from being
573 // scheduled past each other regardless.
575 // Also, if there are dependencies on other SUnits with the
576 // same register as the anti-dependency, don't attempt to
578 for (SUnit::const_pred_iterator P
= CriticalPathSU
->Preds
.begin(),
579 PE
= CriticalPathSU
->Preds
.end(); P
!= PE
; ++P
)
580 if (P
->getSUnit() == NextSU
?
581 (P
->getKind() != SDep::Anti
|| P
->getReg() != AntiDepReg
) :
582 (P
->getKind() == SDep::Data
&& P
->getReg() == AntiDepReg
)) {
588 CriticalPathSU
= NextSU
;
589 CriticalPathMI
= CriticalPathSU
->getInstr();
591 // We've reached the end of the critical path.
592 CriticalPathSU
= nullptr;
593 CriticalPathMI
= nullptr;
597 PrescanInstruction(MI
);
599 SmallVector
<unsigned, 2> ForbidRegs
;
601 // If MI's defs have a special allocation requirement, don't allow
602 // any def registers to be changed. Also assume all registers
603 // defined in a call must not be changed (ABI).
604 if (MI
.isCall() || MI
.hasExtraDefRegAllocReq() || TII
->isPredicated(MI
))
605 // If this instruction's defs have special allocation requirement, don't
606 // break this anti-dependency.
608 else if (AntiDepReg
) {
609 // If this instruction has a use of AntiDepReg, breaking it
610 // is invalid. If the instruction defines other registers,
611 // save a list of them so that we don't pick a new register
612 // that overlaps any of them.
613 for (unsigned i
= 0, e
= MI
.getNumOperands(); i
!= e
; ++i
) {
614 MachineOperand
&MO
= MI
.getOperand(i
);
615 if (!MO
.isReg()) continue;
616 unsigned Reg
= MO
.getReg();
617 if (Reg
== 0) continue;
618 if (MO
.isUse() && TRI
->regsOverlap(AntiDepReg
, Reg
)) {
622 if (MO
.isDef() && Reg
!= AntiDepReg
)
623 ForbidRegs
.push_back(Reg
);
627 // Determine AntiDepReg's register class, if it is live and is
628 // consistently used within a single class.
629 const TargetRegisterClass
*RC
= AntiDepReg
!= 0 ? Classes
[AntiDepReg
]
631 assert((AntiDepReg
== 0 || RC
!= nullptr) &&
632 "Register should be live if it's causing an anti-dependence!");
633 if (RC
== reinterpret_cast<TargetRegisterClass
*>(-1))
636 // Look for a suitable register to use to break the anti-dependence.
638 // TODO: Instead of picking the first free register, consider which might
640 if (AntiDepReg
!= 0) {
641 std::pair
<std::multimap
<unsigned, MachineOperand
*>::iterator
,
642 std::multimap
<unsigned, MachineOperand
*>::iterator
>
643 Range
= RegRefs
.equal_range(AntiDepReg
);
644 if (unsigned NewReg
= findSuitableFreeRegister(Range
.first
, Range
.second
,
646 LastNewReg
[AntiDepReg
],
648 LLVM_DEBUG(dbgs() << "Breaking anti-dependence edge on "
649 << printReg(AntiDepReg
, TRI
) << " with "
650 << RegRefs
.count(AntiDepReg
) << " references"
651 << " using " << printReg(NewReg
, TRI
) << "!\n");
653 // Update the references to the old register to refer to the new
655 for (std::multimap
<unsigned, MachineOperand
*>::iterator
656 Q
= Range
.first
, QE
= Range
.second
; Q
!= QE
; ++Q
) {
657 Q
->second
->setReg(NewReg
);
658 // If the SU for the instruction being updated has debug information
659 // related to the anti-dependency register, make sure to update that
661 const SUnit
*SU
= MISUnitMap
[Q
->second
->getParent()];
663 UpdateDbgValues(DbgValues
, Q
->second
->getParent(),
667 // We just went back in time and modified history; the
668 // liveness information for the anti-dependence reg is now
669 // inconsistent. Set the state as if it were dead.
670 Classes
[NewReg
] = Classes
[AntiDepReg
];
671 DefIndices
[NewReg
] = DefIndices
[AntiDepReg
];
672 KillIndices
[NewReg
] = KillIndices
[AntiDepReg
];
673 assert(((KillIndices
[NewReg
] == ~0u) !=
674 (DefIndices
[NewReg
] == ~0u)) &&
675 "Kill and Def maps aren't consistent for NewReg!");
677 Classes
[AntiDepReg
] = nullptr;
678 DefIndices
[AntiDepReg
] = KillIndices
[AntiDepReg
];
679 KillIndices
[AntiDepReg
] = ~0u;
680 assert(((KillIndices
[AntiDepReg
] == ~0u) !=
681 (DefIndices
[AntiDepReg
] == ~0u)) &&
682 "Kill and Def maps aren't consistent for AntiDepReg!");
684 RegRefs
.erase(AntiDepReg
);
685 LastNewReg
[AntiDepReg
] = NewReg
;
690 ScanInstruction(MI
, Count
);