1 //===- CriticalAntiDepBreaker.cpp - Anti-dep breaker ----------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
// This file implements the CriticalAntiDepBreaker class, which implements
// register anti-dependence breaking along a block's critical path during
// post-RA scheduling.
//
//===----------------------------------------------------------------------===//
15 #include "CriticalAntiDepBreaker.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/BitVector.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/CodeGen/MachineBasicBlock.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineInstr.h"
24 #include "llvm/CodeGen/MachineOperand.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/RegisterClassInfo.h"
27 #include "llvm/CodeGen/ScheduleDAG.h"
28 #include "llvm/CodeGen/TargetInstrInfo.h"
29 #include "llvm/CodeGen/TargetRegisterInfo.h"
30 #include "llvm/CodeGen/TargetSubtargetInfo.h"
31 #include "llvm/MC/MCInstrDesc.h"
32 #include "llvm/MC/MCRegisterInfo.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/raw_ostream.h"
42 #define DEBUG_TYPE "post-RA-sched"
44 CriticalAntiDepBreaker::CriticalAntiDepBreaker(MachineFunction
&MFi
,
45 const RegisterClassInfo
&RCI
)
46 : AntiDepBreaker(), MF(MFi
), MRI(MF
.getRegInfo()),
47 TII(MF
.getSubtarget().getInstrInfo()),
48 TRI(MF
.getSubtarget().getRegisterInfo()), RegClassInfo(RCI
),
49 Classes(TRI
->getNumRegs(), nullptr), KillIndices(TRI
->getNumRegs(), 0),
50 DefIndices(TRI
->getNumRegs(), 0), KeepRegs(TRI
->getNumRegs(), false) {}
52 CriticalAntiDepBreaker::~CriticalAntiDepBreaker() = default;
54 void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock
*BB
) {
55 const unsigned BBSize
= BB
->size();
56 for (unsigned i
= 0, e
= TRI
->getNumRegs(); i
!= e
; ++i
) {
57 // Clear out the register class data.
60 // Initialize the indices to indicate that no registers are live.
62 DefIndices
[i
] = BBSize
;
65 // Clear "do not change" set.
68 bool IsReturnBlock
= BB
->isReturnBlock();
70 // Examine the live-in regs of all successors.
71 for (MachineBasicBlock::succ_iterator SI
= BB
->succ_begin(),
72 SE
= BB
->succ_end(); SI
!= SE
; ++SI
)
73 for (const auto &LI
: (*SI
)->liveins()) {
74 for (MCRegAliasIterator
AI(LI
.PhysReg
, TRI
, true); AI
.isValid(); ++AI
) {
76 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
77 KillIndices
[Reg
] = BBSize
;
78 DefIndices
[Reg
] = ~0u;
82 // Mark live-out callee-saved registers. In a return block this is
83 // all callee-saved registers. In non-return this is any
84 // callee-saved register that is not saved in the prolog.
85 const MachineFrameInfo
&MFI
= MF
.getFrameInfo();
86 BitVector Pristine
= MFI
.getPristineRegs(MF
);
87 for (const MCPhysReg
*I
= MF
.getRegInfo().getCalleeSavedRegs(); *I
;
90 if (!IsReturnBlock
&& !Pristine
.test(Reg
))
92 for (MCRegAliasIterator
AI(*I
, TRI
, true); AI
.isValid(); ++AI
) {
94 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
95 KillIndices
[Reg
] = BBSize
;
96 DefIndices
[Reg
] = ~0u;
101 void CriticalAntiDepBreaker::FinishBlock() {
106 void CriticalAntiDepBreaker::Observe(MachineInstr
&MI
, unsigned Count
,
107 unsigned InsertPosIndex
) {
108 // Kill instructions can define registers but are really nops, and there might
109 // be a real definition earlier that needs to be paired with uses dominated by
112 // FIXME: It may be possible to remove the isKill() restriction once PR18663
113 // has been properly fixed. There can be value in processing kills as seen in
114 // the AggressiveAntiDepBreaker class.
115 if (MI
.isDebugInstr() || MI
.isKill())
117 assert(Count
< InsertPosIndex
&& "Instruction index out of expected range!");
119 for (unsigned Reg
= 0; Reg
!= TRI
->getNumRegs(); ++Reg
) {
120 if (KillIndices
[Reg
] != ~0u) {
121 // If Reg is currently live, then mark that it can't be renamed as
122 // we don't know the extent of its live-range anymore (now that it
123 // has been scheduled).
124 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
125 KillIndices
[Reg
] = Count
;
126 } else if (DefIndices
[Reg
] < InsertPosIndex
&& DefIndices
[Reg
] >= Count
) {
127 // Any register which was defined within the previous scheduling region
128 // may have been rescheduled and its lifetime may overlap with registers
129 // in ways not reflected in our current liveness state. For each such
130 // register, adjust the liveness state to be conservatively correct.
131 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
133 // Move the def index to the end of the previous region, to reflect
134 // that the def could theoretically have been scheduled at the end.
135 DefIndices
[Reg
] = InsertPosIndex
;
139 PrescanInstruction(MI
);
140 ScanInstruction(MI
, Count
);
143 /// CriticalPathStep - Return the next SUnit after SU on the bottom-up
145 static const SDep
*CriticalPathStep(const SUnit
*SU
) {
146 const SDep
*Next
= nullptr;
147 unsigned NextDepth
= 0;
148 // Find the predecessor edge with the greatest depth.
149 for (SUnit::const_pred_iterator P
= SU
->Preds
.begin(), PE
= SU
->Preds
.end();
151 const SUnit
*PredSU
= P
->getSUnit();
152 unsigned PredLatency
= P
->getLatency();
153 unsigned PredTotalLatency
= PredSU
->getDepth() + PredLatency
;
154 // In the case of a latency tie, prefer an anti-dependency edge over
155 // other types of edges.
156 if (NextDepth
< PredTotalLatency
||
157 (NextDepth
== PredTotalLatency
&& P
->getKind() == SDep::Anti
)) {
158 NextDepth
= PredTotalLatency
;
165 void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr
&MI
) {
166 // It's not safe to change register allocation for source operands of
167 // instructions that have special allocation requirements. Also assume all
168 // registers used in a call must not be changed (ABI).
169 // FIXME: The issue with predicated instruction is more complex. We are being
170 // conservative here because the kill markers cannot be trusted after
172 // %r6 = LDR %sp, %reg0, 92, 14, %reg0; mem:LD4[FixedStack14]
174 // STR %r0, killed %r6, %reg0, 0, 0, %cpsr; mem:ST4[%395]
175 // %r6 = LDR %sp, %reg0, 100, 0, %cpsr; mem:LD4[FixedStack12]
176 // STR %r0, killed %r6, %reg0, 0, 14, %reg0; mem:ST4[%396](align=8)
178 // The first R6 kill is not really a kill since it's killed by a predicated
179 // instruction which may not be executed. The second R6 def may or may not
180 // re-define R6 so it's not safe to change it since the last R6 use cannot be
183 MI
.isCall() || MI
.hasExtraSrcRegAllocReq() || TII
->isPredicated(MI
);
185 // Scan the register operands for this instruction and update
186 // Classes and RegRefs.
187 for (unsigned i
= 0, e
= MI
.getNumOperands(); i
!= e
; ++i
) {
188 MachineOperand
&MO
= MI
.getOperand(i
);
189 if (!MO
.isReg()) continue;
190 Register Reg
= MO
.getReg();
191 if (Reg
== 0) continue;
192 const TargetRegisterClass
*NewRC
= nullptr;
194 if (i
< MI
.getDesc().getNumOperands())
195 NewRC
= TII
->getRegClass(MI
.getDesc(), i
, TRI
, MF
);
197 // For now, only allow the register to be changed if its register
198 // class is consistent across all uses.
199 if (!Classes
[Reg
] && NewRC
)
200 Classes
[Reg
] = NewRC
;
201 else if (!NewRC
|| Classes
[Reg
] != NewRC
)
202 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
204 // Now check for aliases.
205 for (MCRegAliasIterator
AI(Reg
, TRI
, false); AI
.isValid(); ++AI
) {
206 // If an alias of the reg is used during the live range, give up.
207 // Note that this allows us to skip checking if AntiDepReg
208 // overlaps with any of the aliases, among other things.
209 unsigned AliasReg
= *AI
;
210 if (Classes
[AliasReg
]) {
211 Classes
[AliasReg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
212 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
216 // If we're still willing to consider this register, note the reference.
217 if (Classes
[Reg
] != reinterpret_cast<TargetRegisterClass
*>(-1))
218 RegRefs
.insert(std::make_pair(Reg
, &MO
));
220 // If this reg is tied and live (Classes[Reg] is set to -1), we can't change
221 // it or any of its sub or super regs. We need to use KeepRegs to mark the
222 // reg because not all uses of the same reg within an instruction are
223 // necessarily tagged as tied.
224 // Example: an x86 "xor %eax, %eax" will have one source operand tied to the
225 // def register but not the second (see PR20020 for details).
226 // FIXME: can this check be relaxed to account for undef uses
227 // of a register? In the above 'xor' example, the uses of %eax are undef, so
228 // earlier instructions could still replace %eax even though the 'xor'
229 // itself can't be changed.
230 if (MI
.isRegTiedToUseOperand(i
) &&
231 Classes
[Reg
] == reinterpret_cast<TargetRegisterClass
*>(-1)) {
232 for (MCSubRegIterator
SubRegs(Reg
, TRI
, /*IncludeSelf=*/true);
233 SubRegs
.isValid(); ++SubRegs
) {
234 KeepRegs
.set(*SubRegs
);
236 for (MCSuperRegIterator
SuperRegs(Reg
, TRI
);
237 SuperRegs
.isValid(); ++SuperRegs
) {
238 KeepRegs
.set(*SuperRegs
);
242 if (MO
.isUse() && Special
) {
243 if (!KeepRegs
.test(Reg
)) {
244 for (MCSubRegIterator
SubRegs(Reg
, TRI
, /*IncludeSelf=*/true);
245 SubRegs
.isValid(); ++SubRegs
)
246 KeepRegs
.set(*SubRegs
);
252 void CriticalAntiDepBreaker::ScanInstruction(MachineInstr
&MI
, unsigned Count
) {
254 // Proceeding upwards, registers that are defed but not used in this
255 // instruction are now dead.
256 assert(!MI
.isKill() && "Attempting to scan a kill instruction");
258 if (!TII
->isPredicated(MI
)) {
259 // Predicated defs are modeled as read + write, i.e. similar to two
261 for (unsigned i
= 0, e
= MI
.getNumOperands(); i
!= e
; ++i
) {
262 MachineOperand
&MO
= MI
.getOperand(i
);
265 for (unsigned i
= 0, e
= TRI
->getNumRegs(); i
!= e
; ++i
)
266 if (MO
.clobbersPhysReg(i
)) {
267 DefIndices
[i
] = Count
;
268 KillIndices
[i
] = ~0u;
270 Classes
[i
] = nullptr;
274 if (!MO
.isReg()) continue;
275 Register Reg
= MO
.getReg();
276 if (Reg
== 0) continue;
277 if (!MO
.isDef()) continue;
279 // Ignore two-addr defs.
280 if (MI
.isRegTiedToUseOperand(i
))
283 // If we've already marked this reg as unchangeable, don't remove
284 // it or any of its subregs from KeepRegs.
285 bool Keep
= KeepRegs
.test(Reg
);
287 // For the reg itself and all subregs: update the def to current;
288 // reset the kill state, any restrictions, and references.
289 for (MCSubRegIterator
SRI(Reg
, TRI
, true); SRI
.isValid(); ++SRI
) {
290 unsigned SubregReg
= *SRI
;
291 DefIndices
[SubregReg
] = Count
;
292 KillIndices
[SubregReg
] = ~0u;
293 Classes
[SubregReg
] = nullptr;
294 RegRefs
.erase(SubregReg
);
296 KeepRegs
.reset(SubregReg
);
298 // Conservatively mark super-registers as unusable.
299 for (MCSuperRegIterator
SR(Reg
, TRI
); SR
.isValid(); ++SR
)
300 Classes
[*SR
] = reinterpret_cast<TargetRegisterClass
*>(-1);
303 for (unsigned i
= 0, e
= MI
.getNumOperands(); i
!= e
; ++i
) {
304 MachineOperand
&MO
= MI
.getOperand(i
);
305 if (!MO
.isReg()) continue;
306 Register Reg
= MO
.getReg();
307 if (Reg
== 0) continue;
308 if (!MO
.isUse()) continue;
310 const TargetRegisterClass
*NewRC
= nullptr;
311 if (i
< MI
.getDesc().getNumOperands())
312 NewRC
= TII
->getRegClass(MI
.getDesc(), i
, TRI
, MF
);
314 // For now, only allow the register to be changed if its register
315 // class is consistent across all uses.
316 if (!Classes
[Reg
] && NewRC
)
317 Classes
[Reg
] = NewRC
;
318 else if (!NewRC
|| Classes
[Reg
] != NewRC
)
319 Classes
[Reg
] = reinterpret_cast<TargetRegisterClass
*>(-1);
321 RegRefs
.insert(std::make_pair(Reg
, &MO
));
323 // It wasn't previously live but now it is, this is a kill.
324 // Repeat for all aliases.
325 for (MCRegAliasIterator
AI(Reg
, TRI
, true); AI
.isValid(); ++AI
) {
326 unsigned AliasReg
= *AI
;
327 if (KillIndices
[AliasReg
] == ~0u) {
328 KillIndices
[AliasReg
] = Count
;
329 DefIndices
[AliasReg
] = ~0u;
335 // Check all machine operands that reference the antidependent register and must
336 // be replaced by NewReg. Return true if any of their parent instructions may
337 // clobber the new register.
339 // Note: AntiDepReg may be referenced by a two-address instruction such that
340 // it's use operand is tied to a def operand. We guard against the case in which
341 // the two-address instruction also defines NewReg, as may happen with
342 // pre/postincrement loads. In this case, both the use and def operands are in
343 // RegRefs because the def is inserted by PrescanInstruction and not erased
344 // during ScanInstruction. So checking for an instruction with definitions of
345 // both NewReg and AntiDepReg covers it.
347 CriticalAntiDepBreaker::isNewRegClobberedByRefs(RegRefIter RegRefBegin
,
348 RegRefIter RegRefEnd
,
350 for (RegRefIter I
= RegRefBegin
; I
!= RegRefEnd
; ++I
) {
351 MachineOperand
*RefOper
= I
->second
;
353 // Don't allow the instruction defining AntiDepReg to earlyclobber its
354 // operands, in case they may be assigned to NewReg. In this case antidep
355 // breaking must fail, but it's too rare to bother optimizing.
356 if (RefOper
->isDef() && RefOper
->isEarlyClobber())
359 // Handle cases in which this instruction defines NewReg.
360 MachineInstr
*MI
= RefOper
->getParent();
361 for (unsigned i
= 0, e
= MI
->getNumOperands(); i
!= e
; ++i
) {
362 const MachineOperand
&CheckOper
= MI
->getOperand(i
);
364 if (CheckOper
.isRegMask() && CheckOper
.clobbersPhysReg(NewReg
))
367 if (!CheckOper
.isReg() || !CheckOper
.isDef() ||
368 CheckOper
.getReg() != NewReg
)
371 // Don't allow the instruction to define NewReg and AntiDepReg.
372 // When AntiDepReg is renamed it will be an illegal op.
373 if (RefOper
->isDef())
376 // Don't allow an instruction using AntiDepReg to be earlyclobbered by
378 if (CheckOper
.isEarlyClobber())
381 // Don't allow inline asm to define NewReg at all. Who knows what it's
383 if (MI
->isInlineAsm())
390 unsigned CriticalAntiDepBreaker::
391 findSuitableFreeRegister(RegRefIter RegRefBegin
,
392 RegRefIter RegRefEnd
,
395 const TargetRegisterClass
*RC
,
396 SmallVectorImpl
<unsigned> &Forbid
) {
397 ArrayRef
<MCPhysReg
> Order
= RegClassInfo
.getOrder(RC
);
398 for (unsigned i
= 0; i
!= Order
.size(); ++i
) {
399 unsigned NewReg
= Order
[i
];
400 // Don't replace a register with itself.
401 if (NewReg
== AntiDepReg
) continue;
402 // Don't replace a register with one that was recently used to repair
403 // an anti-dependence with this AntiDepReg, because that would
404 // re-introduce that anti-dependence.
405 if (NewReg
== LastNewReg
) continue;
406 // If any instructions that define AntiDepReg also define the NewReg, it's
407 // not suitable. For example, Instruction with multiple definitions can
408 // result in this condition.
409 if (isNewRegClobberedByRefs(RegRefBegin
, RegRefEnd
, NewReg
)) continue;
410 // If NewReg is dead and NewReg's most recent def is not before
411 // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
412 assert(((KillIndices
[AntiDepReg
] == ~0u) != (DefIndices
[AntiDepReg
] == ~0u))
413 && "Kill and Def maps aren't consistent for AntiDepReg!");
414 assert(((KillIndices
[NewReg
] == ~0u) != (DefIndices
[NewReg
] == ~0u))
415 && "Kill and Def maps aren't consistent for NewReg!");
416 if (KillIndices
[NewReg
] != ~0u ||
417 Classes
[NewReg
] == reinterpret_cast<TargetRegisterClass
*>(-1) ||
418 KillIndices
[AntiDepReg
] > DefIndices
[NewReg
])
420 // If NewReg overlaps any of the forbidden registers, we can't use it.
421 bool Forbidden
= false;
422 for (SmallVectorImpl
<unsigned>::iterator it
= Forbid
.begin(),
423 ite
= Forbid
.end(); it
!= ite
; ++it
)
424 if (TRI
->regsOverlap(NewReg
, *it
)) {
428 if (Forbidden
) continue;
432 // No registers are free and available!
436 unsigned CriticalAntiDepBreaker::
437 BreakAntiDependencies(const std::vector
<SUnit
> &SUnits
,
438 MachineBasicBlock::iterator Begin
,
439 MachineBasicBlock::iterator End
,
440 unsigned InsertPosIndex
,
441 DbgValueVector
&DbgValues
) {
442 // The code below assumes that there is at least one instruction,
443 // so just duck out immediately if the block is empty.
444 if (SUnits
.empty()) return 0;
446 // Keep a map of the MachineInstr*'s back to the SUnit representing them.
447 // This is used for updating debug information.
449 // FIXME: Replace this with the existing map in ScheduleDAGInstrs::MISUnitMap
450 DenseMap
<MachineInstr
*, const SUnit
*> MISUnitMap
;
452 // Find the node at the bottom of the critical path.
453 const SUnit
*Max
= nullptr;
454 for (unsigned i
= 0, e
= SUnits
.size(); i
!= e
; ++i
) {
455 const SUnit
*SU
= &SUnits
[i
];
456 MISUnitMap
[SU
->getInstr()] = SU
;
457 if (!Max
|| SU
->getDepth() + SU
->Latency
> Max
->getDepth() + Max
->Latency
)
460 assert(Max
&& "Failed to find bottom of the critical path");
464 LLVM_DEBUG(dbgs() << "Critical path has total latency "
465 << (Max
->getDepth() + Max
->Latency
) << "\n");
466 LLVM_DEBUG(dbgs() << "Available regs:");
467 for (unsigned Reg
= 0; Reg
< TRI
->getNumRegs(); ++Reg
) {
468 if (KillIndices
[Reg
] == ~0u)
469 LLVM_DEBUG(dbgs() << " " << printReg(Reg
, TRI
));
471 LLVM_DEBUG(dbgs() << '\n');
475 // Track progress along the critical path through the SUnit graph as we walk
477 const SUnit
*CriticalPathSU
= Max
;
478 MachineInstr
*CriticalPathMI
= CriticalPathSU
->getInstr();
480 // Consider this pattern:
489 // There are three anti-dependencies here, and without special care,
490 // we'd break all of them using the same register:
499 // because at each anti-dependence, B is the first register that
500 // isn't A which is free. This re-introduces anti-dependencies
501 // at all but one of the original anti-dependencies that we were
502 // trying to break. To avoid this, keep track of the most recent
503 // register that each register was replaced with, avoid
504 // using it to repair an anti-dependence on the same register.
505 // This lets us produce this:
514 // This still has an anti-dependence on B, but at least it isn't on the
515 // original critical path.
517 // TODO: If we tracked more than one register here, we could potentially
518 // fix that remaining critical edge too. This is a little more involved,
519 // because unlike the most recent register, less recent registers should
520 // still be considered, though only if no other registers are available.
521 std::vector
<unsigned> LastNewReg(TRI
->getNumRegs(), 0);
523 // Attempt to break anti-dependence edges on the critical path. Walk the
524 // instructions from the bottom up, tracking information about liveness
525 // as we go to help determine which registers are available.
527 unsigned Count
= InsertPosIndex
- 1;
528 for (MachineBasicBlock::iterator I
= End
, E
= Begin
; I
!= E
; --Count
) {
529 MachineInstr
&MI
= *--I
;
530 // Kill instructions can define registers but are really nops, and there
531 // might be a real definition earlier that needs to be paired with uses
532 // dominated by this kill.
534 // FIXME: It may be possible to remove the isKill() restriction once PR18663
535 // has been properly fixed. There can be value in processing kills as seen
536 // in the AggressiveAntiDepBreaker class.
537 if (MI
.isDebugInstr() || MI
.isKill())
540 // Check if this instruction has a dependence on the critical path that
541 // is an anti-dependence that we may be able to break. If it is, set
542 // AntiDepReg to the non-zero register associated with the anti-dependence.
544 // We limit our attention to the critical path as a heuristic to avoid
545 // breaking anti-dependence edges that aren't going to significantly
546 // impact the overall schedule. There are a limited number of registers
547 // and we want to save them for the important edges.
549 // TODO: Instructions with multiple defs could have multiple
550 // anti-dependencies. The current code here only knows how to break one
551 // edge per instruction. Note that we'd have to be able to break all of
552 // the anti-dependencies in an instruction in order to be effective.
553 unsigned AntiDepReg
= 0;
554 if (&MI
== CriticalPathMI
) {
555 if (const SDep
*Edge
= CriticalPathStep(CriticalPathSU
)) {
556 const SUnit
*NextSU
= Edge
->getSUnit();
558 // Only consider anti-dependence edges.
559 if (Edge
->getKind() == SDep::Anti
) {
560 AntiDepReg
= Edge
->getReg();
561 assert(AntiDepReg
!= 0 && "Anti-dependence on reg0?");
562 if (!MRI
.isAllocatable(AntiDepReg
))
563 // Don't break anti-dependencies on non-allocatable registers.
565 else if (KeepRegs
.test(AntiDepReg
))
566 // Don't break anti-dependencies if a use down below requires
567 // this exact register.
570 // If the SUnit has other dependencies on the SUnit that it
571 // anti-depends on, don't bother breaking the anti-dependency
572 // since those edges would prevent such units from being
573 // scheduled past each other regardless.
575 // Also, if there are dependencies on other SUnits with the
576 // same register as the anti-dependency, don't attempt to
578 for (SUnit::const_pred_iterator P
= CriticalPathSU
->Preds
.begin(),
579 PE
= CriticalPathSU
->Preds
.end(); P
!= PE
; ++P
)
580 if (P
->getSUnit() == NextSU
?
581 (P
->getKind() != SDep::Anti
|| P
->getReg() != AntiDepReg
) :
582 (P
->getKind() == SDep::Data
&& P
->getReg() == AntiDepReg
)) {
588 CriticalPathSU
= NextSU
;
589 CriticalPathMI
= CriticalPathSU
->getInstr();
591 // We've reached the end of the critical path.
592 CriticalPathSU
= nullptr;
593 CriticalPathMI
= nullptr;
597 PrescanInstruction(MI
);
599 SmallVector
<unsigned, 2> ForbidRegs
;
601 // If MI's defs have a special allocation requirement, don't allow
602 // any def registers to be changed. Also assume all registers
603 // defined in a call must not be changed (ABI).
604 if (MI
.isCall() || MI
.hasExtraDefRegAllocReq() || TII
->isPredicated(MI
))
605 // If this instruction's defs have special allocation requirement, don't
606 // break this anti-dependency.
608 else if (AntiDepReg
) {
609 // If this instruction has a use of AntiDepReg, breaking it
610 // is invalid. If the instruction defines other registers,
611 // save a list of them so that we don't pick a new register
612 // that overlaps any of them.
613 for (unsigned i
= 0, e
= MI
.getNumOperands(); i
!= e
; ++i
) {
614 MachineOperand
&MO
= MI
.getOperand(i
);
615 if (!MO
.isReg()) continue;
616 Register Reg
= MO
.getReg();
617 if (Reg
== 0) continue;
618 if (MO
.isUse() && TRI
->regsOverlap(AntiDepReg
, Reg
)) {
622 if (MO
.isDef() && Reg
!= AntiDepReg
)
623 ForbidRegs
.push_back(Reg
);
627 // Determine AntiDepReg's register class, if it is live and is
628 // consistently used within a single class.
629 const TargetRegisterClass
*RC
= AntiDepReg
!= 0 ? Classes
[AntiDepReg
]
631 assert((AntiDepReg
== 0 || RC
!= nullptr) &&
632 "Register should be live if it's causing an anti-dependence!");
633 if (RC
== reinterpret_cast<TargetRegisterClass
*>(-1))
636 // Look for a suitable register to use to break the anti-dependence.
638 // TODO: Instead of picking the first free register, consider which might
640 if (AntiDepReg
!= 0) {
641 std::pair
<std::multimap
<unsigned, MachineOperand
*>::iterator
,
642 std::multimap
<unsigned, MachineOperand
*>::iterator
>
643 Range
= RegRefs
.equal_range(AntiDepReg
);
644 if (unsigned NewReg
= findSuitableFreeRegister(Range
.first
, Range
.second
,
646 LastNewReg
[AntiDepReg
],
648 LLVM_DEBUG(dbgs() << "Breaking anti-dependence edge on "
649 << printReg(AntiDepReg
, TRI
) << " with "
650 << RegRefs
.count(AntiDepReg
) << " references"
651 << " using " << printReg(NewReg
, TRI
) << "!\n");
653 // Update the references to the old register to refer to the new
655 for (std::multimap
<unsigned, MachineOperand
*>::iterator
656 Q
= Range
.first
, QE
= Range
.second
; Q
!= QE
; ++Q
) {
657 Q
->second
->setReg(NewReg
);
658 // If the SU for the instruction being updated has debug information
659 // related to the anti-dependency register, make sure to update that
661 const SUnit
*SU
= MISUnitMap
[Q
->second
->getParent()];
663 UpdateDbgValues(DbgValues
, Q
->second
->getParent(),
667 // We just went back in time and modified history; the
668 // liveness information for the anti-dependence reg is now
669 // inconsistent. Set the state as if it were dead.
670 Classes
[NewReg
] = Classes
[AntiDepReg
];
671 DefIndices
[NewReg
] = DefIndices
[AntiDepReg
];
672 KillIndices
[NewReg
] = KillIndices
[AntiDepReg
];
673 assert(((KillIndices
[NewReg
] == ~0u) !=
674 (DefIndices
[NewReg
] == ~0u)) &&
675 "Kill and Def maps aren't consistent for NewReg!");
677 Classes
[AntiDepReg
] = nullptr;
678 DefIndices
[AntiDepReg
] = KillIndices
[AntiDepReg
];
679 KillIndices
[AntiDepReg
] = ~0u;
680 assert(((KillIndices
[AntiDepReg
] == ~0u) !=
681 (DefIndices
[AntiDepReg
] == ~0u)) &&
682 "Kill and Def maps aren't consistent for AntiDepReg!");
684 RegRefs
.erase(AntiDepReg
);
685 LastNewReg
[AntiDepReg
] = NewReg
;
690 ScanInstruction(MI
, Count
);