//===-- SimpleRegisterCoalescing.cpp - Register Coalescing ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a simple register coalescing pass that attempts to
// aggressively coalesce every register copy that it can.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "simpleregistercoalescing"
#include "llvm/CodeGen/SimpleRegisterCoalescing.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;
38 STATISTIC(numJoins
, "Number of interval joins performed");
39 STATISTIC(numPeep
, "Number of identity moves eliminated after coalescing");
40 STATISTIC(numAborts
, "Number of times interval joining aborted");
42 char SimpleRegisterCoalescing::ID
= 0;
45 EnableJoining("join-liveintervals",
46 cl::desc("Coallesce copies (default=true)"),
49 RegisterPass
<SimpleRegisterCoalescing
>
50 X("simple-register-coalescing",
51 "Simple register coalescing to eliminate all possible register copies");
54 const PassInfo
*llvm::SimpleRegisterCoalescingID
= X
.getPassInfo();
56 void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage
&AU
) const {
57 //AU.addPreserved<LiveVariables>();
58 AU
.addPreserved
<LiveIntervals
>();
59 AU
.addPreservedID(PHIEliminationID
);
60 AU
.addPreservedID(TwoAddressInstructionPassID
);
61 AU
.addRequired
<LiveVariables
>();
62 AU
.addRequired
<LiveIntervals
>();
63 AU
.addRequired
<LoopInfo
>();
64 MachineFunctionPass::getAnalysisUsage(AU
);
67 /// AdjustCopiesBackFrom - We found a non-trivially-coallescable copy with IntA
68 /// being the source and IntB being the dest, thus this defines a value number
69 /// in IntB. If the source value number (in IntA) is defined by a copy from B,
70 /// see if we can merge these two pieces of B into a single value number,
71 /// eliminating a copy. For example:
75 /// B1 = A3 <- this copy
77 /// In this case, B0 can be extended to where the B1 copy lives, allowing the B1
78 /// value number to be replaced with B0 (which simplifies the B liveinterval).
80 /// This returns true if an interval was modified.
82 bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval
&IntA
, LiveInterval
&IntB
,
83 MachineInstr
*CopyMI
) {
84 unsigned CopyIdx
= li_
->getDefIndex(li_
->getInstructionIndex(CopyMI
));
86 // BValNo is a value number in B that is defined by a copy from A. 'B3' in
88 LiveInterval::iterator BLR
= IntB
.FindLiveRangeContaining(CopyIdx
);
89 unsigned BValNo
= BLR
->ValId
;
91 // Get the location that B is defined at. Two options: either this value has
92 // an unknown definition point or it is defined at CopyIdx. If unknown, we
94 unsigned BValNoDefIdx
= IntB
.getInstForValNum(BValNo
);
95 if (BValNoDefIdx
== ~0U) return false;
96 assert(BValNoDefIdx
== CopyIdx
&&
97 "Copy doesn't define the value?");
99 // AValNo is the value number in A that defines the copy, A0 in the example.
100 LiveInterval::iterator AValLR
= IntA
.FindLiveRangeContaining(CopyIdx
-1);
101 unsigned AValNo
= AValLR
->ValId
;
103 // If AValNo is defined as a copy from IntB, we can potentially process this.
105 // Get the instruction that defines this value number.
106 unsigned SrcReg
= IntA
.getSrcRegForValNum(AValNo
);
107 if (!SrcReg
) return false; // Not defined by a copy.
109 // If the value number is not defined by a copy instruction, ignore it.
111 // If the source register comes from an interval other than IntB, we can't
113 if (rep(SrcReg
) != IntB
.reg
) return false;
115 // Get the LiveRange in IntB that this value number starts with.
116 unsigned AValNoInstIdx
= IntA
.getInstForValNum(AValNo
);
117 LiveInterval::iterator ValLR
= IntB
.FindLiveRangeContaining(AValNoInstIdx
-1);
119 // Make sure that the end of the live range is inside the same block as
121 MachineInstr
*ValLREndInst
= li_
->getInstructionFromIndex(ValLR
->end
-1);
123 ValLREndInst
->getParent() != CopyMI
->getParent()) return false;
125 // Okay, we now know that ValLR ends in the same block that the CopyMI
126 // live-range starts. If there are no intervening live ranges between them in
127 // IntB, we can merge them.
128 if (ValLR
+1 != BLR
) return false;
130 DOUT
<< "\nExtending: "; IntB
.print(DOUT
, mri_
);
132 // We are about to delete CopyMI, so need to remove it as the 'instruction
133 // that defines this value #'.
134 IntB
.setValueNumberInfo(BValNo
, std::make_pair(~0U, 0));
136 // Okay, we can merge them. We need to insert a new liverange:
137 // [ValLR.end, BLR.begin) of either value number, then we merge the
138 // two value numbers.
139 unsigned FillerStart
= ValLR
->end
, FillerEnd
= BLR
->start
;
140 IntB
.addRange(LiveRange(FillerStart
, FillerEnd
, BValNo
));
142 // If the IntB live range is assigned to a physical register, and if that
143 // physreg has aliases,
144 if (MRegisterInfo::isPhysicalRegister(IntB
.reg
)) {
145 // Update the liveintervals of sub-registers.
146 for (const unsigned *AS
= mri_
->getSubRegisters(IntB
.reg
); *AS
; ++AS
) {
147 LiveInterval
&AliasLI
= li_
->getInterval(*AS
);
148 AliasLI
.addRange(LiveRange(FillerStart
, FillerEnd
,
149 AliasLI
.getNextValue(~0U, 0)));
153 // Okay, merge "B1" into the same value number as "B0".
154 if (BValNo
!= ValLR
->ValId
)
155 IntB
.MergeValueNumberInto(BValNo
, ValLR
->ValId
);
156 DOUT
<< " result = "; IntB
.print(DOUT
, mri_
);
159 // If the source instruction was killing the source register before the
160 // merge, unset the isKill marker given the live range has been extended.
161 int UIdx
= ValLREndInst
->findRegisterUseOperandIdx(IntB
.reg
, true);
163 ValLREndInst
->getOperand(UIdx
).unsetIsKill();
165 // Finally, delete the copy instruction.
166 li_
->RemoveMachineInstrFromMaps(CopyMI
);
167 CopyMI
->eraseFromParent();
172 /// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
173 /// which are the src/dst of the copy instruction CopyMI. This returns true
174 /// if the copy was successfully coallesced away, or if it is never possible
175 /// to coallesce this copy, due to register constraints. It returns
176 /// false if it is not currently possible to coallesce this interval, but
177 /// it may be possible if other things get coallesced.
178 bool SimpleRegisterCoalescing::JoinCopy(MachineInstr
*CopyMI
,
179 unsigned SrcReg
, unsigned DstReg
, bool PhysOnly
) {
180 DOUT
<< li_
->getInstructionIndex(CopyMI
) << '\t' << *CopyMI
;
182 // Get representative registers.
183 unsigned repSrcReg
= rep(SrcReg
);
184 unsigned repDstReg
= rep(DstReg
);
186 // If they are already joined we continue.
187 if (repSrcReg
== repDstReg
) {
188 DOUT
<< "\tCopy already coallesced.\n";
189 return true; // Not coallescable.
192 bool SrcIsPhys
= MRegisterInfo::isPhysicalRegister(repSrcReg
);
193 bool DstIsPhys
= MRegisterInfo::isPhysicalRegister(repDstReg
);
194 if (PhysOnly
&& !SrcIsPhys
&& !DstIsPhys
)
195 // Only joining physical registers with virtual registers in this round.
198 // If they are both physical registers, we cannot join them.
199 if (SrcIsPhys
&& DstIsPhys
) {
200 DOUT
<< "\tCan not coallesce physregs.\n";
201 return true; // Not coallescable.
204 // We only join virtual registers with allocatable physical registers.
205 if (SrcIsPhys
&& !allocatableRegs_
[repSrcReg
]) {
206 DOUT
<< "\tSrc reg is unallocatable physreg.\n";
207 return true; // Not coallescable.
209 if (DstIsPhys
&& !allocatableRegs_
[repDstReg
]) {
210 DOUT
<< "\tDst reg is unallocatable physreg.\n";
211 return true; // Not coallescable.
214 // If they are not of the same register class, we cannot join them.
215 if (differingRegisterClasses(repSrcReg
, repDstReg
)) {
216 DOUT
<< "\tSrc/Dest are different register classes.\n";
217 return true; // Not coallescable.
220 LiveInterval
&SrcInt
= li_
->getInterval(repSrcReg
);
221 LiveInterval
&DstInt
= li_
->getInterval(repDstReg
);
222 assert(SrcInt
.reg
== repSrcReg
&& DstInt
.reg
== repDstReg
&&
223 "Register mapping is horribly broken!");
225 DOUT
<< "\t\tInspecting "; SrcInt
.print(DOUT
, mri_
);
226 DOUT
<< " and "; DstInt
.print(DOUT
, mri_
);
229 // Check if it is necessary to propagate "isDead" property before intervals
231 MachineOperand
*mopd
= CopyMI
->findRegisterDefOperand(DstReg
);
232 bool isDead
= mopd
->isDead();
233 bool isShorten
= false;
234 unsigned SrcStart
= 0, RemoveStart
= 0;
235 unsigned SrcEnd
= 0, RemoveEnd
= 0;
237 unsigned CopyIdx
= li_
->getInstructionIndex(CopyMI
);
238 LiveInterval::iterator SrcLR
=
239 SrcInt
.FindLiveRangeContaining(li_
->getUseIndex(CopyIdx
));
240 RemoveStart
= SrcStart
= SrcLR
->start
;
241 RemoveEnd
= SrcEnd
= SrcLR
->end
;
242 // The instruction which defines the src is only truly dead if there are
243 // no intermediate uses and there isn't a use beyond the copy.
244 // FIXME: find the last use, mark is kill and shorten the live range.
245 if (SrcEnd
> li_
->getDefIndex(CopyIdx
)) {
249 MachineInstr
*LastUse
= lastRegisterUse(SrcStart
, CopyIdx
, repSrcReg
, MOU
);
251 // Shorten the liveinterval to the end of last use.
255 RemoveStart
= li_
->getDefIndex(li_
->getInstructionIndex(LastUse
));
258 MachineInstr
*SrcMI
= li_
->getInstructionFromIndex(SrcStart
);
260 MachineOperand
*mops
= findDefOperand(SrcMI
, repSrcReg
);
262 // A dead def should have a single cycle interval.
269 // We need to be careful about coalescing a source physical register with a
270 // virtual register. Once the coalescing is done, it cannot be broken and
271 // these are not spillable! If the destination interval uses are far away,
272 // think twice about coalescing them!
273 if (!mopd
->isDead() && (SrcIsPhys
|| DstIsPhys
)) {
274 LiveInterval
&JoinVInt
= SrcIsPhys
? DstInt
: SrcInt
;
275 unsigned JoinVReg
= SrcIsPhys
? repDstReg
: repSrcReg
;
276 unsigned JoinPReg
= SrcIsPhys
? repSrcReg
: repDstReg
;
277 const TargetRegisterClass
*RC
= mf_
->getSSARegMap()->getRegClass(JoinVReg
);
278 unsigned Threshold
= allocatableRCRegs_
[RC
].count();
280 // If the virtual register live interval is long has it has low use desity,
281 // do not join them, instead mark the physical register as its allocation
283 unsigned Length
= JoinVInt
.getSize() / InstrSlots::NUM
;
284 LiveVariables::VarInfo
&vi
= lv_
->getVarInfo(JoinVReg
);
285 if (Length
> Threshold
&&
286 (((float)vi
.NumUses
/ Length
) < (1.0 / Threshold
))) {
287 JoinVInt
.preference
= JoinPReg
;
289 DOUT
<< "\tMay tie down a physical register, abort!\n";
294 // Okay, attempt to join these two intervals. On failure, this returns false.
295 // Otherwise, if one of the intervals being joined is a physreg, this method
296 // always canonicalizes DstInt to be it. The output "SrcInt" will not have
297 // been modified, so we can use this information below to update aliases.
298 if (JoinIntervals(DstInt
, SrcInt
)) {
300 // Result of the copy is dead. Propagate this property.
302 assert(MRegisterInfo::isPhysicalRegister(repSrcReg
) &&
303 "Live-in must be a physical register!");
304 // Live-in to the function but dead. Remove it from entry live-in set.
305 // JoinIntervals may end up swapping the two intervals.
306 mf_
->begin()->removeLiveIn(repSrcReg
);
308 MachineInstr
*SrcMI
= li_
->getInstructionFromIndex(SrcStart
);
310 MachineOperand
*mops
= findDefOperand(SrcMI
, repSrcReg
);
317 if (isShorten
|| isDead
) {
318 // Shorten the live interval.
319 LiveInterval
&LiveInInt
= (repSrcReg
== DstInt
.reg
) ? DstInt
: SrcInt
;
320 LiveInInt
.removeRange(RemoveStart
, RemoveEnd
);
323 // Coallescing failed.
325 // If we can eliminate the copy without merging the live ranges, do so now.
326 if (AdjustCopiesBackFrom(SrcInt
, DstInt
, CopyMI
))
329 // Otherwise, we are unable to join the intervals.
330 DOUT
<< "Interference!\n";
334 bool Swapped
= repSrcReg
== DstInt
.reg
;
336 std::swap(repSrcReg
, repDstReg
);
337 assert(MRegisterInfo::isVirtualRegister(repSrcReg
) &&
338 "LiveInterval::join didn't work right!");
340 // If we're about to merge live ranges into a physical register live range,
341 // we have to update any aliased register's live ranges to indicate that they
342 // have clobbered values for this range.
343 if (MRegisterInfo::isPhysicalRegister(repDstReg
)) {
344 // Unset unnecessary kills.
345 if (!DstInt
.containsOneValue()) {
346 for (LiveInterval::Ranges::const_iterator I
= SrcInt
.begin(),
347 E
= SrcInt
.end(); I
!= E
; ++I
)
348 unsetRegisterKills(I
->start
, I
->end
, repDstReg
);
351 // Update the liveintervals of sub-registers.
352 for (const unsigned *AS
= mri_
->getSubRegisters(repDstReg
); *AS
; ++AS
)
353 li_
->getInterval(*AS
).MergeInClobberRanges(SrcInt
);
355 // Merge use info if the destination is a virtual register.
356 LiveVariables::VarInfo
& dVI
= lv_
->getVarInfo(repDstReg
);
357 LiveVariables::VarInfo
& sVI
= lv_
->getVarInfo(repSrcReg
);
358 dVI
.NumUses
+= sVI
.NumUses
;
361 DOUT
<< "\n\t\tJoined. Result = "; DstInt
.print(DOUT
, mri_
);
364 // Remember these liveintervals have been joined.
365 JoinedLIs
.set(repSrcReg
- MRegisterInfo::FirstVirtualRegister
);
366 if (MRegisterInfo::isVirtualRegister(repDstReg
))
367 JoinedLIs
.set(repDstReg
- MRegisterInfo::FirstVirtualRegister
);
369 // If the intervals were swapped by Join, swap them back so that the register
370 // mapping (in the r2i map) is correct.
371 if (Swapped
) SrcInt
.swap(DstInt
);
372 li_
->removeInterval(repSrcReg
);
373 r2rMap_
[repSrcReg
] = repDstReg
;
375 // Finally, delete the copy instruction.
376 li_
->RemoveMachineInstrFromMaps(CopyMI
);
377 CopyMI
->eraseFromParent();
383 /// ComputeUltimateVN - Assuming we are going to join two live intervals,
384 /// compute what the resultant value numbers for each value in the input two
385 /// ranges will be. This is complicated by copies between the two which can
386 /// and will commonly cause multiple value numbers to be merged into one.
388 /// VN is the value number that we're trying to resolve. InstDefiningValue
389 /// keeps track of the new InstDefiningValue assignment for the result
390 /// LiveInterval. ThisFromOther/OtherFromThis are sets that keep track of
391 /// whether a value in this or other is a copy from the opposite set.
392 /// ThisValNoAssignments/OtherValNoAssignments keep track of value #'s that have
393 /// already been assigned.
395 /// ThisFromOther[x] - If x is defined as a copy from the other interval, this
396 /// contains the value number the copy is from.
398 static unsigned ComputeUltimateVN(unsigned VN
,
399 SmallVector
<std::pair
<unsigned,
400 unsigned>, 16> &ValueNumberInfo
,
401 SmallVector
<int, 16> &ThisFromOther
,
402 SmallVector
<int, 16> &OtherFromThis
,
403 SmallVector
<int, 16> &ThisValNoAssignments
,
404 SmallVector
<int, 16> &OtherValNoAssignments
,
405 LiveInterval
&ThisLI
, LiveInterval
&OtherLI
) {
406 // If the VN has already been computed, just return it.
407 if (ThisValNoAssignments
[VN
] >= 0)
408 return ThisValNoAssignments
[VN
];
409 // assert(ThisValNoAssignments[VN] != -2 && "Cyclic case?");
411 // If this val is not a copy from the other val, then it must be a new value
412 // number in the destination.
413 int OtherValNo
= ThisFromOther
[VN
];
414 if (OtherValNo
== -1) {
415 ValueNumberInfo
.push_back(ThisLI
.getValNumInfo(VN
));
416 return ThisValNoAssignments
[VN
] = ValueNumberInfo
.size()-1;
419 // Otherwise, this *is* a copy from the RHS. If the other side has already
420 // been computed, return it.
421 if (OtherValNoAssignments
[OtherValNo
] >= 0)
422 return ThisValNoAssignments
[VN
] = OtherValNoAssignments
[OtherValNo
];
424 // Mark this value number as currently being computed, then ask what the
425 // ultimate value # of the other value is.
426 ThisValNoAssignments
[VN
] = -2;
427 unsigned UltimateVN
=
428 ComputeUltimateVN(OtherValNo
, ValueNumberInfo
,
429 OtherFromThis
, ThisFromOther
,
430 OtherValNoAssignments
, ThisValNoAssignments
,
432 return ThisValNoAssignments
[VN
] = UltimateVN
;
435 static bool InVector(unsigned Val
, const SmallVector
<unsigned, 8> &V
) {
436 return std::find(V
.begin(), V
.end(), Val
) != V
.end();
439 /// SimpleJoin - Attempt to joint the specified interval into this one. The
440 /// caller of this method must guarantee that the RHS only contains a single
441 /// value number and that the RHS is not defined by a copy from this
442 /// interval. This returns false if the intervals are not joinable, or it
443 /// joins them and returns true.
444 bool SimpleRegisterCoalescing::SimpleJoin(LiveInterval
&LHS
, LiveInterval
&RHS
) {
445 assert(RHS
.containsOneValue());
447 // Some number (potentially more than one) value numbers in the current
448 // interval may be defined as copies from the RHS. Scan the overlapping
449 // portions of the LHS and RHS, keeping track of this and looking for
450 // overlapping live ranges that are NOT defined as copies. If these exist, we
453 LiveInterval::iterator LHSIt
= LHS
.begin(), LHSEnd
= LHS
.end();
454 LiveInterval::iterator RHSIt
= RHS
.begin(), RHSEnd
= RHS
.end();
456 if (LHSIt
->start
< RHSIt
->start
) {
457 LHSIt
= std::upper_bound(LHSIt
, LHSEnd
, RHSIt
->start
);
458 if (LHSIt
!= LHS
.begin()) --LHSIt
;
459 } else if (RHSIt
->start
< LHSIt
->start
) {
460 RHSIt
= std::upper_bound(RHSIt
, RHSEnd
, LHSIt
->start
);
461 if (RHSIt
!= RHS
.begin()) --RHSIt
;
464 SmallVector
<unsigned, 8> EliminatedLHSVals
;
467 // Determine if these live intervals overlap.
468 bool Overlaps
= false;
469 if (LHSIt
->start
<= RHSIt
->start
)
470 Overlaps
= LHSIt
->end
> RHSIt
->start
;
472 Overlaps
= RHSIt
->end
> LHSIt
->start
;
474 // If the live intervals overlap, there are two interesting cases: if the
475 // LHS interval is defined by a copy from the RHS, it's ok and we record
476 // that the LHS value # is the same as the RHS. If it's not, then we cannot
477 // coallesce these live ranges and we bail out.
479 // If we haven't already recorded that this value # is safe, check it.
480 if (!InVector(LHSIt
->ValId
, EliminatedLHSVals
)) {
481 // Copy from the RHS?
482 unsigned SrcReg
= LHS
.getSrcRegForValNum(LHSIt
->ValId
);
483 if (rep(SrcReg
) != RHS
.reg
)
484 return false; // Nope, bail out.
486 EliminatedLHSVals
.push_back(LHSIt
->ValId
);
489 // We know this entire LHS live range is okay, so skip it now.
490 if (++LHSIt
== LHSEnd
) break;
494 if (LHSIt
->end
< RHSIt
->end
) {
495 if (++LHSIt
== LHSEnd
) break;
497 // One interesting case to check here. It's possible that we have
498 // something like "X3 = Y" which defines a new value number in the LHS,
499 // and is the last use of this liverange of the RHS. In this case, we
500 // want to notice this copy (so that it gets coallesced away) even though
501 // the live ranges don't actually overlap.
502 if (LHSIt
->start
== RHSIt
->end
) {
503 if (InVector(LHSIt
->ValId
, EliminatedLHSVals
)) {
504 // We already know that this value number is going to be merged in
505 // if coallescing succeeds. Just skip the liverange.
506 if (++LHSIt
== LHSEnd
) break;
508 // Otherwise, if this is a copy from the RHS, mark it as being merged
510 if (rep(LHS
.getSrcRegForValNum(LHSIt
->ValId
)) == RHS
.reg
) {
511 EliminatedLHSVals
.push_back(LHSIt
->ValId
);
513 // We know this entire LHS live range is okay, so skip it now.
514 if (++LHSIt
== LHSEnd
) break;
519 if (++RHSIt
== RHSEnd
) break;
523 // If we got here, we know that the coallescing will be successful and that
524 // the value numbers in EliminatedLHSVals will all be merged together. Since
525 // the most common case is that EliminatedLHSVals has a single number, we
526 // optimize for it: if there is more than one value, we merge them all into
527 // the lowest numbered one, then handle the interval as if we were merging
528 // with one value number.
530 if (EliminatedLHSVals
.size() > 1) {
531 // Loop through all the equal value numbers merging them into the smallest
533 unsigned Smallest
= EliminatedLHSVals
[0];
534 for (unsigned i
= 1, e
= EliminatedLHSVals
.size(); i
!= e
; ++i
) {
535 if (EliminatedLHSVals
[i
] < Smallest
) {
536 // Merge the current notion of the smallest into the smaller one.
537 LHS
.MergeValueNumberInto(Smallest
, EliminatedLHSVals
[i
]);
538 Smallest
= EliminatedLHSVals
[i
];
540 // Merge into the smallest.
541 LHS
.MergeValueNumberInto(EliminatedLHSVals
[i
], Smallest
);
546 assert(!EliminatedLHSVals
.empty() && "No copies from the RHS?");
547 LHSValNo
= EliminatedLHSVals
[0];
550 // Okay, now that there is a single LHS value number that we're merging the
551 // RHS into, update the value number info for the LHS to indicate that the
552 // value number is defined where the RHS value number was.
553 LHS
.setValueNumberInfo(LHSValNo
, RHS
.getValNumInfo(0));
555 // Okay, the final step is to loop over the RHS live intervals, adding them to
557 LHS
.MergeRangesInAsValue(RHS
, LHSValNo
);
558 LHS
.weight
+= RHS
.weight
;
559 if (RHS
.preference
&& !LHS
.preference
)
560 LHS
.preference
= RHS
.preference
;
565 /// JoinIntervals - Attempt to join these two intervals. On failure, this
566 /// returns false. Otherwise, if one of the intervals being joined is a
567 /// physreg, this method always canonicalizes LHS to be it. The output
568 /// "RHS" will not have been modified, so we can use this information
569 /// below to update aliases.
570 bool SimpleRegisterCoalescing::JoinIntervals(LiveInterval
&LHS
, LiveInterval
&RHS
) {
571 // Compute the final value assignment, assuming that the live ranges can be
573 SmallVector
<int, 16> LHSValNoAssignments
;
574 SmallVector
<int, 16> RHSValNoAssignments
;
575 SmallVector
<std::pair
<unsigned,unsigned>, 16> ValueNumberInfo
;
577 // If a live interval is a physical register, conservatively check if any
578 // of its sub-registers is overlapping the live interval of the virtual
579 // register. If so, do not coalesce.
580 if (MRegisterInfo::isPhysicalRegister(LHS
.reg
) &&
581 *mri_
->getSubRegisters(LHS
.reg
)) {
582 for (const unsigned* SR
= mri_
->getSubRegisters(LHS
.reg
); *SR
; ++SR
)
583 if (li_
->hasInterval(*SR
) && RHS
.overlaps(li_
->getInterval(*SR
))) {
584 DOUT
<< "Interfere with sub-register ";
585 DEBUG(li_
->getInterval(*SR
).print(DOUT
, mri_
));
588 } else if (MRegisterInfo::isPhysicalRegister(RHS
.reg
) &&
589 *mri_
->getSubRegisters(RHS
.reg
)) {
590 for (const unsigned* SR
= mri_
->getSubRegisters(RHS
.reg
); *SR
; ++SR
)
591 if (li_
->hasInterval(*SR
) && LHS
.overlaps(li_
->getInterval(*SR
))) {
592 DOUT
<< "Interfere with sub-register ";
593 DEBUG(li_
->getInterval(*SR
).print(DOUT
, mri_
));
598 // Compute ultimate value numbers for the LHS and RHS values.
599 if (RHS
.containsOneValue()) {
600 // Copies from a liveinterval with a single value are simple to handle and
601 // very common, handle the special case here. This is important, because
602 // often RHS is small and LHS is large (e.g. a physreg).
604 // Find out if the RHS is defined as a copy from some value in the LHS.
606 std::pair
<unsigned,unsigned> RHSValNoInfo
;
607 unsigned RHSSrcReg
= RHS
.getSrcRegForValNum(0);
608 if ((RHSSrcReg
== 0 || rep(RHSSrcReg
) != LHS
.reg
)) {
609 // If RHS is not defined as a copy from the LHS, we can use simpler and
610 // faster checks to see if the live ranges are coallescable. This joiner
611 // can't swap the LHS/RHS intervals though.
612 if (!MRegisterInfo::isPhysicalRegister(RHS
.reg
)) {
613 return SimpleJoin(LHS
, RHS
);
615 RHSValNoInfo
= RHS
.getValNumInfo(0);
618 // It was defined as a copy from the LHS, find out what value # it is.
619 unsigned ValInst
= RHS
.getInstForValNum(0);
620 RHSValID
= LHS
.getLiveRangeContaining(ValInst
-1)->ValId
;
621 RHSValNoInfo
= LHS
.getValNumInfo(RHSValID
);
624 LHSValNoAssignments
.resize(LHS
.getNumValNums(), -1);
625 RHSValNoAssignments
.resize(RHS
.getNumValNums(), -1);
626 ValueNumberInfo
.resize(LHS
.getNumValNums());
628 // Okay, *all* of the values in LHS that are defined as a copy from RHS
629 // should now get updated.
630 for (unsigned VN
= 0, e
= LHS
.getNumValNums(); VN
!= e
; ++VN
) {
631 if (unsigned LHSSrcReg
= LHS
.getSrcRegForValNum(VN
)) {
632 if (rep(LHSSrcReg
) != RHS
.reg
) {
633 // If this is not a copy from the RHS, its value number will be
634 // unmodified by the coallescing.
635 ValueNumberInfo
[VN
] = LHS
.getValNumInfo(VN
);
636 LHSValNoAssignments
[VN
] = VN
;
637 } else if (RHSValID
== -1) {
638 // Otherwise, it is a copy from the RHS, and we don't already have a
639 // value# for it. Keep the current value number, but remember it.
640 LHSValNoAssignments
[VN
] = RHSValID
= VN
;
641 ValueNumberInfo
[VN
] = RHSValNoInfo
;
643 // Otherwise, use the specified value #.
644 LHSValNoAssignments
[VN
] = RHSValID
;
645 if (VN
!= (unsigned)RHSValID
)
646 ValueNumberInfo
[VN
].first
= ~1U;
648 ValueNumberInfo
[VN
] = RHSValNoInfo
;
651 ValueNumberInfo
[VN
] = LHS
.getValNumInfo(VN
);
652 LHSValNoAssignments
[VN
] = VN
;
656 assert(RHSValID
!= -1 && "Didn't find value #?");
657 RHSValNoAssignments
[0] = RHSValID
;
660 // Loop over the value numbers of the LHS, seeing if any are defined from
662 SmallVector
<int, 16> LHSValsDefinedFromRHS
;
663 LHSValsDefinedFromRHS
.resize(LHS
.getNumValNums(), -1);
664 for (unsigned VN
= 0, e
= LHS
.getNumValNums(); VN
!= e
; ++VN
) {
665 unsigned ValSrcReg
= LHS
.getSrcRegForValNum(VN
);
666 if (ValSrcReg
== 0) // Src not defined by a copy?
669 // DstReg is known to be a register in the LHS interval. If the src is
670 // from the RHS interval, we can use its value #.
671 if (rep(ValSrcReg
) != RHS
.reg
)
674 // Figure out the value # from the RHS.
675 unsigned ValInst
= LHS
.getInstForValNum(VN
);
676 LHSValsDefinedFromRHS
[VN
] = RHS
.getLiveRangeContaining(ValInst
-1)->ValId
;
679 // Loop over the value numbers of the RHS, seeing if any are defined from
681 SmallVector
<int, 16> RHSValsDefinedFromLHS
;
682 RHSValsDefinedFromLHS
.resize(RHS
.getNumValNums(), -1);
683 for (unsigned VN
= 0, e
= RHS
.getNumValNums(); VN
!= e
; ++VN
) {
684 unsigned ValSrcReg
= RHS
.getSrcRegForValNum(VN
);
685 if (ValSrcReg
== 0) // Src not defined by a copy?
688 // DstReg is known to be a register in the RHS interval. If the src is
689 // from the LHS interval, we can use its value #.
690 if (rep(ValSrcReg
) != LHS
.reg
)
693 // Figure out the value # from the LHS.
694 unsigned ValInst
= RHS
.getInstForValNum(VN
);
695 RHSValsDefinedFromLHS
[VN
] = LHS
.getLiveRangeContaining(ValInst
-1)->ValId
;
698 LHSValNoAssignments
.resize(LHS
.getNumValNums(), -1);
699 RHSValNoAssignments
.resize(RHS
.getNumValNums(), -1);
700 ValueNumberInfo
.reserve(LHS
.getNumValNums() + RHS
.getNumValNums());
702 for (unsigned VN
= 0, e
= LHS
.getNumValNums(); VN
!= e
; ++VN
) {
703 if (LHSValNoAssignments
[VN
] >= 0 || LHS
.getInstForValNum(VN
) == ~2U)
705 ComputeUltimateVN(VN
, ValueNumberInfo
,
706 LHSValsDefinedFromRHS
, RHSValsDefinedFromLHS
,
707 LHSValNoAssignments
, RHSValNoAssignments
, LHS
, RHS
);
709 for (unsigned VN
= 0, e
= RHS
.getNumValNums(); VN
!= e
; ++VN
) {
710 if (RHSValNoAssignments
[VN
] >= 0 || RHS
.getInstForValNum(VN
) == ~2U)
712 // If this value number isn't a copy from the LHS, it's a new number.
713 if (RHSValsDefinedFromLHS
[VN
] == -1) {
714 ValueNumberInfo
.push_back(RHS
.getValNumInfo(VN
));
715 RHSValNoAssignments
[VN
] = ValueNumberInfo
.size()-1;
719 ComputeUltimateVN(VN
, ValueNumberInfo
,
720 RHSValsDefinedFromLHS
, LHSValsDefinedFromRHS
,
721 RHSValNoAssignments
, LHSValNoAssignments
, RHS
, LHS
);
725 // Armed with the mappings of LHS/RHS values to ultimate values, walk the
726 // interval lists to see if these intervals are coallescable.
727 LiveInterval::const_iterator I
= LHS
.begin();
728 LiveInterval::const_iterator IE
= LHS
.end();
729 LiveInterval::const_iterator J
= RHS
.begin();
730 LiveInterval::const_iterator JE
= RHS
.end();
732 // Skip ahead until the first place of potential sharing.
733 if (I
->start
< J
->start
) {
734 I
= std::upper_bound(I
, IE
, J
->start
);
735 if (I
!= LHS
.begin()) --I
;
736 } else if (J
->start
< I
->start
) {
737 J
= std::upper_bound(J
, JE
, I
->start
);
738 if (J
!= RHS
.begin()) --J
;
742 // Determine if these two live ranges overlap.
744 if (I
->start
< J
->start
) {
745 Overlaps
= I
->end
> J
->start
;
747 Overlaps
= J
->end
> I
->start
;
750 // If so, check value # info to determine if they are really different.
752 // If the live range overlap will map to the same value number in the
753 // result liverange, we can still coallesce them. If not, we can't.
754 if (LHSValNoAssignments
[I
->ValId
] != RHSValNoAssignments
[J
->ValId
])
758 if (I
->end
< J
->end
) {
767 // If we get here, we know that we can coallesce the live ranges. Ask the
768 // intervals to coallesce themselves now.
769 LHS
.join(RHS
, &LHSValNoAssignments
[0], &RHSValNoAssignments
[0],
775 // DepthMBBCompare - Comparison predicate that sort first based on the loop
776 // depth of the basic block (the unsigned), and then on the MBB number.
777 struct DepthMBBCompare
{
778 typedef std::pair
<unsigned, MachineBasicBlock
*> DepthMBBPair
;
779 bool operator()(const DepthMBBPair
&LHS
, const DepthMBBPair
&RHS
) const {
780 if (LHS
.first
> RHS
.first
) return true; // Deeper loops first
781 return LHS
.first
== RHS
.first
&&
782 LHS
.second
->getNumber() < RHS
.second
->getNumber();
787 void SimpleRegisterCoalescing::CopyCoallesceInMBB(MachineBasicBlock
*MBB
,
788 std::vector
<CopyRec
> *TryAgain
, bool PhysOnly
) {
789 DOUT
<< ((Value
*)MBB
->getBasicBlock())->getName() << ":\n";
791 for (MachineBasicBlock::iterator MII
= MBB
->begin(), E
= MBB
->end();
793 MachineInstr
*Inst
= MII
++;
795 // If this isn't a copy, we can't join intervals.
796 unsigned SrcReg
, DstReg
;
797 if (!tii_
->isMoveInstr(*Inst
, SrcReg
, DstReg
)) continue;
799 if (TryAgain
&& !JoinCopy(Inst
, SrcReg
, DstReg
, PhysOnly
))
800 TryAgain
->push_back(getCopyRec(Inst
, SrcReg
, DstReg
));
804 void SimpleRegisterCoalescing::joinIntervals() {
805 DOUT
<< "********** JOINING INTERVALS ***********\n";
807 JoinedLIs
.resize(li_
->getNumIntervals());
810 std::vector
<CopyRec
> TryAgainList
;
811 const LoopInfo
&LI
= getAnalysis
<LoopInfo
>();
812 if (LI
.begin() == LI
.end()) {
813 // If there are no loops in the function, join intervals in function order.
814 for (MachineFunction::iterator I
= mf_
->begin(), E
= mf_
->end();
816 CopyCoallesceInMBB(I
, &TryAgainList
);
818 // Otherwise, join intervals in inner loops before other intervals.
819 // Unfortunately we can't just iterate over loop hierarchy here because
820 // there may be more MBB's than BB's. Collect MBB's for sorting.
822 // Join intervals in the function prolog first. We want to join physical
823 // registers with virtual registers before the intervals got too long.
824 std::vector
<std::pair
<unsigned, MachineBasicBlock
*> > MBBs
;
825 for (MachineFunction::iterator I
= mf_
->begin(), E
= mf_
->end(); I
!= E
;++I
)
826 MBBs
.push_back(std::make_pair(LI
.getLoopDepth(I
->getBasicBlock()), I
));
828 // Sort by loop depth.
829 std::sort(MBBs
.begin(), MBBs
.end(), DepthMBBCompare());
831 // Finally, join intervals in loop nest order.
832 for (unsigned i
= 0, e
= MBBs
.size(); i
!= e
; ++i
)
833 CopyCoallesceInMBB(MBBs
[i
].second
, NULL
, true);
834 for (unsigned i
= 0, e
= MBBs
.size(); i
!= e
; ++i
)
835 CopyCoallesceInMBB(MBBs
[i
].second
, &TryAgainList
, false);
838 // Joining intervals can allow other intervals to be joined. Iteratively join
839 // until we make no progress.
840 bool ProgressMade
= true;
841 while (ProgressMade
) {
842 ProgressMade
= false;
844 for (unsigned i
= 0, e
= TryAgainList
.size(); i
!= e
; ++i
) {
845 CopyRec
&TheCopy
= TryAgainList
[i
];
847 JoinCopy(TheCopy
.MI
, TheCopy
.SrcReg
, TheCopy
.DstReg
)) {
848 TheCopy
.MI
= 0; // Mark this one as done.
854 // Some live range has been lengthened due to colaescing, eliminate the
855 // unnecessary kills.
856 int RegNum
= JoinedLIs
.find_first();
857 while (RegNum
!= -1) {
858 unsigned Reg
= RegNum
+ MRegisterInfo::FirstVirtualRegister
;
859 unsigned repReg
= rep(Reg
);
860 LiveInterval
&LI
= li_
->getInterval(repReg
);
861 LiveVariables::VarInfo
& svi
= lv_
->getVarInfo(Reg
);
862 for (unsigned i
= 0, e
= svi
.Kills
.size(); i
!= e
; ++i
) {
863 MachineInstr
*Kill
= svi
.Kills
[i
];
864 // Suppose vr1 = op vr2, x
865 // and vr1 and vr2 are coalesced. vr2 should still be marked kill
866 // unless it is a two-address operand.
867 if (li_
->isRemoved(Kill
) || hasRegisterDef(Kill
, repReg
))
869 if (LI
.liveAt(li_
->getInstructionIndex(Kill
) + InstrSlots::NUM
))
870 unsetRegisterKill(Kill
, repReg
);
872 RegNum
= JoinedLIs
.find_next(RegNum
);
875 DOUT
<< "*** Register mapping ***\n";
876 for (int i
= 0, e
= r2rMap_
.size(); i
!= e
; ++i
)
878 DOUT
<< " reg " << i
<< " -> ";
879 DEBUG(printRegName(r2rMap_
[i
]));
884 /// Return true if the two specified registers belong to different register
885 /// classes. The registers may be either phys or virt regs.
886 bool SimpleRegisterCoalescing::differingRegisterClasses(unsigned RegA
,
887 unsigned RegB
) const {
889 // Get the register classes for the first reg.
890 if (MRegisterInfo::isPhysicalRegister(RegA
)) {
891 assert(MRegisterInfo::isVirtualRegister(RegB
) &&
892 "Shouldn't consider two physregs!");
893 return !mf_
->getSSARegMap()->getRegClass(RegB
)->contains(RegA
);
896 // Compare against the regclass for the second reg.
897 const TargetRegisterClass
*RegClass
= mf_
->getSSARegMap()->getRegClass(RegA
);
898 if (MRegisterInfo::isVirtualRegister(RegB
))
899 return RegClass
!= mf_
->getSSARegMap()->getRegClass(RegB
);
901 return !RegClass
->contains(RegB
);
904 /// lastRegisterUse - Returns the last use of the specific register between
905 /// cycles Start and End. It also returns the use operand by reference. It
906 /// returns NULL if there are no uses.
908 SimpleRegisterCoalescing::lastRegisterUse(unsigned Start
, unsigned End
, unsigned Reg
,
909 MachineOperand
*&MOU
) {
910 int e
= (End
-1) / InstrSlots::NUM
* InstrSlots::NUM
;
913 // Skip deleted instructions
914 MachineInstr
*MI
= li_
->getInstructionFromIndex(e
);
915 while ((e
- InstrSlots::NUM
) >= s
&& !MI
) {
916 e
-= InstrSlots::NUM
;
917 MI
= li_
->getInstructionFromIndex(e
);
919 if (e
< s
|| MI
== NULL
)
922 for (unsigned i
= 0, NumOps
= MI
->getNumOperands(); i
!= NumOps
; ++i
) {
923 MachineOperand
&MO
= MI
->getOperand(i
);
924 if (MO
.isReg() && MO
.isUse() && MO
.getReg() &&
925 mri_
->regsOverlap(rep(MO
.getReg()), Reg
)) {
931 e
-= InstrSlots::NUM
;
938 /// findDefOperand - Returns the MachineOperand that is a def of the specific
939 /// register. It returns NULL if the def is not found.
940 MachineOperand
*SimpleRegisterCoalescing::findDefOperand(MachineInstr
*MI
, unsigned Reg
) {
941 for (unsigned i
= 0, e
= MI
->getNumOperands(); i
!= e
; ++i
) {
942 MachineOperand
&MO
= MI
->getOperand(i
);
943 if (MO
.isReg() && MO
.isDef() &&
944 mri_
->regsOverlap(rep(MO
.getReg()), Reg
))
950 /// unsetRegisterKill - Unset IsKill property of all uses of specific register
951 /// of the specific instruction.
952 void SimpleRegisterCoalescing::unsetRegisterKill(MachineInstr
*MI
, unsigned Reg
) {
953 for (unsigned i
= 0, e
= MI
->getNumOperands(); i
!= e
; ++i
) {
954 MachineOperand
&MO
= MI
->getOperand(i
);
955 if (MO
.isReg() && MO
.isUse() && MO
.isKill() && MO
.getReg() &&
956 mri_
->regsOverlap(rep(MO
.getReg()), Reg
))
961 /// unsetRegisterKills - Unset IsKill property of all uses of specific register
962 /// between cycles Start and End.
963 void SimpleRegisterCoalescing::unsetRegisterKills(unsigned Start
, unsigned End
,
965 int e
= (End
-1) / InstrSlots::NUM
* InstrSlots::NUM
;
968 // Skip deleted instructions
969 MachineInstr
*MI
= li_
->getInstructionFromIndex(e
);
970 while ((e
- InstrSlots::NUM
) >= s
&& !MI
) {
971 e
-= InstrSlots::NUM
;
972 MI
= li_
->getInstructionFromIndex(e
);
974 if (e
< s
|| MI
== NULL
)
977 for (unsigned i
= 0, NumOps
= MI
->getNumOperands(); i
!= NumOps
; ++i
) {
978 MachineOperand
&MO
= MI
->getOperand(i
);
979 if (MO
.isReg() && MO
.isUse() && MO
.isKill() && MO
.getReg() &&
980 mri_
->regsOverlap(rep(MO
.getReg()), Reg
)) {
985 e
-= InstrSlots::NUM
;
989 /// hasRegisterDef - True if the instruction defines the specific register.
991 bool SimpleRegisterCoalescing::hasRegisterDef(MachineInstr
*MI
, unsigned Reg
) {
992 for (unsigned i
= 0, e
= MI
->getNumOperands(); i
!= e
; ++i
) {
993 MachineOperand
&MO
= MI
->getOperand(i
);
994 if (MO
.isReg() && MO
.isDef() &&
995 mri_
->regsOverlap(rep(MO
.getReg()), Reg
))
1001 void SimpleRegisterCoalescing::printRegName(unsigned reg
) const {
1002 if (MRegisterInfo::isPhysicalRegister(reg
))
1003 cerr
<< mri_
->getName(reg
);
1005 cerr
<< "%reg" << reg
;
1008 void SimpleRegisterCoalescing::releaseMemory() {
1013 static bool isZeroLengthInterval(LiveInterval
*li
) {
1014 for (LiveInterval::Ranges::const_iterator
1015 i
= li
->ranges
.begin(), e
= li
->ranges
.end(); i
!= e
; ++i
)
1016 if (i
->end
- i
->start
> LiveIntervals::InstrSlots::NUM
)
1021 bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction
&fn
) {
1023 tm_
= &fn
.getTarget();
1024 mri_
= tm_
->getRegisterInfo();
1025 tii_
= tm_
->getInstrInfo();
1026 li_
= &getAnalysis
<LiveIntervals
>();
1027 lv_
= &getAnalysis
<LiveVariables
>();
1029 DOUT
<< "********** SIMPLE REGISTER COALESCING **********\n"
1030 << "********** Function: "
1031 << ((Value
*)mf_
->getFunction())->getName() << '\n';
1033 allocatableRegs_
= mri_
->getAllocatableSet(fn
);
1034 for (MRegisterInfo::regclass_iterator I
= mri_
->regclass_begin(),
1035 E
= mri_
->regclass_end(); I
!= E
; ++I
)
1036 allocatableRCRegs_
.insert(std::make_pair(*I
,mri_
->getAllocatableSet(fn
, *I
)));
1038 r2rMap_
.grow(mf_
->getSSARegMap()->getLastVirtReg());
1040 // Join (coallesce) intervals if requested.
1041 if (EnableJoining
) {
1043 DOUT
<< "********** INTERVALS POST JOINING **********\n";
1044 for (LiveIntervals::iterator I
= li_
->begin(), E
= li_
->end(); I
!= E
; ++I
) {
1045 I
->second
.print(DOUT
, mri_
);
1050 // perform a final pass over the instructions and compute spill
1051 // weights, coalesce virtual registers and remove identity moves.
1052 const LoopInfo
&loopInfo
= getAnalysis
<LoopInfo
>();
1054 for (MachineFunction::iterator mbbi
= mf_
->begin(), mbbe
= mf_
->end();
1055 mbbi
!= mbbe
; ++mbbi
) {
1056 MachineBasicBlock
* mbb
= mbbi
;
1057 unsigned loopDepth
= loopInfo
.getLoopDepth(mbb
->getBasicBlock());
1059 for (MachineBasicBlock::iterator mii
= mbb
->begin(), mie
= mbb
->end();
1061 // if the move will be an identity move delete it
1062 unsigned srcReg
, dstReg
, RegRep
;
1063 if (tii_
->isMoveInstr(*mii
, srcReg
, dstReg
) &&
1064 (RegRep
= rep(srcReg
)) == rep(dstReg
)) {
1065 // remove from def list
1066 LiveInterval
&RegInt
= li_
->getOrCreateInterval(RegRep
);
1067 MachineOperand
*MO
= mii
->findRegisterDefOperand(dstReg
);
1068 // If def of this move instruction is dead, remove its live range from
1069 // the dstination register's live interval.
1071 unsigned MoveIdx
= li_
->getDefIndex(li_
->getInstructionIndex(mii
));
1072 LiveInterval::iterator MLR
= RegInt
.FindLiveRangeContaining(MoveIdx
);
1073 RegInt
.removeRange(MLR
->start
, MoveIdx
+1);
1075 li_
->removeInterval(RegRep
);
1077 li_
->RemoveMachineInstrFromMaps(mii
);
1078 mii
= mbbi
->erase(mii
);
1081 SmallSet
<unsigned, 4> UniqueUses
;
1082 for (unsigned i
= 0, e
= mii
->getNumOperands(); i
!= e
; ++i
) {
1083 const MachineOperand
&mop
= mii
->getOperand(i
);
1084 if (mop
.isRegister() && mop
.getReg() &&
1085 MRegisterInfo::isVirtualRegister(mop
.getReg())) {
1086 // replace register with representative register
1087 unsigned reg
= rep(mop
.getReg());
1088 mii
->getOperand(i
).setReg(reg
);
1090 // Multiple uses of reg by the same instruction. It should not
1091 // contribute to spill weight again.
1092 if (UniqueUses
.count(reg
) != 0)
1094 LiveInterval
&RegInt
= li_
->getInterval(reg
);
1095 float w
= (mop
.isUse()+mop
.isDef()) * powf(10.0F
, (float)loopDepth
);
1096 // If the definition instruction is re-materializable, its spill
1097 // weight is half of what it would have been normally unless it's
1098 // a load from fixed stack slot.
1100 if (RegInt
.remat
&& !tii_
->isLoadFromStackSlot(RegInt
.remat
, Dummy
))
1103 UniqueUses
.insert(reg
);
1111 for (LiveIntervals::iterator I
= li_
->begin(), E
= li_
->end(); I
!= E
; ++I
) {
1112 LiveInterval
&LI
= I
->second
;
1113 if (MRegisterInfo::isVirtualRegister(LI
.reg
)) {
1114 // If the live interval length is essentially zero, i.e. in every live
1115 // range the use follows def immediately, it doesn't make sense to spill
1116 // it and hope it will be easier to allocate for this li.
1117 if (isZeroLengthInterval(&LI
))
1118 LI
.weight
= HUGE_VALF
;
1120 // Slightly prefer live interval that has been assigned a preferred reg.
1124 // Divide the weight of the interval by its size. This encourages
1125 // spilling of intervals that are large and have few uses, and
1126 // discourages spilling of small intervals with many uses.
1127 LI
.weight
/= LI
.getSize();
1135 /// print - Implement the dump method.
1136 void SimpleRegisterCoalescing::print(std::ostream
&O
, const Module
* m
) const {