//===---- MachineCombiner.cpp - Instcombining on SSA form machine code ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The machine combiner pass uses machine trace metrics to ensure the combined
// instructions do not lengthen the critical path or the resource depth.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "machine-combiner"

STATISTIC(NumInstCombined, "Number of machineinst combined");

static cl::opt<unsigned>
inc_threshold("machine-combiner-inc-threshold", cl::Hidden,
              cl::desc("Incremental depth computation will be used for basic "
                       "blocks with more instructions."),
              cl::init(500));

static cl::opt<bool> dump_intrs("machine-combiner-dump-subst-intrs", cl::Hidden,
                                cl::desc("Dump all substituted intrs"),
                                cl::init(false));

#ifdef EXPENSIVE_CHECKS
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(true));
#else
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(false));
#endif

namespace {
class MachineCombiner : public MachineFunctionPass {
  const TargetSubtargetInfo *STI;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MCSchedModel SchedModel;
  MachineRegisterInfo *MRI;
  MachineLoopInfo *MLI; // Current MachineLoopInfo
  MachineTraceMetrics *Traces;
  MachineTraceMetrics::Ensemble *MinInstr;

  TargetSchedModel TSchedModel;

  /// True if optimizing for code size.
  bool OptSize;

public:
  static char ID;
  MachineCombiner() : MachineFunctionPass(ID) {
    initializeMachineCombinerPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &MF) override;
  StringRef getPassName() const override { return "Machine InstCombiner"; }

private:
  bool doSubstitute(unsigned NewSize, unsigned OldSize);
  bool combineInstructions(MachineBasicBlock *);
  MachineInstr *getOperandDef(const MachineOperand &MO);
  unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                    MachineTraceMetrics::Trace BlockTrace);
  unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                      MachineTraceMetrics::Trace BlockTrace);
  bool
  improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
                          MachineTraceMetrics::Trace BlockTrace,
                          SmallVectorImpl<MachineInstr *> &InsInstrs,
                          SmallVectorImpl<MachineInstr *> &DelInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineCombinerPattern Pattern, bool SlackIsAccurate);
  bool preservesResourceLen(MachineBasicBlock *MBB,
                            MachineTraceMetrics::Trace BlockTrace,
                            SmallVectorImpl<MachineInstr *> &InsInstrs,
                            SmallVectorImpl<MachineInstr *> &DelInstrs);
  void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
                     SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
  std::pair<unsigned, unsigned>
  getLatenciesForInstrSequences(MachineInstr &MI,
                                SmallVectorImpl<MachineInstr *> &InsInstrs,
                                SmallVectorImpl<MachineInstr *> &DelInstrs,
                                MachineTraceMetrics::Trace BlockTrace);

  void verifyPatternOrder(MachineBasicBlock *MBB, MachineInstr &Root,
                          SmallVector<MachineCombinerPattern, 16> &Patterns);
};
} // end anonymous namespace

char MachineCombiner::ID = 0;
char &llvm::MachineCombinerID = MachineCombiner::ID;

INITIALIZE_PASS_BEGIN(MachineCombiner, DEBUG_TYPE,
                      "Machine InstCombiner", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
INITIALIZE_PASS_END(MachineCombiner, DEBUG_TYPE, "Machine InstCombiner",
                    false, false)

void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineTraceMetrics>();
  AU.addPreserved<MachineTraceMetrics>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
  MachineInstr *DefInstr = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && Register::isVirtualRegister(MO.getReg()))
    DefInstr = MRI->getUniqueVRegDef(MO.getReg());
  // PHI's have no depth etc.
  if (DefInstr && DefInstr->isPHI())
    DefInstr = nullptr;
  return DefInstr;
}

/// Computes depth of instructions in vector \p InsInstrs.
///
/// \param InsInstrs is a vector of machine instructions
/// \param InstrIdxForVirtReg is a dense map of virtual register to index
/// of defining machine instruction in \p InsInstrs
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Depth of last instruction in \p InsInstrs ("NewRoot")
unsigned
MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineTraceMetrics::Trace BlockTrace) {
  SmallVector<unsigned, 16> InstrDepth;
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // For each instruction in the new sequence compute the depth based on the
  // operands. Use the trace information when possible. For new operands which
  // are tracked in the InstrIdxForVirtReg map depth is looked up in InstrDepth.
  for (auto *InstrPtr : InsInstrs) { // for each Use
    unsigned IDepth = 0;
    for (const MachineOperand &MO : InstrPtr->operands()) {
      // Check for virtual register operand.
      if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
        continue;
      if (!MO.isUse())
        continue;
      unsigned DepthOp = 0;
      unsigned LatencyOp = 0;
      DenseMap<unsigned, unsigned>::iterator II =
          InstrIdxForVirtReg.find(MO.getReg());
      if (II != InstrIdxForVirtReg.end()) {
        // Operand is new virtual register not in trace
        assert(II->second < InstrDepth.size() && "Bad Index");
        MachineInstr *DefInstr = InsInstrs[II->second];
        assert(DefInstr &&
               "There must be a definition for a new virtual register");
        DepthOp = InstrDepth[II->second];
        int DefIdx = DefInstr->findRegisterDefOperandIdx(MO.getReg());
        int UseIdx = InstrPtr->findRegisterUseOperandIdx(MO.getReg());
        LatencyOp = TSchedModel.computeOperandLatency(DefInstr, DefIdx,
                                                      InstrPtr, UseIdx);
      } else {
        MachineInstr *DefInstr = getOperandDef(MO);
        if (DefInstr) {
          DepthOp = BlockTrace.getInstrCycles(*DefInstr).Depth;
          LatencyOp = TSchedModel.computeOperandLatency(
              DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
              InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
        }
      }
      IDepth = std::max(IDepth, DepthOp + LatencyOp);
    }
    InstrDepth.push_back(IDepth);
  }
  unsigned NewRootIdx = InsInstrs.size() - 1;
  return InstrDepth[NewRootIdx];
}

/// Computes instruction latency as max of latency of defined operands.
///
/// \param Root is a machine instruction that could be replaced by NewRoot.
/// It is used to compute a more accurate latency information for NewRoot in
/// case there is a dependent instruction in the same trace (\p BlockTrace)
/// \param NewRoot is the instruction for which the latency is computed
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Latency of \p NewRoot
unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                                     MachineTraceMetrics::Trace BlockTrace) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // Check each definition in NewRoot and compute the latency
  unsigned NewRootLatency = 0;

  for (const MachineOperand &MO : NewRoot->operands()) {
    // Check for virtual register operand.
    if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
      continue;
    if (!MO.isDef())
      continue;
    // Get the first instruction that uses MO
    MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
    RI++;
    if (RI == MRI->reg_end())
      continue;
    MachineInstr *UseMO = RI->getParent();
    unsigned LatencyOp = 0;
    if (UseMO && BlockTrace.isDepInTrace(*Root, *UseMO)) {
      LatencyOp = TSchedModel.computeOperandLatency(
          NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
          UseMO->findRegisterUseOperandIdx(MO.getReg()));
    } else {
      LatencyOp = TSchedModel.computeInstrLatency(NewRoot);
    }
    NewRootLatency = std::max(NewRootLatency, LatencyOp);
  }
  return NewRootLatency;
}
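
// In other words: when NewRoot's result feeds a user that is part of the same
// trace as Root, the more precise def-to-use operand latency is used;
// otherwise the scheduler's generic per-instruction latency is the fallback.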

/// The combiner's goal may differ based on which pattern it is attempting
/// to optimize.
enum class CombinerObjective {
  MustReduceDepth, // The data dependency chain must be improved.
  Default          // The critical path must not be lengthened.
};

static CombinerObjective getCombinerObjective(MachineCombinerPattern P) {
  // TODO: If C++ ever gets a real enum class, make this part of the
  // MachineCombinerPattern class.
  switch (P) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_BY:
  case MachineCombinerPattern::REASSOC_XA_YB:
    return CombinerObjective::MustReduceDepth;
  default:
    return CombinerObjective::Default;
  }
}
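
// For the REASSOC_* patterns the point is depth, not instruction count: e.g.
// rewriting ((a + b) + c) + d as (a + b) + (c + d) keeps three adds but
// shortens the dependency chain from three serial adds to two, so these
// patterns are only accepted when the measured depth actually decreases.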

/// Estimate the latency of the new and original instruction sequence by summing
/// up the latencies of the inserted and deleted instructions. This assumes
/// that the inserted and deleted instructions are dependent instruction chains,
/// which might not hold in all cases.
std::pair<unsigned, unsigned> MachineCombiner::getLatenciesForInstrSequences(
    MachineInstr &MI, SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    MachineTraceMetrics::Trace BlockTrace) {
  assert(!InsInstrs.empty() && "Only support sequences that insert instrs.");
  unsigned NewRootLatency = 0;
  // NewRoot is the last instruction in the \p InsInstrs vector.
  MachineInstr *NewRoot = InsInstrs.back();
  for (unsigned i = 0; i < InsInstrs.size() - 1; i++)
    NewRootLatency += TSchedModel.computeInstrLatency(InsInstrs[i]);
  NewRootLatency += getLatency(&MI, NewRoot, BlockTrace);

  unsigned RootLatency = 0;
  for (auto I : DelInstrs)
    RootLatency += TSchedModel.computeInstrLatency(I);

  return {NewRootLatency, RootLatency};
}
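
// For example, if a pattern inserts {MOV, MADD} and deletes {MUL, ADD}, this
// returns NewRootLatency = latency(MOV) + latency(MADD) and RootLatency =
// latency(MUL) + latency(ADD), i.e. both sequences are (conservatively)
// treated as straight-line dependent chains.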

/// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
/// The new code sequence ends in MI NewRoot. A necessary condition for the new
/// sequence to replace the old sequence is that it cannot lengthen the critical
/// path. The definition of "improve" may be restricted by specifying that the
/// new path improves the data dependency chain (MustReduceDepth).
bool MachineCombiner::improvesCriticalPathLen(
    MachineBasicBlock *MBB, MachineInstr *Root,
    MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
    MachineCombinerPattern Pattern,
    bool SlackIsAccurate) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");
  // Get depth and latency of NewRoot and Root.
  unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
  unsigned RootDepth = BlockTrace.getInstrCycles(*Root).Depth;

  LLVM_DEBUG(dbgs() << "  Dependence data for " << *Root << "\tNewRootDepth: "
                    << NewRootDepth << "\tRootDepth: " << RootDepth);

  // For a transform such as reassociation, the cost equation is
  // conservatively calculated so that we must improve the depth (data
  // dependency cycles) in the critical path to proceed with the transform.
  // Being conservative also protects against inaccuracies in the underlying
  // machine trace metrics and CPU models.
  if (getCombinerObjective(Pattern) == CombinerObjective::MustReduceDepth) {
    LLVM_DEBUG(dbgs() << "\tIt MustReduceDepth ");
    LLVM_DEBUG(NewRootDepth < RootDepth
                   ? dbgs() << "\t  and it does it\n"
                   : dbgs() << "\t  but it does NOT do it\n");
    return NewRootDepth < RootDepth;
  }

  // A more flexible cost calculation for the critical path includes the slack
  // of the original code sequence. This may allow the transform to proceed
  // even if the instruction depths (data dependency cycles) become worse.

  // Account for the latency of the inserted and deleted instructions.
  unsigned NewRootLatency, RootLatency;
  std::tie(NewRootLatency, RootLatency) =
      getLatenciesForInstrSequences(*Root, InsInstrs, DelInstrs, BlockTrace);

  unsigned RootSlack = BlockTrace.getInstrSlack(*Root);
  unsigned NewCycleCount = NewRootDepth + NewRootLatency;
  unsigned OldCycleCount =
      RootDepth + RootLatency + (SlackIsAccurate ? RootSlack : 0);
  LLVM_DEBUG(dbgs() << "\n\tNewRootLatency: " << NewRootLatency
                    << "\tRootLatency: " << RootLatency << "\n\tRootSlack: "
                    << RootSlack << " SlackIsAccurate=" << SlackIsAccurate
                    << "\n\tNewRootDepth + NewRootLatency = " << NewCycleCount
                    << "\n\tRootDepth + RootLatency + RootSlack = "
                    << OldCycleCount;);
  LLVM_DEBUG(NewCycleCount <= OldCycleCount
                 ? dbgs() << "\n\t  It IMPROVES PathLen because"
                 : dbgs() << "\n\t  It DOES NOT improve PathLen because");
  LLVM_DEBUG(dbgs() << "\n\t\tNewCycleCount = " << NewCycleCount
                    << ", OldCycleCount = " << OldCycleCount << "\n");

  return NewCycleCount <= OldCycleCount;
}
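
// Worked example of the slack-based check: with NewRootDepth = 4 and
// NewRootLatency = 3, NewCycleCount = 7; with RootDepth = 3, RootLatency = 2
// and RootSlack = 2, OldCycleCount = 7, so the transform is accepted (7 <= 7)
// even though the new sequence's depth is worse.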

/// Helper routine to convert instructions into their scheduling classes (SC).
void MachineCombiner::instr2instrSC(
    SmallVectorImpl<MachineInstr *> &Instrs,
    SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {
  for (auto *InstrPtr : Instrs) {
    unsigned Opc = InstrPtr->getOpcode();
    unsigned Idx = TII->get(Opc).getSchedClass();
    const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx);
    InstrsSC.push_back(SC);
  }
}

/// True when the new instructions do not increase resource length
bool MachineCombiner::preservesResourceLen(
    MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs) {
  if (!TSchedModel.hasInstrSchedModel())
    return true;

  // Compute current resource length

  //ArrayRef<const MachineBasicBlock *> MBBarr(MBB);
  SmallVector<const MachineBasicBlock *, 1> MBBarr;
  MBBarr.push_back(MBB);
  unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);

  // Deal with SC rather than Instructions.
  SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC;
  SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC;

  instr2instrSC(InsInstrs, InsInstrsSC);
  instr2instrSC(DelInstrs, DelInstrsSC);

  ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
  ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);

  // Compute new resource length.
  unsigned ResLenAfterCombine =
      BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);

  LLVM_DEBUG(dbgs() << "\t\tResource length before replacement: "
                    << ResLenBeforeCombine
                    << " and after: " << ResLenAfterCombine << "\n";);
  LLVM_DEBUG(
      ResLenAfterCombine <= ResLenBeforeCombine
          ? dbgs() << "\t\t  As result it IMPROVES/PRESERVES Resource Length\n"
          : dbgs() << "\t\t  As result it DOES NOT improve/preserve Resource "
                      "Length\n");

  return ResLenAfterCombine <= ResLenBeforeCombine;
}
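
// The resource length is the number of cycles the trace needs on its most
// heavily used processor resource, so the check above rejects a replacement
// that would, e.g., make a multiply-unit-bound block even more
// multiply-bound, even when the critical path itself looks fine.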

/// \returns true when new instruction sequence should be generated
/// independent of whether it lengthens critical path or not
bool MachineCombiner::doSubstitute(unsigned NewSize, unsigned OldSize) {
  if (OptSize && (NewSize < OldSize))
    return true;
  if (!TSchedModel.hasInstrSchedModelOrItineraries())
    return true;
  return false;
}

/// Inserts InsInstrs and deletes DelInstrs. Incrementally updates instruction
/// depths if requested.
///
/// \param MBB basic block to insert instructions in
/// \param MI current machine instruction
/// \param InsInstrs new instructions to insert in \p MBB
/// \param DelInstrs instructions to delete from \p MBB
/// \param MinInstr is a pointer to the machine trace information
/// \param RegUnits set of live registers, needed to compute instruction depths
/// \param IncrementalUpdate if true, compute instruction depths incrementally,
/// otherwise invalidate the trace
static void insertDeleteInstructions(MachineBasicBlock *MBB, MachineInstr &MI,
                                     SmallVector<MachineInstr *, 16> InsInstrs,
                                     SmallVector<MachineInstr *, 16> DelInstrs,
                                     MachineTraceMetrics::Ensemble *MinInstr,
                                     SparseSet<LiveRegUnit> &RegUnits,
                                     bool IncrementalUpdate) {
  for (auto *InstrPtr : InsInstrs)
    MBB->insert((MachineBasicBlock::iterator)&MI, InstrPtr);

  for (auto *InstrPtr : DelInstrs) {
    InstrPtr->eraseFromParentAndMarkDBGValuesForRemoval();
    // Erase all LiveRegs defined by the removed instruction
    for (auto I = RegUnits.begin(); I != RegUnits.end();) {
      if (I->MI == InstrPtr)
        I = RegUnits.erase(I);
      else
        I++;
    }
  }

  if (IncrementalUpdate)
    for (auto *InstrPtr : InsInstrs)
      MinInstr->updateDepth(MBB, *InstrPtr, RegUnits);
  else
    MinInstr->invalidate(MBB);

  NumInstCombined++;
}

// Check that the difference between original and new latency is decreasing for
// later patterns. This helps to discover sub-optimal pattern orderings.
void MachineCombiner::verifyPatternOrder(
    MachineBasicBlock *MBB, MachineInstr &Root,
    SmallVector<MachineCombinerPattern, 16> &Patterns) {
  long PrevLatencyDiff = std::numeric_limits<long>::max();
  (void)PrevLatencyDiff; // Variable is used in assert only.
  for (auto P : Patterns) {
    SmallVector<MachineInstr *, 16> InsInstrs;
    SmallVector<MachineInstr *, 16> DelInstrs;
    DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
    TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg);
    // Found pattern, but did not generate alternative sequence.
    // This can happen e.g. when an immediate could not be materialized
    // in a single instruction.
    if (InsInstrs.empty() || !TSchedModel.hasInstrSchedModelOrItineraries())
      continue;

    unsigned NewRootLatency, RootLatency;
    std::tie(NewRootLatency, RootLatency) = getLatenciesForInstrSequences(
        Root, InsInstrs, DelInstrs, MinInstr->getTrace(MBB));
    long CurrentLatencyDiff = ((long)RootLatency) - ((long)NewRootLatency);
    assert(CurrentLatencyDiff <= PrevLatencyDiff &&
           "Current pattern is better than previous pattern.");
    PrevLatencyDiff = CurrentLatencyDiff;
  }
}

/// Substitute a slow code sequence with a faster one by
/// evaluating instruction combining pattern.
/// The prototype of such a pattern is MUL + ADD -> MADD. Performs instruction
/// combining based on machine trace metrics. Only combine a sequence of
/// instructions when this neither lengthens the critical path nor increases
/// resource pressure. When optimizing for code size always combine when the
/// new sequence is shorter.
bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");

  bool IncrementalUpdate = false;
  auto BlockIter = MBB->begin();
  decltype(BlockIter) LastUpdate;
  // Check if the block is in a loop.
  const MachineLoop *ML = MLI->getLoopFor(MBB);
  if (!MinInstr)
    MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);

  SparseSet<LiveRegUnit> RegUnits;
  RegUnits.setUniverse(TRI->getNumRegUnits());

  while (BlockIter != MBB->end()) {
    auto &MI = *BlockIter++;
    SmallVector<MachineCombinerPattern, 16> Patterns;
    // The motivating example is:
    //
    //     MUL  Other        MUL_op1 MUL_op2  Other
    //      \    /               \      |    /
    //       ADD/SUB      =>        MADD/MSUB
    //       (=Root)                (=NewRoot)
    //
    // The DAGCombine code always replaced MUL + ADD/SUB by MADD. While this is
    // usually beneficial for code size it unfortunately can hurt performance
    // when the ADD is on the critical path, but the MUL is not. With the
    // substitution the MUL becomes part of the critical path (in form of the
    // MADD) and can lengthen it on architectures where the MADD latency is
    // longer than the ADD latency.
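    //
    // As a concrete instance of the pattern above, on AArch64 the pair
    //
    //     mul  w8, w1, w2
    //     add  w0, w8, w3
    //
    // can be replaced by the single instruction
    //
    //     madd w0, w1, w2, w3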
    //
    // For each instruction we check if it can be the root of a combiner
    // pattern. Then for each pattern the new code sequence in form of MI is
    // generated and evaluated. When the efficiency criteria (don't lengthen
    // critical path, don't use more resources) are met the new sequence gets
    // hooked up into the basic block before the old sequence is removed.
    //
    // The algorithm does not try to evaluate all patterns and pick the best.
    // This is only an artificial restriction though. In practice there is
    // mostly one pattern, and getMachineCombinerPatterns() can order patterns
    // based on an internal cost heuristic. If
    // machine-combiner-verify-pattern-order is enabled, all patterns are
    // checked to ensure later patterns do not provide better latency savings.

    if (!TII->getMachineCombinerPatterns(MI, Patterns))
      continue;

    if (VerifyPatternOrder)
      verifyPatternOrder(MBB, MI, Patterns);

    for (auto P : Patterns) {
      SmallVector<MachineInstr *, 16> InsInstrs;
      SmallVector<MachineInstr *, 16> DelInstrs;
      DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
      TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
                                      InstrIdxForVirtReg);
      unsigned NewInstCount = InsInstrs.size();
      unsigned OldInstCount = DelInstrs.size();
      // Found pattern, but did not generate alternative sequence.
      // This can happen e.g. when an immediate could not be materialized
      // in a single instruction.
      if (!NewInstCount)
        continue;

      LLVM_DEBUG(if (dump_intrs) {
        dbgs() << "\tFor the Pattern (" << (int)P
               << ") these instructions could be removed\n";
        for (auto const *InstrPtr : DelInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/false, /*SkipOpers*/false,
                          /*SkipDebugLoc*/false, /*AddNewLine*/true, TII);
        dbgs() << "\tThese instructions could replace the removed ones\n";
        for (auto const *InstrPtr : InsInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/false, /*SkipOpers*/false,
                          /*SkipDebugLoc*/false, /*AddNewLine*/true, TII);
      });

      bool SubstituteAlways = false;
      if (ML && TII->isThroughputPattern(P))
        SubstituteAlways = true;

      if (IncrementalUpdate) {
        // Update depths since the last incremental update.
        MinInstr->updateDepths(LastUpdate, BlockIter, RegUnits);
        LastUpdate = BlockIter;
      }

      // Substitute when we optimize for codesize and the new sequence has
      // fewer instructions OR
      // the new sequence neither lengthens the critical path nor increases
      // resource pressure.
      if (SubstituteAlways || doSubstitute(NewInstCount, OldInstCount)) {
        insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                 RegUnits, IncrementalUpdate);
        // Eagerly stop after the first pattern fires.
        Changed = true;
        break;
      } else {
        // For big basic blocks, we only compute the full trace the first time
        // we hit this. We do not invalidate the trace, but instead update the
        // instruction depths incrementally.
        // NOTE: Only the instruction depths up to MI are accurate. All other
        // trace information is not updated.
        MachineTraceMetrics::Trace BlockTrace = MinInstr->getTrace(MBB);
        Traces->verifyAnalysis();
        if (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg, P,
                                    !IncrementalUpdate) &&
            preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs)) {
          if (MBB->size() > inc_threshold) {
            // Use incremental depth updates for basic blocks above threshold.
            IncrementalUpdate = true;
            LastUpdate = BlockIter;
          }

          insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                   RegUnits, IncrementalUpdate);

          // Eagerly stop after the first pattern fires.
          Changed = true;
          break;
        }
        // Cleanup instructions of the alternative code sequence. There is no
        // use for them.
        MachineFunction *MF = MBB->getParent();
        for (auto *InstrPtr : InsInstrs)
          MF->DeleteMachineInstr(InstrPtr);
      }
      InstrIdxForVirtReg.clear();
    }
  }

  if (Changed && IncrementalUpdate)
    Traces->invalidate(MBB);
  return Changed;
}

bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  SchedModel = STI->getSchedModel();
  TSchedModel.init(STI);
  MRI = &MF.getRegInfo();
  MLI = &getAnalysis<MachineLoopInfo>();
  Traces = &getAnalysis<MachineTraceMetrics>();
  MinInstr = nullptr;
  OptSize = MF.getFunction().hasOptSize();

  LLVM_DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
  if (!TII->useMachineCombiner()) {
    LLVM_DEBUG(
        dbgs()
        << " Skipping pass: Target does not support machine combiner\n");
    return false;
  }

  bool Changed = false;

  // Try to combine instructions.
  for (auto &MBB : MF)
    Changed |= combineInstructions(&MBB);

  return Changed;
}