//====- X86CmovConversion.cpp - Convert Cmov to Branch --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements a pass that converts X86 cmov instructions into
/// branches when profitable. This pass is conservative. It transforms if and
/// only if it can guarantee a gain with high confidence.
///
/// Thus, the optimization applies under the following conditions:
///   1. Consider as candidates only CMOVs in innermost loops (assume that
///      most hotspots are represented by these loops).
///   2. Given a group of CMOV instructions that are using the same EFLAGS def
///      instruction:
///      a. Consider them as candidates only if all have the same condition
///         code or the opposite one, to prevent generating more than one
///         conditional jump per EFLAGS def instruction.
///      b. Consider them as candidates only if all are profitable to be
///         converted (assume that one bad conversion may cause a degradation).
///   3. Apply conversion only for loops that are found profitable and only for
///      CMOV candidates that were found profitable.
///      a. A loop is considered profitable only if conversion will reduce its
///         depth cost by some threshold.
///      b. A CMOV is considered profitable if the cost of its condition is
///         higher than the average cost of its true-value and false-value by
///         25% of the branch-misprediction penalty. This assures no
///         degradation even with 25% branch misprediction.
///
/// Note: This pass is assumed to run on SSA machine code.
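///
/// For illustration only (hypothetical, simplified AT&T assembly rather than
/// the MachineIR the pass actually operates on), the rewrite is roughly:
///     cmovge %esi, %eax
/// becoming
///     jl .Lsink          # skip the move when the condition does not hold
///     movl %esi, %eax
///   .Lsink: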
//
//===----------------------------------------------------------------------===//
//
//  External interfaces:
//      FunctionPass *llvm::createX86CmovConverterPass();
//      bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF);
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "x86-cmov-conversion"

STATISTIC(NumOfSkippedCmovGroups, "Number of unsupported CMOV-groups");
STATISTIC(NumOfCmovGroupCandidate, "Number of CMOV-group candidates");
STATISTIC(NumOfLoopCandidate, "Number of CMOV-conversion profitable loops");
STATISTIC(NumOfOptimizedCmovGroups, "Number of optimized CMOV-groups");

// This internal switch can be used to turn off the cmov/branch optimization.
static cl::opt<bool>
    EnableCmovConverter("x86-cmov-converter",
                        cl::desc("Enable the X86 cmov-to-branch optimization."),
                        cl::init(true), cl::Hidden);

static cl::opt<unsigned>
    GainCycleThreshold("x86-cmov-converter-threshold",
                       cl::desc("Minimum gain per loop (in cycles) threshold."),
                       cl::init(4), cl::Hidden);

static cl::opt<bool> ForceMemOperand(
    "x86-cmov-converter-force-mem-operand",
    cl::desc("Convert cmovs to branches whenever they have memory operands."),
    cl::init(true), cl::Hidden);

namespace {

/// Converts X86 cmov instructions into branches when profitable.
class X86CmovConverterPass : public MachineFunctionPass {
public:
  X86CmovConverterPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 cmov Conversion"; }
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Pass identification, replacement for typeid.
  static char ID;

private:
  MachineRegisterInfo *MRI;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  TargetSchedModel TSchedModel;

  /// List of consecutive CMOV instructions.
  using CmovGroup = SmallVector<MachineInstr *, 2>;
  using CmovGroups = SmallVector<CmovGroup, 2>;

  /// Collect all CMOV-group-candidates in \p CurrLoop and update \p
  /// CmovInstGroups accordingly.
  ///
  /// \param Blocks List of blocks to process.
  /// \param CmovInstGroups List of consecutive CMOV instructions in CurrLoop.
  /// \returns true iff it found any CMOV-group-candidate.
  bool collectCmovCandidates(ArrayRef<MachineBasicBlock *> Blocks,
                             CmovGroups &CmovInstGroups,
                             bool IncludeLoads = false);

  /// Check if it is profitable to transform each CMOV-group-candidate into a
  /// branch. Remove all groups that are not profitable from \p CmovInstGroups.
  ///
  /// \param Blocks List of blocks to process.
  /// \param CmovInstGroups List of consecutive CMOV instructions in CurrLoop.
  /// \returns true iff any CMOV-group-candidate remains.
  bool checkForProfitableCmovCandidates(ArrayRef<MachineBasicBlock *> Blocks,
                                        CmovGroups &CmovInstGroups);

  /// Convert the given list of consecutive CMOV instructions into a branch.
  ///
  /// \param Group Consecutive CMOV instructions to be converted into branch.
  void convertCmovInstsToBranches(SmallVectorImpl<MachineInstr *> &Group) const;
};

} // end anonymous namespace

char X86CmovConverterPass::ID = 0;

void X86CmovConverterPass::getAnalysisUsage(AnalysisUsage &AU) const {
  MachineFunctionPass::getAnalysisUsage(AU);
  AU.addRequired<MachineLoopInfo>();
}

bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;
  if (!EnableCmovConverter)
    return false;

  LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
                    << "**********\n");

  bool Changed = false;
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  MRI = &MF.getRegInfo();
  TII = STI.getInstrInfo();
  TRI = STI.getRegisterInfo();
  TSchedModel.init(&STI);

  // Before we handle the more subtle cases of register-register CMOVs inside
  // of potentially hot loops, we want to quickly remove all CMOVs with
  // a memory operand. Such a CMOV risks a stall while waiting for the load to
  // complete, a case that speculative execution behind a branch is better
  // suited to handle on modern x86 chips.
  if (ForceMemOperand) {
    CmovGroups AllCmovGroups;
    SmallVector<MachineBasicBlock *, 4> Blocks;
    for (auto &MBB : MF)
      Blocks.push_back(&MBB);
    if (collectCmovCandidates(Blocks, AllCmovGroups, /*IncludeLoads*/ true)) {
      for (auto &Group : AllCmovGroups) {
        // Skip any group that doesn't do at least one memory operand cmov.
        if (!llvm::any_of(Group, [&](MachineInstr *I) { return I->mayLoad(); }))
          continue;

        // For CMOV groups which we can rewrite and which contain a memory
        // load, always rewrite them. On x86, a CMOV will dramatically amplify
        // any memory latency by blocking speculative execution.
        Changed = true;
        convertCmovInstsToBranches(Group);
      }
    }
  }

  //===--------------------------------------------------------------------===//
  // Register-operand Conversion Algorithm
  // ---------
  //   For each innermost loop
  //     collectCmovCandidates() {
  //       Find all CMOV-group-candidates.
  //     }
  //
  //     checkForProfitableCmovCandidates() {
  //       * Calculate both loop-depth and optimized-loop-depth.
  //       * Use these depths to check for loop transformation profitability.
  //       * Check for CMOV-group-candidate transformation profitability.
  //     }
  //
  //   For each profitable CMOV-group-candidate
  //     convertCmovInstsToBranches() {
  //       * Create FalseBB, SinkBB, and a conditional branch to SinkBB.
  //       * Replace each CMOV instruction with a PHI instruction in SinkBB.
  //     }
  //
  // Note: For more details, see each function's description.
  //===--------------------------------------------------------------------===//

  // Build up the loops in pre-order.
  SmallVector<MachineLoop *, 4> Loops(MLI.begin(), MLI.end());
  // Note that we need to check size on each iteration as we accumulate child
  // loops.
  for (int i = 0; i < (int)Loops.size(); ++i)
    for (MachineLoop *Child : Loops[i]->getSubLoops())
      Loops.push_back(Child);

  for (MachineLoop *CurrLoop : Loops) {
    // Optimize only innermost loops.
    if (!CurrLoop->getSubLoops().empty())
      continue;

    // List of consecutive CMOV instructions to be processed.
    CmovGroups CmovInstGroups;

    if (!collectCmovCandidates(CurrLoop->getBlocks(), CmovInstGroups))
      continue;

    if (!checkForProfitableCmovCandidates(CurrLoop->getBlocks(),
                                          CmovInstGroups))
      continue;

    Changed = true;
    for (auto &Group : CmovInstGroups)
      convertCmovInstsToBranches(Group);
  }

  return Changed;
}

bool X86CmovConverterPass::collectCmovCandidates(
    ArrayRef<MachineBasicBlock *> Blocks, CmovGroups &CmovInstGroups,
    bool IncludeLoads) {
  //===--------------------------------------------------------------------===//
  // Collect all CMOV-group-candidates and add them into CmovInstGroups.
  //
  // CMOV-group:
  //   CMOV instructions in the same MBB that use the same EFLAGS def
  //   instruction.
  //
  // CMOV-group-candidate:
  //   CMOV-group whose CMOV instructions
  //     1. are consecutive.
  //     2. have the same condition code or the opposite one.
  //     3. have only register operands (X86::CMOVrr).
  //===--------------------------------------------------------------------===//
  // List of possible improvements (TODOs):
  // --------------------------------------
  // TODO: Add support for X86::CMOVrm instructions.
  // TODO: Add support for X86::SETcc instructions.
  // TODO: Add support for CMOV-groups with non-consecutive CMOV instructions.
  //===--------------------------------------------------------------------===//
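
  // For illustration, a hypothetical candidate group in schematic MIR (the
  // virtual register numbers are invented):
  //   CMP32rr %1:gr32, %2:gr32, implicit-def $eflags
  //   %3:gr32 = CMOV32rr %1:gr32, %2:gr32, 15 /* COND_G */, implicit $eflags
  //   %4:gr32 = CMOV32rr %2:gr32, %1:gr32, 14 /* COND_LE */, implicit $eflags
  // The two CMOVs are consecutive, use the same EFLAGS def, and carry
  // opposite condition codes, so together they form one CMOV-group-candidate.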

  // Current processed CMOV-Group.
  CmovGroup Group;
  for (auto *MBB : Blocks) {
    Group.clear();
    // Condition code of the first CMOV instruction in the currently processed
    // range, and its opposite condition code.
    X86::CondCode FirstCC = X86::COND_INVALID, FirstOppCC = X86::COND_INVALID,
                  MemOpCC = X86::COND_INVALID;
    // Indicator of a non-CMOVrr instruction in the currently processed range.
    bool FoundNonCMOVInst = false;
    // Indicator for whether the currently processed CMOV-group should be
    // skipped.
    bool SkipGroup = false;

    for (auto &I : *MBB) {
      // Skip debug instructions.
      if (I.isDebugInstr())
        continue;
      X86::CondCode CC = X86::getCondFromCMov(I);
      // Check if we found an X86::CMOVrr instruction.
      if (CC != X86::COND_INVALID && (IncludeLoads || !I.mayLoad())) {
        if (Group.empty()) {
          // We found the first CMOV in the range; reset the flags.
          FirstCC = CC;
          FirstOppCC = X86::GetOppositeBranchCondition(CC);
          // Clear out the prior group's memory operand CC.
          MemOpCC = X86::COND_INVALID;
          FoundNonCMOVInst = false;
          SkipGroup = false;
        }

        Group.push_back(&I);
        // Check if it is a non-consecutive CMOV instruction or if it has a
        // different condition code than FirstCC or FirstOppCC.
        if (FoundNonCMOVInst || (CC != FirstCC && CC != FirstOppCC))
          // Mark the SkipGroup indicator to skip the currently processed
          // CMOV-Group.
          SkipGroup = true;
        if (I.mayLoad()) {
          if (MemOpCC == X86::COND_INVALID)
            // The first memory operand CMOV.
            MemOpCC = CC;
          else if (CC != MemOpCC)
            // Can't handle mixed conditions with memory operands.
            SkipGroup = true;
        }
        // Check if we were relying on zero-extending behavior of the CMOV.
        if (!SkipGroup &&
            llvm::any_of(
                MRI->use_nodbg_instructions(I.defs().begin()->getReg()),
                [&](MachineInstr &UseI) {
                  return UseI.getOpcode() == X86::SUBREG_TO_REG;
                }))
          // FIXME: We should model the cost of using an explicit MOV to handle
          // the zero-extension rather than just refusing to handle this.
          SkipGroup = true;
        continue;
      }

      // If Group is empty, keep looking for the first CMOV in the range.
      if (Group.empty())
        continue;

      // We found a non-X86::CMOVrr instruction.
      FoundNonCMOVInst = true;
      // Check if this instruction defines EFLAGS to determine the end of the
      // processed range, as there would be no more instructions using the
      // current EFLAGS def.
      if (I.definesRegister(X86::EFLAGS)) {
        // If the currently processed CMOV-group should not be skipped, add it
        // as a CMOV-group-candidate.
        if (!SkipGroup)
          CmovInstGroups.push_back(Group);
        else
          ++NumOfSkippedCmovGroups;
        Group.clear();
      }
    }
    // The end of the basic block is considered the end of the range. If the
    // currently processed CMOV-group should not be skipped, add it as a
    // CMOV-group-candidate.
    if (Group.empty())
      continue;
    if (!SkipGroup)
      CmovInstGroups.push_back(Group);
    else
      ++NumOfSkippedCmovGroups;
  }

  NumOfCmovGroupCandidate += CmovInstGroups.size();
  return !CmovInstGroups.empty();
}

/// \returns Depth of CMOV instruction as if it was converted into branch.
/// \param TrueOpDepth depth cost of CMOV true-value operand.
/// \param FalseOpDepth depth cost of CMOV false-value operand.
static unsigned getDepthOfOptCmov(unsigned TrueOpDepth, unsigned FalseOpDepth) {
  //===--------------------------------------------------------------------===//
  // With no info about branch weight, we assume 50% for each value operand.
  // Thus, the depth of the optimized CMOV instruction is the rounded-up
  // average of its True-Operand-Value-Depth and False-Operand-Value-Depth.
  //===--------------------------------------------------------------------===//
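  // For example, TrueOpDepth = 3 and FalseOpDepth = 6 give a rounded-up
  // average of (3 + 6 + 1) / 2 = 5 cycles.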
  return (TrueOpDepth + FalseOpDepth + 1) / 2;
}

bool X86CmovConverterPass::checkForProfitableCmovCandidates(
    ArrayRef<MachineBasicBlock *> Blocks, CmovGroups &CmovInstGroups) {
  struct DepthInfo {
    /// Depth of original loop.
    unsigned Depth;
    /// Depth of optimized loop.
    unsigned OptDepth;
  };
  /// Number of loop iterations to calculate depth for.
  static const unsigned LoopIterations = 2;
  DenseMap<MachineInstr *, DepthInfo> DepthMap;
  DepthInfo LoopDepth[LoopIterations] = {{0, 0}, {0, 0}};
  enum { PhyRegType = 0, VirRegType = 1, RegTypeNum = 2 };
  /// For each register type, maps the register to its last def instruction.
  DenseMap<unsigned, MachineInstr *> RegDefMaps[RegTypeNum];
  /// Maps a register operand to its def instruction, which can be nullptr if
  /// it is unknown (e.g., the operand is defined outside the loop).
  DenseMap<MachineOperand *, MachineInstr *> OperandToDefMap;

  // Set the depth of an unknown instruction (i.e., nullptr) to zero.
  DepthMap[nullptr] = {0, 0};

  SmallPtrSet<MachineInstr *, 4> CmovInstructions;
  for (auto &Group : CmovInstGroups)
    CmovInstructions.insert(Group.begin(), Group.end());

  //===--------------------------------------------------------------------===//
  // Step 1: Calculate instruction depth and loop depth.
  // Optimized-Loop:
  //   loop with CMOV-group-candidates converted into branches.
  //
  // Instruction-Depth:
  //   instruction latency + max operand depth.
  //   * For a CMOV instruction in the optimized loop, the depth is calculated
  //     as: CMOV latency + getDepthOfOptCmov(True-Op-Depth, False-Op-Depth)
  //   TODO: Find a better way to estimate the latency of the branch
  //   instruction rather than using the CMOV latency.
  //
  // Loop-Depth:
  //   max instruction depth of all instructions in the loop.
  //   Note: the instruction with max depth represents the critical path in
  //   the loop.
  //
  // Loop-Depth[i]:
  //   Loop-Depth calculated for the first `i` iterations.
  //   Note: it is enough to calculate depth for up to two iterations.
  //
  // Depth-Diff[i]:
  //   Number of cycles saved in the first `i` iterations by optimizing the
  //   loop.
  //===--------------------------------------------------------------------===//
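  // For example (with invented numbers): a CMOV of latency 1 whose two value
  // operands become ready at depths 3 and 6, and whose EFLAGS def is no
  // deeper, gets Depth = 6 + 1 = 7 in the original loop but
  // OptDepth = getDepthOfOptCmov(3, 6) + 1 = 5 + 1 = 6 in the optimized one.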
  for (unsigned I = 0; I < LoopIterations; ++I) {
    DepthInfo &MaxDepth = LoopDepth[I];
    for (auto *MBB : Blocks) {
      // Clear the physical-register def map.
      RegDefMaps[PhyRegType].clear();
      for (MachineInstr &MI : *MBB) {
        // Skip debug instructions.
        if (MI.isDebugInstr())
          continue;
        unsigned MIDepth = 0;
        unsigned MIDepthOpt = 0;
        bool IsCMOV = CmovInstructions.count(&MI);
        for (auto &MO : MI.uses()) {
          // Check for "isUse()" because "uses()" also returns implicit
          // definitions.
          if (!MO.isReg() || !MO.isUse())
            continue;
          Register Reg = MO.getReg();
          auto &RDM = RegDefMaps[Register::isVirtualRegister(Reg)];
          if (MachineInstr *DefMI = RDM.lookup(Reg)) {
            OperandToDefMap[&MO] = DefMI;
            DepthInfo Info = DepthMap.lookup(DefMI);
            MIDepth = std::max(MIDepth, Info.Depth);
            if (!IsCMOV)
              MIDepthOpt = std::max(MIDepthOpt, Info.OptDepth);
          }
        }

        if (IsCMOV)
          MIDepthOpt = getDepthOfOptCmov(
              DepthMap[OperandToDefMap.lookup(&MI.getOperand(1))].OptDepth,
              DepthMap[OperandToDefMap.lookup(&MI.getOperand(2))].OptDepth);

        // Iterate over all operands to handle implicit definitions as well.
        for (auto &MO : MI.operands()) {
          if (!MO.isReg() || !MO.isDef())
            continue;
          Register Reg = MO.getReg();
          RegDefMaps[Register::isVirtualRegister(Reg)][Reg] = &MI;
        }

        unsigned Latency = TSchedModel.computeInstrLatency(&MI);
        DepthMap[&MI] = {MIDepth += Latency, MIDepthOpt += Latency};
        MaxDepth.Depth = std::max(MaxDepth.Depth, MIDepth);
        MaxDepth.OptDepth = std::max(MaxDepth.OptDepth, MIDepthOpt);
      }
    }
  }

  unsigned Diff[LoopIterations] = {LoopDepth[0].Depth - LoopDepth[0].OptDepth,
                                   LoopDepth[1].Depth - LoopDepth[1].OptDepth};

  //===--------------------------------------------------------------------===//
  // Step 2: Check if the loop is worth optimizing.
  // Worth-Optimize-Loop:
  //   case 1: Diff[1] == Diff[0]
  //           The critical path is iteration independent - there is no
  //           dependency of critical-path instructions on critical-path
  //           instructions of the previous iteration.
  //           Thus, it is enough to check the gain percent of the 1st
  //           iteration - to be conservative, the optimized loop needs to
  //           have a depth that is 12.5% fewer cycles than the original
  //           loop, per iteration.
  //
  //   case 2: Diff[1] > Diff[0]
  //           The critical path is iteration dependent - there is a
  //           dependency of critical-path instructions on critical-path
  //           instructions of the previous iteration.
  //           Thus, check the gain percent of the 2nd iteration (similar to
  //           the previous case), but it is also required to check the
  //           gradient of the gain - the change in Depth-Diff compared to the
  //           change in Loop-Depth between the 1st and 2nd iterations.
  //           To be conservative, the gradient needs to be at least 50%.
  //
  //   In addition, in order not to optimize loops with a very small gain, the
  //   gain (in cycles) after the 2nd iteration should not be less than a
  //   given threshold. Thus, the check (Diff[1] >= GainCycleThreshold) must
  //   apply.
  //
  // If the loop is not worth optimizing, remove all CMOV-group-candidates.
  //===--------------------------------------------------------------------===//
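  // For example (with invented numbers), using the default GainCycleThreshold
  // of 4 in case 1: a loop with LoopDepth[0].Depth = 40 and
  // Diff[0] = Diff[1] = 5 is accepted (5 >= 4 and 5 * 8 = 40 >= 40), whereas
  // Diff[0] = Diff[1] = 4 is rejected because 4 * 8 = 32 < 40.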
  if (Diff[1] < GainCycleThreshold)
    return false;

  bool WorthOptLoop = false;
  if (Diff[1] == Diff[0])
    WorthOptLoop = Diff[0] * 8 >= LoopDepth[0].Depth;
  else if (Diff[1] > Diff[0])
    WorthOptLoop =
        (Diff[1] - Diff[0]) * 2 >= (LoopDepth[1].Depth - LoopDepth[0].Depth) &&
        (Diff[1] * 8 >= LoopDepth[1].Depth);

  if (!WorthOptLoop)
    return false;

  ++NumOfLoopCandidate;

  //===--------------------------------------------------------------------===//
  // Step 3: Check for each CMOV-group-candidate whether it is worth
  // optimizing.
  // Worth-Optimize-Group:
  //   Iff it is worth optimizing all CMOV instructions in the group.
  //
  // Worth-Optimize-CMOV:
  //   A predicted branch is faster than a CMOV by the difference between the
  //   depth of the condition operand and the depth of the taken (predicted)
  //   value operand.
  //   To be conservative, the gain of such a CMOV transformation should cover
  //   at least 25% of the branch-misprediction penalty.
  //===--------------------------------------------------------------------===//
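  // For example (with invented numbers), with MispredictPenalty = 20 cycles a
  // CMOV is kept as a candidate only when CondCost - ValCost >= 5, since the
  // check below requires (CondCost - ValCost) * 4 >= MispredictPenalty.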
  unsigned MispredictPenalty = TSchedModel.getMCSchedModel()->MispredictPenalty;
  CmovGroups TempGroups;
  std::swap(TempGroups, CmovInstGroups);
  for (auto &Group : TempGroups) {
    bool WorthOpGroup = true;
    for (auto *MI : Group) {
      // Avoid CMOV instructions whose value is used as a pointer to load
      // from. This is another conservative check, to avoid converting a CMOV
      // instruction used with a tree-search-like algorithm, where the branch
      // is unpredictable.
      auto UIs = MRI->use_instructions(MI->defs().begin()->getReg());
      if (UIs.begin() != UIs.end() && ++UIs.begin() == UIs.end()) {
        unsigned Op = UIs.begin()->getOpcode();
        if (Op == X86::MOV64rm || Op == X86::MOV32rm) {
          WorthOpGroup = false;
          break;
        }
      }

      unsigned CondCost =
          DepthMap[OperandToDefMap.lookup(&MI->getOperand(4))].Depth;
      unsigned ValCost = getDepthOfOptCmov(
          DepthMap[OperandToDefMap.lookup(&MI->getOperand(1))].Depth,
          DepthMap[OperandToDefMap.lookup(&MI->getOperand(2))].Depth);
      if (ValCost > CondCost || (CondCost - ValCost) * 4 < MispredictPenalty) {
        WorthOpGroup = false;
        break;
      }
    }

    if (WorthOpGroup)
      CmovInstGroups.push_back(Group);
  }

  return !CmovInstGroups.empty();
}

static bool checkEFLAGSLive(MachineInstr *MI) {
  if (MI->killsRegister(X86::EFLAGS))
    return false;

  // The EFLAGS operand of MI might be missing a kill marker.
  // Figure out whether EFLAGS should be live after the MI instruction.
  MachineBasicBlock *BB = MI->getParent();
  MachineBasicBlock::iterator ItrMI = MI;

  // Scan forward through BB for a use/def of EFLAGS.
  for (auto I = std::next(ItrMI), E = BB->end(); I != E; ++I) {
    if (I->readsRegister(X86::EFLAGS))
      return true;
    if (I->definesRegister(X86::EFLAGS))
      return false;
  }

  // We hit the end of the block; check whether EFLAGS is live into a
  // successor.
  for (auto I = BB->succ_begin(), E = BB->succ_end(); I != E; ++I) {
    if ((*I)->isLiveIn(X86::EFLAGS))
      return true;
  }

  return false;
}

/// Given \p First CMOV instruction and \p Last CMOV instruction representing a
/// group of CMOV instructions, which may contain debug instructions in between,
/// move all debug instructions to after the last CMOV instruction, making the
/// CMOV group consecutive.
static void packCmovGroup(MachineInstr *First, MachineInstr *Last) {
  assert(X86::getCondFromCMov(*Last) != X86::COND_INVALID &&
         "Last instruction in a CMOV group must be a CMOV instruction");

  SmallVector<MachineInstr *, 2> DBGInstructions;
  for (auto I = First->getIterator(), E = Last->getIterator(); I != E; I++) {
    if (I->isDebugInstr())
      DBGInstructions.push_back(&*I);
  }

  // Splice the debug instructions after the cmov group.
  MachineBasicBlock *MBB = First->getParent();
  for (auto *MI : DBGInstructions)
    MBB->insertAfter(Last, MI->removeFromParent());
}

void X86CmovConverterPass::convertCmovInstsToBranches(
    SmallVectorImpl<MachineInstr *> &Group) const {
  assert(!Group.empty() && "No CMOV instructions to convert");
  ++NumOfOptimizedCmovGroups;

  // If the CMOV group is not packed, e.g., there are debug instructions
  // between the first CMOV and the last CMOV, then pack the group and make
  // the CMOV instructions consecutive by moving the debug instructions to
  // after the last CMOV.
  packCmovGroup(Group.front(), Group.back());

  // To convert a CMOVcc instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  //
  // Before
  // -----
  // MBB:
  //   cond = cmp ...
  //   v1 = CMOVge t1, f1, cond
  //   v2 = CMOVlt t2, f2, cond
  //   v3 = CMOVge v1, f3, cond
  //
  // After
  // -----
  // MBB:
  //   cond = cmp ...
  //   jge %SinkMBB
  //
  // FalseMBB:
  //   jmp %SinkMBB
  //
  // SinkMBB:
  //   %v1 = phi[%f1, %FalseMBB], [%t1, %MBB]
  //   %v2 = phi[%t2, %FalseMBB], [%f2, %MBB] ; For CMOV with OppCC, switch
  //                                          ; true-value with false-value
  //   %v3 = phi[%f3, %FalseMBB], [%t1, %MBB] ; Phi instruction cannot use
  //                                          ; previous Phi instruction result

  MachineInstr &MI = *Group.front();
  MachineInstr *LastCMOV = Group.back();
  DebugLoc DL = MI.getDebugLoc();

  X86::CondCode CC = X86::CondCode(X86::getCondFromCMov(MI));
  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
  // Potentially swap the condition codes so that any memory operand to a CMOV
  // is in the *false* position instead of the *true* position. We can invert
  // any non-memory operand CMOV instructions to cope with this and we ensure
  // memory operand CMOVs are only included with a single condition code.
  if (llvm::any_of(Group, [&](MachineInstr *I) {
        return I->mayLoad() && X86::getCondFromCMov(*I) == CC;
      }))
    std::swap(CC, OppCC);

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction::iterator It = ++MBB->getIterator();
  MachineFunction *F = MBB->getParent();
  const BasicBlock *BB = MBB->getBasicBlock();

  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
  F->insert(It, FalseMBB);
  F->insert(It, SinkMBB);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  if (checkEFLAGSLive(LastCMOV)) {
    FalseMBB->addLiveIn(X86::EFLAGS);
    SinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of BB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(LastCMOV)), MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // Add the false and sink blocks as its successors.
  MBB->addSuccessor(FalseMBB);
  MBB->addSuccessor(SinkMBB);

  // Create the conditional branch instruction.
  BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);

  // Add the sink block to the false block's successors.
  FalseMBB->addSuccessor(SinkMBB);

  MachineInstrBuilder MIB;
  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
  MachineBasicBlock::iterator MIItEnd =
      std::next(MachineBasicBlock::iterator(LastCMOV));
  MachineBasicBlock::iterator FalseInsertionPoint = FalseMBB->begin();
  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // First we need to insert an explicit load on the false path for any memory
  // operand. We also need to potentially do register rewriting here, but it is
  // simpler as the memory operands are always on the false path so we can
  // simply take that input, whatever it is.
  DenseMap<unsigned, unsigned> FalseBBRegRewriteTable;
  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;) {
    auto &MI = *MIIt++;
    // Skip any CMOVs in this group which don't load from memory.
    if (!MI.mayLoad()) {
      // Remember the false-side register input.
      Register FalseReg =
          MI.getOperand(X86::getCondFromCMov(MI) == CC ? 1 : 2).getReg();
      // Walk back through any intermediate cmovs referenced.
      while (true) {
        auto FRIt = FalseBBRegRewriteTable.find(FalseReg);
        if (FRIt == FalseBBRegRewriteTable.end())
          break;
        FalseReg = FRIt->second;
      }
      FalseBBRegRewriteTable[MI.getOperand(0).getReg()] = FalseReg;
      continue;
    }

    // The condition must be the *opposite* of the one we've decided to branch
    // on as the branch will go *around* the load and the load should happen
    // when the CMOV condition is false.
    assert(X86::getCondFromCMov(MI) == OppCC &&
           "Can only handle memory-operand cmov instructions with a condition "
           "opposite to the selected branch direction.");

    // The goal is to rewrite the cmov from:
    //
    //   MBB:
    //     %A = CMOVcc %B (tied), (mem)
    //
    // to
    //
    //   MBB:
    //     %A = CMOVcc %B (tied), %C
    //   FalseMBB:
    //     %C = MOV (mem)
    //
    // which will allow the next loop to rewrite the CMOV in terms of a PHI:
    //
    //   MBB:
    //     JMP!cc SinkMBB
    //   FalseMBB:
    //     %C = MOV (mem)
    //   SinkMBB:
    //     %A = PHI [ %C, FalseMBB ], [ %B, MBB ]

    // Get a fresh register to use as the destination of the MOV.
    const TargetRegisterClass *RC = MRI->getRegClass(MI.getOperand(0).getReg());
    Register TmpReg = MRI->createVirtualRegister(RC);

    SmallVector<MachineInstr *, 4> NewMIs;
    bool Unfolded = TII->unfoldMemoryOperand(*MBB->getParent(), MI, TmpReg,
                                             /*UnfoldLoad*/ true,
                                             /*UnfoldStore*/ false, NewMIs);
    (void)Unfolded;
    assert(Unfolded && "Should never fail to unfold a loading cmov!");

    // Move the new CMOV to just before the old one and reset any impacted
    // iterator.
    auto *NewCMOV = NewMIs.pop_back_val();
    assert(X86::getCondFromCMov(*NewCMOV) == OppCC &&
           "Last new instruction isn't the expected CMOV!");
    LLVM_DEBUG(dbgs() << "\tRewritten cmov: "; NewCMOV->dump());
    MBB->insert(MachineBasicBlock::iterator(MI), NewCMOV);
    if (&*MIItBegin == &MI)
      MIItBegin = MachineBasicBlock::iterator(NewCMOV);

    // Sink whatever instructions were needed to produce the unfolded operand
    // into the false block.
    for (auto *NewMI : NewMIs) {
      LLVM_DEBUG(dbgs() << "\tRewritten load instr: "; NewMI->dump());
      FalseMBB->insert(FalseInsertionPoint, NewMI);
      // Re-map any operands that are from other cmovs to the inputs for this
      // block.
      for (auto &MOp : NewMI->uses()) {
        if (!MOp.isReg())
          continue;
        auto It = FalseBBRegRewriteTable.find(MOp.getReg());
        if (It == FalseBBRegRewriteTable.end())
          continue;

        MOp.setReg(It->second);
        // This might have been a kill when it referenced the cmov result, but
        // it won't necessarily be once rewritten.
        // FIXME: We could potentially improve this by tracking whether the
        // operand to the cmov was also a kill, and then skipping the PHI node
        // construction below.
        MOp.setIsKill(false);
      }
    }
    MBB->erase(MachineBasicBlock::iterator(MI),
               std::next(MachineBasicBlock::iterator(MI)));

    // Map the rewritten CMOV's result to the unfolded load's register (its
    // false-side value) in the rewrite table.
    FalseBBRegRewriteTable[NewCMOV->getOperand(0).getReg()] = TmpReg;
  }

  // As we are creating the PHIs, we have to be careful if there is more than
  // one. Later CMOVs may reference the results of earlier CMOVs, but later
  // PHIs have to reference the individual true/false inputs from earlier PHIs.
  // That also means that PHI construction must work forward from earlier to
  // later, and that the code must maintain a mapping from earlier PHIs'
  // destination registers to the registers that went into each PHI.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;

  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
    Register DestReg = MIIt->getOperand(0).getReg();
    Register Op1Reg = MIIt->getOperand(1).getReg();
    Register Op2Reg = MIIt->getOperand(2).getReg();

    // If the CMOV we are processing has the opposite condition from the jump
    // we generated, then we have to swap the operands for the PHI that is
    // going to be generated.
    if (X86::getCondFromCMov(*MIIt) == OppCC)
      std::swap(Op1Reg, Op2Reg);

    auto Op1Itr = RegRewriteTable.find(Op1Reg);
    if (Op1Itr != RegRewriteTable.end())
      Op1Reg = Op1Itr->second.first;

    auto Op2Itr = RegRewriteTable.find(Op2Reg);
    if (Op2Itr != RegRewriteTable.end())
      Op2Reg = Op2Itr->second.second;

    // SinkMBB:
    //   %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, MBB ]
    //   ...
    MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
              .addReg(Op1Reg)
              .addMBB(FalseMBB)
              .addReg(Op2Reg)
              .addMBB(MBB);
    (void)MIB;
    LLVM_DEBUG(dbgs() << "\tFrom: "; MIIt->dump());
    LLVM_DEBUG(dbgs() << "\tTo: "; MIB->dump());

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
  }

  // Now remove the CMOV(s).
  MBB->erase(MIItBegin, MIItEnd);
}

INITIALIZE_PASS_BEGIN(X86CmovConverterPass, DEBUG_TYPE, "X86 cmov Conversion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_END(X86CmovConverterPass, DEBUG_TYPE, "X86 cmov Conversion",
                    false, false)

FunctionPass *llvm::createX86CmovConverterPass() {
  return new X86CmovConverterPass();
}