//=- AArch64ConditionOptimizer.cpp - Remove useless comparisons for AArch64 -=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to make consecutive compares of values use the same
// operands so that the CSE pass can remove duplicated instructions. For this
// it analyzes branches and adjusts comparisons with immediate values by
// converting:
//  * GE -> GT
//  * GT -> GE
//  * LT -> LE
//  * LE -> LT
// and adjusting the immediate values appropriately. In essence it corrects
// the two immediate values towards each other until they become equal.
//
// Consider the following example in C:
//
//   if ((a < 5 && ...) || (a > 5 && ...)) {
//        ~~~~~             ~~~~~
//          ^                 ^
//          x                 y
//
// Here both "x" and "y" expressions compare "a" with "5". When "x" evaluates
// to "false", "y" can just check the flags set by the first comparison. As a
// result of the canonicalization employed by
// SelectionDAGBuilder::visitSwitchCase, DAGCombine, and other target-specific
// code, the assembly ends up in a form that is not CSE friendly:
//
//     ...
//     cmp  w8, #4
//     b.gt .LBB0_3
//     ...
//   .LBB0_3:
//     cmp  w8, #6
//     b.lt .LBB0_6
//     ...
//
// The same assembly after the pass:
//
//     ...
//     cmp  w8, #5
//     b.ge .LBB0_3
//     ...
//   .LBB0_3:
//     cmp  w8, #5      // <-- CSE pass removes this instruction
//     b.le .LBB0_6
//     ...
//
// Currently only SUBS and ADDS followed by b.?? are supported.
//
// TODO: maybe handle TBNZ/TBZ the same way as CMP when used instead for "a < 0"
// TODO: handle other conditional instructions (e.g. CSET)
// TODO: allow the second branch to be anything if it doesn't require adjusting
//
//===----------------------------------------------------------------------===//
#include "AArch64.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdlib>
#include <tuple>
using namespace llvm;

#define DEBUG_TYPE "aarch64-condopt"

STATISTIC(NumConditionsAdjusted, "Number of conditions adjusted");

namespace {

class AArch64ConditionOptimizer : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  MachineDominatorTree *DomTree;
  const MachineRegisterInfo *MRI;

public:
  // Stores the immediate, the compare instruction opcode and the branch
  // condition (in this order) of the adjusted comparison.
  using CmpInfo = std::tuple<int, unsigned, AArch64CC::CondCode>;

  static char ID;

  AArch64ConditionOptimizer() : MachineFunctionPass(ID) {
    initializeAArch64ConditionOptimizerPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  MachineInstr *findSuitableCompare(MachineBasicBlock *MBB);
  CmpInfo adjustCmp(MachineInstr *CmpMI, AArch64CC::CondCode Cmp);
  void modifyCmp(MachineInstr *CmpMI, const CmpInfo &Info);
  bool adjustTo(MachineInstr *CmpMI, AArch64CC::CondCode Cmp, MachineInstr *To,
                int ToImm);
  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "AArch64 Condition Optimizer";
  }
};

} // end anonymous namespace
char AArch64ConditionOptimizer::ID = 0;

INITIALIZE_PASS_BEGIN(AArch64ConditionOptimizer, "aarch64-condopt",
                      "AArch64 CondOpt Pass", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(AArch64ConditionOptimizer, "aarch64-condopt",
                    "AArch64 CondOpt Pass", false, false)

FunctionPass *llvm::createAArch64ConditionOptimizerPass() {
  return new AArch64ConditionOptimizer();
}

void AArch64ConditionOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
// Finds the compare instruction that controls the supported branch of a block.
// Returns the instruction, or nullptr on failure or when an unsupported
// instruction is encountered.
MachineInstr *AArch64ConditionOptimizer::findSuitableCompare(
    MachineBasicBlock *MBB) {
  MachineBasicBlock::iterator I = MBB->getFirstTerminator();
  if (I == MBB->end())
    return nullptr;

  if (I->getOpcode() != AArch64::Bcc)
    return nullptr;

  // Since we may modify the cmp of this MBB, make sure NZCV does not live out.
  for (auto SuccBB : MBB->successors())
    if (SuccBB->isLiveIn(AArch64::NZCV))
      return nullptr;

  // Now find the instruction controlling the terminator.
  for (MachineBasicBlock::iterator B = MBB->begin(); I != B;) {
    --I;
    assert(!I->isTerminator() && "Spurious terminator");
    // Check if there is any use of NZCV between CMP and Bcc.
    if (I->readsRegister(AArch64::NZCV))
      return nullptr;
    switch (I->getOpcode()) {
    // cmp is an alias for subs with a dead destination register.
    case AArch64::SUBSWri:
    case AArch64::SUBSXri:
    // cmn is an alias for adds with a dead destination register.
    case AArch64::ADDSWri:
    case AArch64::ADDSXri: {
      unsigned ShiftAmt = AArch64_AM::getShiftValue(I->getOperand(3).getImm());
      if (!I->getOperand(2).isImm()) {
        LLVM_DEBUG(dbgs() << "Immediate of cmp is symbolic, " << *I << '\n');
        return nullptr;
      } else if (I->getOperand(2).getImm() << ShiftAmt >= 0xfff) {
        LLVM_DEBUG(dbgs() << "Immediate of cmp may be out of range, " << *I
                          << '\n');
        return nullptr;
      } else if (!MRI->use_empty(I->getOperand(0).getReg())) {
        LLVM_DEBUG(dbgs() << "Destination of cmp is not dead, " << *I << '\n');
        return nullptr;
      }
      return &*I;
    }
    // Prevent false positive cases like:
    //   cmp  w19, #0
    //   cinc w0, w19, gt
    //   ...
    //   fcmp d8, #0.0
    //   b.gt .LBB0_5
    case AArch64::FCMPDri:
    case AArch64::FCMPSri:
    case AArch64::FCMPESri:
    case AArch64::FCMPEDri:

    case AArch64::SUBSWrr:
    case AArch64::SUBSXrr:
    case AArch64::ADDSWrr:
    case AArch64::ADDSXrr:
    case AArch64::FCMPSrr:
    case AArch64::FCMPDrr:
    case AArch64::FCMPESrr:
    case AArch64::FCMPEDrr:
      // Skip comparison instructions without immediate operands.
      return nullptr;
    }
  }
  LLVM_DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB)
                    << '\n');
  return nullptr;
}
// Changes the opcode adds <-> subs, preserving the register operand width.
static int getComplementOpc(int Opc) {
  switch (Opc) {
  case AArch64::ADDSWri: return AArch64::SUBSWri;
  case AArch64::ADDSXri: return AArch64::SUBSXri;
  case AArch64::SUBSWri: return AArch64::ADDSWri;
  case AArch64::SUBSXri: return AArch64::ADDSXri;
  default:
    llvm_unreachable("Unexpected opcode");
  }
}
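
// For illustration (comment added, not part of the original code): the opcode
// flip is needed when an adjustment crosses zero. For example, "cmp w8, #0"
// with b.lt ("w8 < 0") is equivalent to "w8 <= -1"; SUBS cannot encode a
// negative immediate, so adjustCmp() below switches the opcode to ADDSWri,
// producing "cmn w8, #1" with b.le.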
// Changes the form of a comparison: inclusive <-> exclusive.
static AArch64CC::CondCode getAdjustedCmp(AArch64CC::CondCode Cmp) {
  switch (Cmp) {
  case AArch64CC::GT: return AArch64CC::GE;
  case AArch64CC::GE: return AArch64CC::GT;
  case AArch64CC::LT: return AArch64CC::LE;
  case AArch64CC::LE: return AArch64CC::LT;
  default:
    llvm_unreachable("Unexpected condition code");
  }
}
// Transforms GT -> GE, GE -> GT, LT -> LE, LE -> LT by updating the comparison
// immediate (and, when necessary, the opcode) together with the condition code.
AArch64ConditionOptimizer::CmpInfo AArch64ConditionOptimizer::adjustCmp(
    MachineInstr *CmpMI, AArch64CC::CondCode Cmp) {
  unsigned Opc = CmpMI->getOpcode();

  // CMN (compare with negative immediate) is an alias for ADDS (as
  // "operand - negative" == "operand + positive").
  bool Negative = (Opc == AArch64::ADDSWri || Opc == AArch64::ADDSXri);

  int Correction = (Cmp == AArch64CC::GT) ? 1 : -1;
  // Negate the Correction value for comparison with a negative immediate (CMN).
  if (Negative) {
    Correction = -Correction;
  }

  const int OldImm = (int)CmpMI->getOperand(2).getImm();
  const int NewImm = std::abs(OldImm + Correction);

  // Handle +0 -> -1 and -0 -> +1 (CMN with 0 immediate) transitions by
  // adjusting the compare instruction opcode.
  if (OldImm == 0 && ((Negative && Correction == 1) ||
                      (!Negative && Correction == -1))) {
    Opc = getComplementOpc(Opc);
  }

  return CmpInfo(NewImm, Opc, getAdjustedCmp(Cmp));
}
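
// Worked example (comment added for illustration; it mirrors the file-header
// example): for "cmp w8, #4" (SUBSWri, immediate 4) with condition GT,
// adjustCmp() returns {5, AArch64::SUBSWri, AArch64CC::GE}, i.e. "cmp w8, #5"
// with b.ge; for "cmp w8, #6" with LT it returns
// {5, AArch64::SUBSWri, AArch64CC::LE}, i.e. "cmp w8, #5" with b.le, making
// the two compares identical.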
// Applies the changes to the comparison instruction suggested by adjustCmp().
void AArch64ConditionOptimizer::modifyCmp(MachineInstr *CmpMI,
                                          const CmpInfo &Info) {
  int Imm;
  unsigned Opc;
  AArch64CC::CondCode Cmp;
  std::tie(Imm, Opc, Cmp) = Info;

  MachineBasicBlock *const MBB = CmpMI->getParent();

  // Change the immediate in the comparison instruction (ADDS or SUBS).
  BuildMI(*MBB, CmpMI, CmpMI->getDebugLoc(), TII->get(Opc))
      .add(CmpMI->getOperand(0))
      .add(CmpMI->getOperand(1))
      .addImm(Imm)
      .add(CmpMI->getOperand(3));
  CmpMI->eraseFromParent();

  // The fact that this comparison was picked ensures that it's related to the
  // first terminator instruction.
  MachineInstr &BrMI = *MBB->getFirstTerminator();

  // Change the condition in the branch instruction.
  BuildMI(*MBB, BrMI, BrMI.getDebugLoc(), TII->get(AArch64::Bcc))
      .addImm(Cmp)
      .add(BrMI.getOperand(1));
  BrMI.eraseFromParent();

  MBB->updateTerminator();

  ++NumConditionsAdjusted;
}
// Parses a condition code returned by analyzeBranch and computes the CondCode
// corresponding to TBB.
// Returns true if parsing was successful, otherwise false is returned.
static bool parseCond(ArrayRef<MachineOperand> Cond, AArch64CC::CondCode &CC) {
  // A normal br.cond simply has the condition code.
  if (Cond[0].getImm() != -1) {
    assert(Cond.size() == 1 && "Unknown Cond array format");
    CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    return true;
  }
  return false;
}
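
// Note (comment added for clarity; see AArch64InstrInfo::analyzeBranch): a
// leading -1 in Cond is how analyzeBranch encodes compare-and-branch and
// test-and-branch terminators such as CBZ/CBNZ/TBZ/TBNZ. This pass does not
// handle those forms, so parseCond() rejects them.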
// Adjusts one cmp instruction to match another one if the result of the
// adjustment will allow CSE. Returns true if the compare instruction was
// changed, otherwise false is returned.
bool AArch64ConditionOptimizer::adjustTo(MachineInstr *CmpMI,
  AArch64CC::CondCode Cmp, MachineInstr *To, int ToImm)
{
  CmpInfo Info = adjustCmp(CmpMI, Cmp);
  if (std::get<0>(Info) == ToImm && std::get<1>(Info) == To->getOpcode()) {
    modifyCmp(CmpMI, Info);
    return true;
  }
  return false;
}
bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
                    << "********** Function: " << MF.getName() << '\n');
  if (skipFunction(MF.getFunction()))
    return false;

  TII = MF.getSubtarget().getInstrInfo();
  DomTree = &getAnalysis<MachineDominatorTree>();
  MRI = &MF.getRegInfo();

  bool Changed = false;

  // Visit blocks in dominator tree pre-order. The pre-order enables multiple
  // cmp-conversions from the same head block.
  // Note that updateDomTree() modifies the children of the DomTree node
  // currently being visited. The df_iterator supports that; it doesn't look at
  // child_begin() / child_end() until after a node has been visited.
  for (MachineDomTreeNode *I : depth_first(DomTree)) {
    MachineBasicBlock *HBB = I->getBlock();

    SmallVector<MachineOperand, 4> HeadCond;
    MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
    if (TII->analyzeBranch(*HBB, TBB, FBB, HeadCond)) {
      continue;
    }

    // The equivalence check is to skip loops.
    if (!TBB || TBB == HBB) {
      continue;
    }

    SmallVector<MachineOperand, 4> TrueCond;
    MachineBasicBlock *TBB_TBB = nullptr, *TBB_FBB = nullptr;
    if (TII->analyzeBranch(*TBB, TBB_TBB, TBB_FBB, TrueCond)) {
      continue;
    }

    MachineInstr *HeadCmpMI = findSuitableCompare(HBB);
    if (!HeadCmpMI) {
      continue;
    }

    MachineInstr *TrueCmpMI = findSuitableCompare(TBB);
    if (!TrueCmpMI) {
      continue;
    }

    AArch64CC::CondCode HeadCmp;
    if (HeadCond.empty() || !parseCond(HeadCond, HeadCmp)) {
      continue;
    }

    AArch64CC::CondCode TrueCmp;
    if (TrueCond.empty() || !parseCond(TrueCond, TrueCmp)) {
      continue;
    }

    const int HeadImm = (int)HeadCmpMI->getOperand(2).getImm();
    const int TrueImm = (int)TrueCmpMI->getOperand(2).getImm();

    LLVM_DEBUG(dbgs() << "Head branch:\n");
    LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(HeadCmp)
                      << '\n');
    LLVM_DEBUG(dbgs() << "\timmediate: " << HeadImm << '\n');

    LLVM_DEBUG(dbgs() << "True branch:\n");
    LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(TrueCmp)
                      << '\n');
    LLVM_DEBUG(dbgs() << "\timmediate: " << TrueImm << '\n');

    if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::LT) ||
         (HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::GT)) &&
        std::abs(TrueImm - HeadImm) == 2) {
      // This branch transforms machine instructions that correspond to
      //
      // 1) (a > {TrueImm} && ...) || (a < {HeadImm} && ...)
      // 2) (a < {TrueImm} && ...) || (a > {HeadImm} && ...)
      //
      // into
      //
      // 1) (a >= {NewImm} && ...) || (a <= {NewImm} && ...)
      // 2) (a <= {NewImm} && ...) || (a >= {NewImm} && ...)
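      //
      // For the file-header example this folds the head block's
      // "cmp w8, #4; b.gt" (GT, HeadImm = 4) and the true block's
      // "cmp w8, #6; b.lt" (LT, TrueImm = 6) into two identical "cmp w8, #5"
      // instructions with b.ge and b.le respectively (worked example added
      // here for illustration; see the header for the full listing).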

      CmpInfo HeadCmpInfo = adjustCmp(HeadCmpMI, HeadCmp);
      CmpInfo TrueCmpInfo = adjustCmp(TrueCmpMI, TrueCmp);
      if (std::get<0>(HeadCmpInfo) == std::get<0>(TrueCmpInfo) &&
          std::get<1>(HeadCmpInfo) == std::get<1>(TrueCmpInfo)) {
        modifyCmp(HeadCmpMI, HeadCmpInfo);
        modifyCmp(TrueCmpMI, TrueCmpInfo);
        Changed = true;
      }
    } else if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::GT) ||
                (HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::LT)) &&
               std::abs(TrueImm - HeadImm) == 1) {
      // This branch transforms machine instructions that correspond to
      //
      // 1) (a > {TrueImm} && ...) || (a > {HeadImm} && ...)
      // 2) (a < {TrueImm} && ...) || (a < {HeadImm} && ...)
      //
      // into
      //
      // 1) (a <= {NewImm} && ...) || (a > {NewImm} && ...)
      // 2) (a < {NewImm} && ...) || (a >= {NewImm} && ...)
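      //
      // Concrete illustration (comment added; assumes both compares are
      // 32-bit SUBS of the same register): a head block with
      // "cmp w8, #3; b.gt" (HeadImm = 3) and a true block with
      // "cmp w8, #4; b.gt" (TrueImm = 4). The compare with the smaller
      // immediate is adjusted, so the head becomes "cmp w8, #4; b.ge" and
      // both blocks now compare against #4.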

      // The GT -> GE transformation increases the immediate value, so pick the
      // smaller one; LT -> LE decreases the immediate value, so invert the
      // choice.
      bool adjustHeadCond = (HeadImm < TrueImm);
      if (HeadCmp == AArch64CC::LT) {
        adjustHeadCond = !adjustHeadCond;
      }

      if (adjustHeadCond) {
        Changed |= adjustTo(HeadCmpMI, HeadCmp, TrueCmpMI, TrueImm);
      } else {
        Changed |= adjustTo(TrueCmpMI, TrueCmp, HeadCmpMI, HeadImm);
      }
    }
    // Other transformation cases almost never occur due to generation of < or >
    // comparisons instead of <= and >=.
  }

  return Changed;
}