//=- AArch64ConditionOptimizer.cpp - Remove useless comparisons for AArch64 -=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to make consecutive compares of values use the same operands
// to allow the CSE pass to remove duplicated instructions. For this it analyzes
// branches and adjusts comparisons with immediate values by converting:
//  * GE -> GT
//  * GT -> GE
//  * LT -> LE
//  * LE -> LT
// and adjusting immediate values appropriately. It basically corrects two
// immediate values towards each other to make them equal.
//
// Consider the following example in C:
//
//   if ((a < 5 && ...) || (a > 5 && ...)) {
//        ~~~~~            ~~~~~
//          x                y
//
// Here both "x" and "y" expressions compare "a" with "5". When "x" evaluates
// to "false", "y" can just check the flags set by the first comparison. As a
// result of the canonicalization employed by
// SelectionDAGBuilder::visitSwitchCase, DAGCombine, and other target-specific
// code, the assembly ends up in a form that is not CSE friendly:
//
//     cmp      w8, #4
//     b.gt     .LBB0_3
//     ...
//   .LBB0_3:
//     cmp      w8, #6
//     b.lt     .LBB0_6
//
// Same assembly after the pass:
//
//     cmp      w8, #5
//     b.ge     .LBB0_3
//     ...
//   .LBB0_3:
//     cmp      w8, #5 // <-- CSE pass removes this instruction
//     b.le     .LBB0_6
//
// Currently only SUBS and ADDS followed by b.?? are supported.
//
// TODO: maybe handle TBNZ/TBZ the same way as CMP when used instead for "a < 0"
// TODO: handle other conditional instructions (e.g. CSET)
// TODO: allow second branching to be anything if it doesn't require adjusting
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdlib>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "aarch64-condopt"

STATISTIC(NumConditionsAdjusted, "Number of conditions adjusted");

namespace {

class AArch64ConditionOptimizer : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  MachineDominatorTree *DomTree;
  const MachineRegisterInfo *MRI;

public:
  // Stores immediate, compare instruction opcode and branch condition (in this
  // order) of adjusted comparison.
  using CmpInfo = std::tuple<int, unsigned, AArch64CC::CondCode>;
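
  // For example, {6, AArch64::SUBSWri, AArch64CC::GE} describes the adjusted
  // compare "cmp w8, #6" followed by "b.ge", produced by adjustCmp() from
  // "cmp w8, #5" followed by "b.gt".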

  static char ID;

  AArch64ConditionOptimizer() : MachineFunctionPass(ID) {
    initializeAArch64ConditionOptimizerPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  MachineInstr *findSuitableCompare(MachineBasicBlock *MBB);
  CmpInfo adjustCmp(MachineInstr *CmpMI, AArch64CC::CondCode Cmp);
  void modifyCmp(MachineInstr *CmpMI, const CmpInfo &Info);
  bool adjustTo(MachineInstr *CmpMI, AArch64CC::CondCode Cmp, MachineInstr *To,
                int ToImm);
  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "AArch64 Condition Optimizer";
  }
};

} // end anonymous namespace

char AArch64ConditionOptimizer::ID = 0;

INITIALIZE_PASS_BEGIN(AArch64ConditionOptimizer, "aarch64-condopt",
                      "AArch64 CondOpt Pass", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(AArch64ConditionOptimizer, "aarch64-condopt",
                    "AArch64 CondOpt Pass", false, false)

FunctionPass *llvm::createAArch64ConditionOptimizerPass() {
  return new AArch64ConditionOptimizer();
}

void AArch64ConditionOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

// Finds compare instruction that corresponds to supported types of branching.
// Returns the instruction or nullptr on failures or detecting unsupported
// instructions.
MachineInstr *AArch64ConditionOptimizer::findSuitableCompare(
    MachineBasicBlock *MBB) {
  MachineBasicBlock::iterator Term = MBB->getFirstTerminator();
  if (Term == MBB->end())
    return nullptr;

  if (Term->getOpcode() != AArch64::Bcc)
    return nullptr;

  // Since we may modify cmp of this MBB, make sure NZCV does not live out.
  for (auto SuccBB : MBB->successors())
    if (SuccBB->isLiveIn(AArch64::NZCV))
      return nullptr;

  // Now find the instruction controlling the terminator.
  for (MachineBasicBlock::iterator B = MBB->begin(), It = Term; It != B;) {
    It = prev_nodbg(It, B);
    MachineInstr &I = *It;
    assert(!I.isTerminator() && "Spurious terminator");
    // Check if there is any use of NZCV between CMP and Bcc.
    if (I.readsRegister(AArch64::NZCV))
      return nullptr;
    switch (I.getOpcode()) {
    // cmp is an alias for subs with a dead destination register.
    case AArch64::SUBSWri:
    case AArch64::SUBSXri:
    // cmn is an alias for adds with a dead destination register.
    case AArch64::ADDSWri:
    case AArch64::ADDSXri: {
      unsigned ShiftAmt = AArch64_AM::getShiftValue(I.getOperand(3).getImm());
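      // Note: ADDS/SUBS immediates are 12-bit unsigned values, optionally
      // shifted left by 12. The range check below is conservative, presumably
      // so that the +/-1 adjustment made by this pass cannot leave the
      // encodable range.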
      if (!I.getOperand(2).isImm()) {
        LLVM_DEBUG(dbgs() << "Immediate of cmp is symbolic, " << I << '\n');
        return nullptr;
      } else if (I.getOperand(2).getImm() << ShiftAmt >= 0xfff) {
        LLVM_DEBUG(dbgs() << "Immediate of cmp may be out of range, " << I
                          << '\n');
        return nullptr;
      } else if (!MRI->use_nodbg_empty(I.getOperand(0).getReg())) {
        LLVM_DEBUG(dbgs() << "Destination of cmp is not dead, " << I << '\n');
        return nullptr;
      }
      return &I;
    }
    // Prevent false positive case like:
    // cmp      w19, #0
    // cinc     w0, w19, gt
    // ...
    // fcmp     d8, #0.0
    // b.gt     .LBB0_5
    case AArch64::FCMPDri:
    case AArch64::FCMPSri:
    case AArch64::FCMPESri:
    case AArch64::FCMPEDri:

    case AArch64::SUBSWrr:
    case AArch64::SUBSXrr:
    case AArch64::ADDSWrr:
    case AArch64::ADDSXrr:
    case AArch64::FCMPSrr:
    case AArch64::FCMPDrr:
    case AArch64::FCMPESrr:
    case AArch64::FCMPEDrr:
      // Skip comparison instructions without immediate operands.
      return nullptr;
    }
  }
  LLVM_DEBUG(dbgs() << "Flags not defined in " << printMBBReference(*MBB)
                    << '\n');
  return nullptr;
}

// Changes opcode adds <-> subs considering register operand width.
static int getComplementOpc(int Opc) {
  switch (Opc) {
  case AArch64::ADDSWri: return AArch64::SUBSWri;
  case AArch64::ADDSXri: return AArch64::SUBSXri;
  case AArch64::SUBSWri: return AArch64::ADDSWri;
  case AArch64::SUBSXri: return AArch64::ADDSXri;
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

// Changes form of comparison inclusive <-> exclusive.
static AArch64CC::CondCode getAdjustedCmp(AArch64CC::CondCode Cmp) {
  switch (Cmp) {
  case AArch64CC::GT: return AArch64CC::GE;
  case AArch64CC::GE: return AArch64CC::GT;
  case AArch64CC::LT: return AArch64CC::LE;
  case AArch64CC::LE: return AArch64CC::LT;
  default:
    llvm_unreachable("Unexpected condition code");
  }
}

// Transforms GT -> GE, GE -> GT, LT -> LE, LE -> LT by updating comparison
// operator and condition code.
AArch64ConditionOptimizer::CmpInfo AArch64ConditionOptimizer::adjustCmp(
    MachineInstr *CmpMI, AArch64CC::CondCode Cmp) {
  unsigned Opc = CmpMI->getOpcode();

  // CMN (compare with negative immediate) is an alias to ADDS (as
  // "operand - negative" == "operand + positive")
  bool Negative = (Opc == AArch64::ADDSWri || Opc == AArch64::ADDSXri);

  int Correction = (Cmp == AArch64CC::GT) ? 1 : -1;
  // Negate Correction value for comparison with negative immediate (CMN).
  if (Negative)
    Correction = -Correction;

  const int OldImm = (int)CmpMI->getOperand(2).getImm();
  const int NewImm = std::abs(OldImm + Correction);
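
  // For example, "cmp w8, #5" with GT (a > 5) yields NewImm 6 with GE
  // (a >= 6), while "cmn w8, #5" with GT (a > -5) yields NewImm 4 with GE
  // (a >= -4), because CMN encodes the negated comparison value.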

  // Handle +0 -> -1 and -0 -> +1 (CMN with 0 immediate) transitions by
  // adjusting compare instruction opcode.
  if (OldImm == 0 && ((Negative && Correction == 1) ||
                      (!Negative && Correction == -1))) {
    Opc = getComplementOpc(Opc);
  }

  return CmpInfo(NewImm, Opc, getAdjustedCmp(Cmp));
}

// Applies changes to comparison instruction suggested by adjustCmp().
void AArch64ConditionOptimizer::modifyCmp(MachineInstr *CmpMI,
                                          const CmpInfo &Info) {
  int Imm;
  unsigned Opc;
  AArch64CC::CondCode Cmp;
  std::tie(Imm, Opc, Cmp) = Info;

  MachineBasicBlock *const MBB = CmpMI->getParent();

  // Change immediate in comparison instruction (ADDS or SUBS).
  BuildMI(*MBB, CmpMI, CmpMI->getDebugLoc(), TII->get(Opc))
      .add(CmpMI->getOperand(0))
      .add(CmpMI->getOperand(1))
      .addImm(Imm)
      .add(CmpMI->getOperand(3));
  CmpMI->eraseFromParent();

  // The fact that this comparison was picked ensures that it's related to the
  // first terminator instruction.
  MachineInstr &BrMI = *MBB->getFirstTerminator();

  // Change condition in branch instruction.
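  // Bcc carries the condition code as its first operand and the target block
  // as its second, so only the target operand is copied from the old branch.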
  BuildMI(*MBB, BrMI, BrMI.getDebugLoc(), TII->get(AArch64::Bcc))
      .addImm(Cmp)
      .add(BrMI.getOperand(1));
  BrMI.eraseFromParent();

  ++NumConditionsAdjusted;
}

// Parse a condition code returned by analyzeBranch, and compute the CondCode
// corresponding to TBB.
// Returns true if parsing was successful, otherwise false is returned.
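// Note: compare-and-branch terminators (CBZ/CBNZ/TBZ/TBNZ) appear to be
// encoded by analyzeBranch with a leading -1 immediate plus extra operands;
// such conditions fail the check below and are left alone.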
static bool parseCond(ArrayRef<MachineOperand> Cond, AArch64CC::CondCode &CC) {
  // A normal br.cond simply has the condition code.
  if (Cond[0].getImm() != -1) {
    assert(Cond.size() == 1 && "Unknown Cond array format");
    CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    return true;
  }
  return false;
}

// Adjusts one cmp instruction to another one if result of adjustment will
// allow CSE. Returns true if compare instruction was changed, otherwise false
// is returned.
bool AArch64ConditionOptimizer::adjustTo(MachineInstr *CmpMI,
                                         AArch64CC::CondCode Cmp,
                                         MachineInstr *To, int ToImm) {
  CmpInfo Info = adjustCmp(CmpMI, Cmp);
  if (std::get<0>(Info) == ToImm && std::get<1>(Info) == To->getOpcode()) {
    modifyCmp(CmpMI, Info);
    return true;
  }
  return false;
}

bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
                    << "********** Function: " << MF.getName() << '\n');
  if (skipFunction(MF.getFunction()))
    return false;

  TII = MF.getSubtarget().getInstrInfo();
  DomTree = &getAnalysis<MachineDominatorTree>();
  MRI = &MF.getRegInfo();

  bool Changed = false;

  // Visit blocks in dominator tree pre-order. The pre-order enables multiple
  // cmp-conversions from the same head block.
  // Note that updateDomTree() modifies the children of the DomTree node
  // currently being visited. The df_iterator supports that; it doesn't look at
  // child_begin() / child_end() until after a node has been visited.
  for (MachineDomTreeNode *I : depth_first(DomTree)) {
    MachineBasicBlock *HBB = I->getBlock();

    SmallVector<MachineOperand, 4> HeadCond;
    MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
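    // Note that analyzeBranch() returns true when it *fails* to analyze the
    // block's terminators, so unanalyzable blocks are simply skipped.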
    if (TII->analyzeBranch(*HBB, TBB, FBB, HeadCond)) {
      continue;
    }

    // Equivalence check is to skip loops.
    if (!TBB || TBB == HBB) {
      continue;
    }

    SmallVector<MachineOperand, 4> TrueCond;
    MachineBasicBlock *TBB_TBB = nullptr, *TBB_FBB = nullptr;
    if (TII->analyzeBranch(*TBB, TBB_TBB, TBB_FBB, TrueCond)) {
      continue;
    }

    MachineInstr *HeadCmpMI = findSuitableCompare(HBB);
    if (!HeadCmpMI) {
      continue;
    }

    MachineInstr *TrueCmpMI = findSuitableCompare(TBB);
    if (!TrueCmpMI) {
      continue;
    }

    AArch64CC::CondCode HeadCmp;
    if (HeadCond.empty() || !parseCond(HeadCond, HeadCmp)) {
      continue;
    }

    AArch64CC::CondCode TrueCmp;
    if (TrueCond.empty() || !parseCond(TrueCond, TrueCmp)) {
      continue;
    }

    const int HeadImm = (int)HeadCmpMI->getOperand(2).getImm();
    const int TrueImm = (int)TrueCmpMI->getOperand(2).getImm();

    LLVM_DEBUG(dbgs() << "Head branch:\n");
    LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(HeadCmp)
                      << '\n');
    LLVM_DEBUG(dbgs() << "\timmediate: " << HeadImm << '\n');

    LLVM_DEBUG(dbgs() << "True branch:\n");
    LLVM_DEBUG(dbgs() << "\tcondition: " << AArch64CC::getCondCodeName(TrueCmp)
                      << '\n');
    LLVM_DEBUG(dbgs() << "\timmediate: " << TrueImm << '\n');

    if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::LT) ||
         (HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::GT)) &&
        std::abs(TrueImm - HeadImm) == 2) {
      // This branch transforms machine instructions that correspond to the
      // following pattern:
      //
      // 1) (a > {TrueImm} && ...) || (a < {HeadImm} && ...)
      // 2) (a < {TrueImm} && ...) || (a > {HeadImm} && ...)
      //
      // into
      //
      // 1) (a >= {NewImm} && ...) || (a <= {NewImm} && ...)
      // 2) (a <= {NewImm} && ...) || (a >= {NewImm} && ...)
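      //
      // For example, "cmp w8, #4" + b.gt in the head block and "cmp w8, #6" +
      // b.lt in the true block both become "cmp w8, #5" (with b.ge / b.le),
      // letting CSE remove the second compare.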

      CmpInfo HeadCmpInfo = adjustCmp(HeadCmpMI, HeadCmp);
      CmpInfo TrueCmpInfo = adjustCmp(TrueCmpMI, TrueCmp);
      if (std::get<0>(HeadCmpInfo) == std::get<0>(TrueCmpInfo) &&
          std::get<1>(HeadCmpInfo) == std::get<1>(TrueCmpInfo)) {
        modifyCmp(HeadCmpMI, HeadCmpInfo);
        modifyCmp(TrueCmpMI, TrueCmpInfo);
        Changed = true;
      }
    } else if (((HeadCmp == AArch64CC::GT && TrueCmp == AArch64CC::GT) ||
                (HeadCmp == AArch64CC::LT && TrueCmp == AArch64CC::LT)) &&
               std::abs(TrueImm - HeadImm) == 1) {
      // This branch transforms machine instructions that correspond to the
      // following pattern:
      //
      // 1) (a > {TrueImm} && ...) || (a > {HeadImm} && ...)
      // 2) (a < {TrueImm} && ...) || (a < {HeadImm} && ...)
      //
      // into
      //
      // 1) (a <= {NewImm} && ...) || (a >  {NewImm} && ...)
      // 2) (a <  {NewImm} && ...) || (a >= {NewImm} && ...)
      //
      // GT -> GE transformation increases immediate value, so picking the
      // smaller one; LT -> LE decreases immediate value so invert the choice.
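      //
      // For example, with "a > 3" in one block and "a > 4" in the other, the
      // compare against 3 is rewritten to "a >= 4", so both compares end up
      // using the immediate 4.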
      bool adjustHeadCond = (HeadImm < TrueImm);
      if (HeadCmp == AArch64CC::LT) {
        adjustHeadCond = !adjustHeadCond;
      }

      if (adjustHeadCond) {
        Changed |= adjustTo(HeadCmpMI, HeadCmp, TrueCmpMI, TrueImm);
      } else {
        Changed |= adjustTo(TrueCmpMI, TrueCmp, HeadCmpMI, HeadImm);
      }
    }
    // Other transformation cases almost never occur due to generation of < or >
    // comparisons instead of <= and >=.
  }

  return Changed;
}