//===-- lib/CodeGen/GlobalISel/GICombinerHelper.cpp -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

#define DEBUG_TYPE "gi-combiner"

using namespace llvm;
CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
                               MachineIRBuilder &B)
    : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer) {}
void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
                                    Register ToReg) const {
  Observer.changingAllUsesOfReg(MRI, FromReg);

  // If ToReg's attributes can be constrained to match FromReg's, do a plain
  // replacement; otherwise fall back to emitting a COPY into ToReg.
  if (MRI.constrainRegAttrs(ToReg, FromReg))
    MRI.replaceRegWith(FromReg, ToReg);
  else
    Builder.buildCopy(ToReg, FromReg);

  Observer.finishedChangingAllUsesOfReg();
}
void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
                                      MachineOperand &FromRegOp,
                                      Register ToReg) const {
  assert(FromRegOp.getParent() && "Expected an operand in an MI");
  Observer.changingInstr(*FromRegOp.getParent());

  FromRegOp.setReg(ToReg);

  Observer.changedInstr(*FromRegOp.getParent());
}
bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
  if (matchCombineCopy(MI)) {
    applyCombineCopy(MI);
    return true;
  }
  return false;
}
bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
  if (MI.getOpcode() != TargetOpcode::COPY)
    return false;
  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SrcReg = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT SrcTy = MRI.getType(SrcReg);
  // Simple Copy Propagation.
  // a(sx) = COPY b(sx) -> Replace all uses of a with b.
  if (DstTy.isValid() && SrcTy.isValid() && DstTy == SrcTy)
    return true;
  return false;
}
void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SrcReg = MI.getOperand(1).getReg();
  // Erase the copy first so the replacement below doesn't rewrite the copy's
  // own operands.
  MI.eraseFromParent();
  replaceRegWith(MRI, DstReg, SrcReg);
}
namespace {

/// Select a preference between two uses. CurrentUse is the current preference
/// while *ForCandidate is attributes of the candidate under consideration.
PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
                                  const LLT &TyForCandidate,
                                  unsigned OpcodeForCandidate,
                                  MachineInstr *MIForCandidate) {
  // No preference has been chosen yet. Take the candidate if its extend
  // matches the load's own extend kind, or if any extend is acceptable.
  if (!CurrentUse.Ty.isValid()) {
    if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
        CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
    return CurrentUse;
  }
  // We permit the extend to hoist through basic blocks but this is only
  // sensible if the target has extending loads. If you end up lowering back
  // into a load and extend during the legalizer then the end result is
  // hoisting the extend up to the load.

  // Prefer defined extensions to undefined extensions as these are more
  // likely to reduce the number of instructions.
  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
      CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
    return CurrentUse;
  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
           OpcodeForCandidate != TargetOpcode::G_ANYEXT)
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive as separate instructions, so folding them into the load is
  // the bigger win.
  if (CurrentUse.Ty == TyForCandidate) {
    if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
        OpcodeForCandidate == TargetOpcode::G_ZEXT)
      return CurrentUse;
    else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
             OpcodeForCandidate == TargetOpcode::G_SEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }
  // This is potentially target specific. We've chosen the largest type
  // because G_TRUNC is usually free. One potential catch with this is that
  // some targets have a reduced number of larger registers than smaller
  // registers and this choice potentially increases the live-range for the
  // larger value.
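  //
  // For example (illustrative only; register names are made up): if an s8
  // load is used by both "%a:_(s32) = G_SEXT %v" and "%b:_(s64) = G_SEXT %v",
  // the s64 use is preferred, and the s32 user is later rewritten to read a
  // G_TRUNC of the s64 extending load's result.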
  if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }
  return CurrentUse;
}
/// Find a suitable place to insert some instructions and insert them. This
/// function accounts for special cases like inserting before a PHI node.
/// The current strategy for inserting before PHIs is to duplicate the
/// instructions for each predecessor. However, while that's ok for G_TRUNC
/// on most targets since it generally requires no code, other targets/cases may
/// want to try harder to find a dominating block.
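///
/// For example (an illustrative sketch, not from a real test): for a use in
/// "%p:_(s8) = G_PHI %t(s8), %bb.1, ...", the instructions are inserted into
/// the matching predecessor block (%bb.1) rather than in front of the PHI.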
static void InsertInsnsWithoutSideEffectsBeforeUse(
    MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
    std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
                       MachineOperand &UseMO)>
        Inserter) {
  MachineInstr &UseMI = *UseMO.getParent();

  MachineBasicBlock *InsertBB = UseMI.getParent();

  // If the use is a PHI then we want the predecessor block instead.
  if (UseMI.isPHI()) {
    MachineOperand *PredBB = std::next(&UseMO);
    InsertBB = PredBB->getMBB();
  }

  // If the block is the same block as the def then we want to insert just after
  // the def instead of at the start of the block.
  if (InsertBB == DefMI.getParent()) {
    MachineBasicBlock::iterator InsertPt = &DefMI;
    Inserter(InsertBB, std::next(InsertPt), UseMO);
    return;
  }

  // Otherwise we want the start of the BB
  Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
}
} // end anonymous namespace
bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
  PreferredTuple Preferred;
  if (matchCombineExtendingLoads(MI, Preferred)) {
    applyCombineExtendingLoads(MI, Preferred);
    return true;
  }
  return false;
}
bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
                                                PreferredTuple &Preferred) {
  // We match the loads and follow the uses to the extend instead of matching
  // the extends and following the def to the load. This is because the load
  // must remain in the same position for correctness (unless we also add code
  // to find a safe place to sink it) whereas the extend is freely movable.
  // It also prevents us from duplicating the load for the volatile case or just
  // for performance.
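  //
  // A minimal sketch of the rewrite this match enables (register names are
  // illustrative, not taken from a real test):
  //   %v:_(s8)  = G_LOAD %ptr
  //   %e:_(s32) = G_SEXT %v(s8)
  // is turned by applyCombineExtendingLoads into:
  //   %e:_(s32) = G_SEXTLOAD %ptr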

  if (MI.getOpcode() != TargetOpcode::G_LOAD &&
      MI.getOpcode() != TargetOpcode::G_SEXTLOAD &&
      MI.getOpcode() != TargetOpcode::G_ZEXTLOAD)
    return false;
  auto &LoadValue = MI.getOperand(0);
  assert(LoadValue.isReg() && "Result wasn't a register?");

  LLT LoadValueTy = MRI.getType(LoadValue.getReg());
  if (!LoadValueTy.isScalar())
    return false;
  // Most architectures are going to legalize <s8 loads into at least a 1 byte
  // load, and the MMOs can only describe memory accesses in multiples of bytes.
  // If we try to perform extload combining on those, we can end up with
  //   %a(s8) = extload %ptr (load 1 byte from %ptr)
  // ... which is an illegal extload instruction.
  if (LoadValueTy.getSizeInBits() < 8)
    return false;
  // Non-power-of-2 types will very likely be legalized into multiple loads,
  // so don't bother trying to match them into extending loads.
  if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
    return false;
  // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
  // relative type sizes. At the same time, pick an extend to use based on the
  // extend involved in the chosen type.
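  //
  // For example (illustrative registers only): if an s8 G_LOAD is used by
  // "%a:_(s32) = G_SEXT %v" and "%b:_(s64) = G_ANYEXT %v", the s32 G_SEXT is
  // preferred because defined extensions beat any-extends; the any-extend user
  // is then rewritten in terms of the G_SEXTLOAD result.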
  unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD
                                 ? TargetOpcode::G_ANYEXT
                                 : MI.getOpcode() == TargetOpcode::G_SEXTLOAD
                                       ? TargetOpcode::G_SEXT
                                       : TargetOpcode::G_ZEXT;
  Preferred = {LLT(), PreferredOpcode, nullptr};
  for (auto &UseMI : MRI.use_instructions(LoadValue.getReg())) {
    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {
      Preferred = ChoosePreferredUse(Preferred,
                                     MRI.getType(UseMI.getOperand(0).getReg()),
                                     UseMI.getOpcode(), &UseMI);
    }
  }
  // There were no extends
  if (!Preferred.MI)
    return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");

  LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
  return true;
}
void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
                                                PreferredTuple &Preferred) {
  // Rewrite the load to the chosen extending load.
  Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
  // Inserter to insert a truncate back to the original type at a given point
  // with some basic CSE to limit truncate duplication to one per BB.
  DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
  auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
                           MachineBasicBlock::iterator InsertBefore,
                           MachineOperand &UseMO) {
    MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
    if (PreviouslyEmitted) {
      // Reuse the truncate already emitted into this block.
      Observer.changingInstr(*UseMO.getParent());
      UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
      Observer.changedInstr(*UseMO.getParent());
      return;
    }

    Builder.setInsertPt(*InsertIntoBB, InsertBefore);
    Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
    MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
    EmittedInsns[InsertIntoBB] = NewMI;
    replaceRegOpWith(MRI, UseMO, NewDstReg);
  };
  Observer.changingInstr(MI);
  MI.setDesc(
      Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
                               ? TargetOpcode::G_SEXTLOAD
                               : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
                                     ? TargetOpcode::G_ZEXTLOAD
                                     : TargetOpcode::G_LOAD));
  // Rewrite all the uses to fix up the types.
  auto &LoadValue = MI.getOperand(0);
  SmallVector<MachineOperand *, 4> Uses;
  for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
    Uses.push_back(&UseMO);
  for (auto *UseMO : Uses) {
    MachineInstr *UseMI = UseMO->getParent();

    // If the extend is compatible with the preferred extend then we should fix
    // up the type and extend so that it uses the preferred use.
    if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
        UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
      unsigned UseDstReg = UseMI->getOperand(0).getReg();
      MachineOperand &UseSrcMO = UseMI->getOperand(1);
      const LLT &UseDstTy = MRI.getType(UseDstReg);
      if (UseDstReg != ChosenDstReg) {
        if (Preferred.Ty == UseDstTy) {
          // If the use has the same type as the preferred use, then merge
          // the vregs and erase the extend. For example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s32) = G_SEXT %1(s8)
          //   %3:_(s32) = G_ANYEXT %1(s8)
          // rewrites to:
          //   %2:_(s32) = G_SEXTLOAD ...
          // with all uses of %3 replaced by %2.
          replaceRegWith(MRI, UseDstReg, ChosenDstReg);
          Observer.erasingInstr(*UseMO->getParent());
          UseMO->getParent()->eraseFromParent();
        } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
          // If the preferred size is smaller, then keep the extend but extend
          // from the result of the extending load. For example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s32) = G_SEXT %1(s8)
          //   %3:_(s64) = G_ANYEXT %1(s8)
          // rewrites to:
          //   %2:_(s32) = G_SEXTLOAD ...
          //   %3:_(s64) = G_ANYEXT %2:_(s32)
          replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
        } else {
          // If the preferred size is larger, then insert a truncate. For
          // example:
          //   %1:_(s8) = G_LOAD ...
          //   %2:_(s64) = G_SEXT %1(s8)
          //   %3:_(s32) = G_ZEXT %1(s8)
          // rewrites to:
          //   %2:_(s64) = G_SEXTLOAD ...
          //   %4:_(s8) = G_TRUNC %2:_(s64)
          //   %3:_(s32) = G_ZEXT %4:_(s8)
          InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
                                                 InsertTruncAt);
        }
        continue;
      }

      // The use is (one of) the uses of the preferred use we chose earlier.
      // We're going to update the load to def this value later so just erase
      // the old extend.
      Observer.erasingInstr(*UseMO->getParent());
      UseMO->getParent()->eraseFromParent();
      continue;
    }
    // The use isn't an extend. Truncate back to the type we originally loaded.
    // This is free on many targets.
    InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
  }

  MI.getOperand(0).setReg(ChosenDstReg);
  Observer.changedInstr(MI);
}
bool CombinerHelper::matchCombineBr(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_BR && "Expected a G_BR");
  // Try to match the following:
  // bb1:
  //   %c(s32) = G_ICMP pred, %a, %b
  //   %c1(s1) = G_TRUNC %c(s32)
  //   G_BRCOND %c1, %bb2
  //   G_BR %bb3
  // bb2:
  //   ...
  // bb3:
  //   ...
  //
  // The above pattern does not have a fall through to the successor bb2, always
  // resulting in a branch no matter which path is taken. Here we try to find
  // and replace that pattern with a conditional branch to bb3 and otherwise a
  // fallthrough to bb2.
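  //
  // That is, the rewrite performed by tryCombineBr is roughly (sketch only):
  // bb1:
  //   %c(s32) = G_ICMP inverted(pred), %a, %b
  //   %c1(s1) = G_TRUNC %c(s32)
  //   G_BRCOND %c1, %bb3
  // bb2:
  //   ...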

  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator BrIt(MI);
  if (BrIt == MBB->begin())
    return false;
  assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");

  MachineInstr *BrCond = &*std::prev(BrIt);
  if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
    return false;

  // Check that the next block is the conditional branch target.
  if (!MBB->isLayoutSuccessor(BrCond->getOperand(1).getMBB()))
    return false;

  MachineInstr *CmpMI = MRI.getVRegDef(BrCond->getOperand(0).getReg());
  if (!CmpMI || CmpMI->getOpcode() != TargetOpcode::G_ICMP ||
      !MRI.hasOneUse(CmpMI->getOperand(0).getReg()))
    return false;
  return true;
}
bool CombinerHelper::tryCombineBr(MachineInstr &MI) {
  if (!matchCombineBr(MI))
    return false;
  MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
  MachineBasicBlock::iterator BrIt(MI);
  MachineInstr *BrCond = &*std::prev(BrIt);
  MachineInstr *CmpMI = MRI.getVRegDef(BrCond->getOperand(0).getReg());

  CmpInst::Predicate InversePred = CmpInst::getInversePredicate(
      (CmpInst::Predicate)CmpMI->getOperand(1).getPredicate());

  // Invert the G_ICMP condition.
  Observer.changingInstr(*CmpMI);
  CmpMI->getOperand(1).setPredicate(InversePred);
  Observer.changedInstr(*CmpMI);

  // Change the conditional branch target.
  Observer.changingInstr(*BrCond);
  BrCond->getOperand(1).setMBB(BrTarget);
  Observer.changedInstr(*BrCond);
  MI.eraseFromParent();
  return true;
}
bool CombinerHelper::tryCombine(MachineInstr &MI) {
  if (tryCombineCopy(MI))
    return true;
  return tryCombineExtendingLoads(MI);
}