//===-- lib/CodeGen/GlobalISel/GICombinerHelper.cpp -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/DivisionByConstantInfo.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#define DEBUG_TYPE "gi-combiner"

using namespace llvm;
using namespace MIPatternMatch;

// Option to allow testing of the combiner while no targets know about indexed
// addressing.
static cl::opt<bool>
    ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
                       cl::desc("Force all indexed operations to be "
                                "legal for the GlobalISel combiner"));
CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
                               MachineIRBuilder &B, bool IsPreLegalize,
                               GISelKnownBits *KB, MachineDominatorTree *MDT,
                               const LegalizerInfo *LI)
    : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB),
      MDT(MDT), IsPreLegalize(IsPreLegalize), LI(LI),
      RBI(Builder.getMF().getSubtarget().getRegBankInfo()),
      TRI(Builder.getMF().getSubtarget().getRegisterInfo()) {
}

const TargetLowering &CombinerHelper::getTargetLowering() const {
  return *Builder.getMF().getSubtarget().getTargetLowering();
}
/// \returns The little endian in-memory byte position of byte \p I in a
/// \p ByteWidth bytes wide type.
///
/// E.g. Given a 4-byte type x, x[0] -> byte 0
static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I) {
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return I;
}
/// Determines the LogBase2 value for a non-null input value using the
/// transform: LogBase2(V) = (EltBits - 1) - ctlz(V).
static Register buildLogBase2(Register V, MachineIRBuilder &MIB) {
  auto &MRI = *MIB.getMRI();
  LLT Ty = MRI.getType(V);
  auto Ctlz = MIB.buildCTLZ(Ty, V);
  auto Base = MIB.buildConstant(Ty, Ty.getScalarSizeInBits() - 1);
  return MIB.buildSub(Ty, Base, Ctlz).getReg(0);
}
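// Illustrative only (not part of the original source): for a 32-bit value %v,
// buildLogBase2 emits MIR along the lines of
//   %ctlz:_(s32) = G_CTLZ %v
//   %c31:_(s32)  = G_CONSTANT i32 31
//   %log2:_(s32) = G_SUB %c31, %ctlz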
/// \returns The big endian in-memory byte position of byte \p I in a
/// \p ByteWidth bytes wide type.
///
/// E.g. Given a 4-byte type x, x[0] -> byte 3
static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) {
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return ByteWidth - I - 1;
}
/// Given a map from byte offsets in memory to indices in a load/store,
/// determine if that map corresponds to a little or big endian byte pattern.
///
/// \param MemOffset2Idx maps memory offsets to address offsets.
/// \param LowestIdx is the lowest index in \p MemOffset2Idx.
///
/// \returns true if the map corresponds to a big endian byte pattern, false if
/// it corresponds to a little endian byte pattern, and std::nullopt otherwise.
///
/// E.g. given a 32-bit type x, and x[AddrOffset], the in-memory byte patterns
/// are as follows:
///
/// AddrOffset   Little endian    Big endian
/// 0            0                3
/// 1            1                2
/// 2            2                1
/// 3            3                0
static std::optional<bool>
isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
            int64_t LowestIdx) {
  // Need at least two byte positions to decide on endianness.
  unsigned Width = MemOffset2Idx.size();
  if (Width < 2)
    return std::nullopt;
  bool BigEndian = true, LittleEndian = true;
  for (unsigned MemOffset = 0; MemOffset < Width; ++MemOffset) {
    auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
    if (MemOffsetAndIdx == MemOffset2Idx.end())
      return std::nullopt;
    const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
    assert(Idx >= 0 && "Expected non-negative byte offset?");
    LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset);
    BigEndian &= Idx == bigEndianByteAt(Width, MemOffset);
    if (!BigEndian && !LittleEndian)
      return std::nullopt;
  }

  assert((BigEndian != LittleEndian) &&
         "Pattern cannot be both big and little endian!");
  return BigEndian;
}
bool CombinerHelper::isPreLegalize() const { return IsPreLegalize; }

bool CombinerHelper::isLegal(const LegalityQuery &Query) const {
  assert(LI && "Must have LegalizerInfo to query isLegal!");
  return LI->getAction(Query).Action == LegalizeActions::Legal;
}

bool CombinerHelper::isLegalOrBeforeLegalizer(
    const LegalityQuery &Query) const {
  return isPreLegalize() || isLegal(Query);
}

bool CombinerHelper::isConstantLegalOrBeforeLegalizer(const LLT Ty) const {
  if (!Ty.isVector())
    return isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {Ty}});
  // Vector constants are represented as a G_BUILD_VECTOR of scalar G_CONSTANTs.
  if (isPreLegalize())
    return true;
  LLT EltTy = Ty.getElementType();
  return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) &&
         isLegal({TargetOpcode::G_CONSTANT, {EltTy}});
}
void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
                                    Register ToReg) const {
  Observer.changingAllUsesOfReg(MRI, FromReg);

  if (MRI.constrainRegAttrs(ToReg, FromReg))
    MRI.replaceRegWith(FromReg, ToReg);
  else
    Builder.buildCopy(ToReg, FromReg);

  Observer.finishedChangingAllUsesOfReg();
}
void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
                                      MachineOperand &FromRegOp,
                                      Register ToReg) const {
  assert(FromRegOp.getParent() && "Expected an operand in an MI");
  Observer.changingInstr(*FromRegOp.getParent());

  FromRegOp.setReg(ToReg);

  Observer.changedInstr(*FromRegOp.getParent());
}
void CombinerHelper::replaceOpcodeWith(MachineInstr &FromMI,
                                       unsigned ToOpcode) const {
  Observer.changingInstr(FromMI);

  FromMI.setDesc(Builder.getTII().get(ToOpcode));

  Observer.changedInstr(FromMI);
}
const RegisterBank *CombinerHelper::getRegBank(Register Reg) const {
  return RBI->getRegBank(Reg, MRI, *TRI);
}

void CombinerHelper::setRegBank(Register Reg, const RegisterBank *RegBank) {
  if (RegBank)
    MRI.setRegBank(Reg, *RegBank);
}
bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
  if (matchCombineCopy(MI)) {
    applyCombineCopy(MI);
    return true;
  }
  return false;
}

bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
  if (MI.getOpcode() != TargetOpcode::COPY)
    return false;
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  return canReplaceReg(DstReg, SrcReg, MRI);
}

void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  MI.eraseFromParent();
  replaceRegWith(MRI, DstReg, SrcReg);
}
bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
  bool IsUndef = false;
  SmallVector<Register, 4> Ops;
  if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
    applyCombineConcatVectors(MI, IsUndef, Ops);
    return true;
  }
  return false;
}
bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
                                               SmallVectorImpl<Register> &Ops) {
  assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
         "Invalid instruction");
  IsUndef = true;
  MachineInstr *Undef = nullptr;

  // Walk over all the operands of concat vectors and check if they are
  // build_vector themselves or undef.
  // Then collect their operands in Ops.
  for (const MachineOperand &MO : MI.uses()) {
    Register Reg = MO.getReg();
    MachineInstr *Def = MRI.getVRegDef(Reg);
    assert(Def && "Operand not defined");
    switch (Def->getOpcode()) {
    case TargetOpcode::G_BUILD_VECTOR:
      IsUndef = false;
      // Remember the operands of the build_vector to fold
      // them into the yet-to-build flattened concat vectors.
      for (const MachineOperand &BuildVecMO : Def->uses())
        Ops.push_back(BuildVecMO.getReg());
      break;
    case TargetOpcode::G_IMPLICIT_DEF: {
      LLT OpType = MRI.getType(Reg);
      // Keep one undef value for all the undef operands.
      if (!Undef) {
        Builder.setInsertPt(*MI.getParent(), MI);
        Undef = Builder.buildUndef(OpType.getScalarType());
      }
      assert(MRI.getType(Undef->getOperand(0).getReg()) ==
                 OpType.getScalarType() &&
             "All undefs should have the same type");
      // Break the undef vector in as many scalar elements as needed
      // for the flattening.
      for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
           EltIdx != EltEnd; ++EltIdx)
        Ops.push_back(Undef->getOperand(0).getReg());
      break;
    }
    default:
      return false;
    }
  }
  return true;
}
void CombinerHelper::applyCombineConcatVectors(
    MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
  // We determined that the concat_vectors can be flattened.
  // Generate the flattened build_vector.
  Register DstReg = MI.getOperand(0).getReg();
  Builder.setInsertPt(*MI.getParent(), MI);
  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);

  // Note: IsUndef is sort of redundant. We could have determined it by
  // checking that all Ops are undef. Alternatively, we could have
  // generated a build_vector of undefs and relied on another combine to
  // clean that up. For now, given we already gather this information
  // in tryCombineConcatVectors, just save compile time and issue the
  // right thing.
  if (IsUndef)
    Builder.buildUndef(NewDstReg);
  else
    Builder.buildBuildVector(NewDstReg, Ops);
  MI.eraseFromParent();
  replaceRegWith(MRI, DstReg, NewDstReg);
}
bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
  SmallVector<Register, 4> Ops;
  if (matchCombineShuffleVector(MI, Ops)) {
    applyCombineShuffleVector(MI, Ops);
    return true;
  }
  return false;
}
bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
                                               SmallVectorImpl<Register> &Ops) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Invalid instruction kind");
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
  Register Src1 = MI.getOperand(1).getReg();
  LLT SrcType = MRI.getType(Src1);
  // As bizarre as it may look, shuffle vector can actually produce
  // scalar! This is because at the IR level a <1 x ty> shuffle
  // vector is perfectly valid.
  unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
  unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;

  // If the resulting vector is smaller than the size of the source
  // vectors being concatenated, we won't be able to replace the
  // shuffle vector into a concat_vectors.
  //
  // Note: We may still be able to produce a concat_vectors fed by
  //       extract_vector_elt and so on. It is less clear that would
  //       be better though, so don't bother for now.
  //
  // If the destination is a scalar, the size of the sources doesn't
  // matter. We will lower the shuffle to a plain copy. This will
  // work only if the source and destination have the same size. But
  // that's covered by the next condition.
  //
  // TODO: If the size between the source and destination don't match
  //       we could still emit an extract vector element in that case.
  if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
    return false;

  // Check that the shuffle mask can be broken evenly between the
  // different sources.
  if (DstNumElts % SrcNumElts != 0)
    return false;

  // Mask length is a multiple of the source vector length.
  // Check if the shuffle is some kind of concatenation of the input
  // vectors.
  unsigned NumConcat = DstNumElts / SrcNumElts;
  SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  for (unsigned i = 0; i != DstNumElts; ++i) {
    int Idx = Mask[i];
    // Undef value.
    if (Idx < 0)
      continue;
    // Ensure the indices in each SrcType sized piece are sequential and that
    // the same source is used for the whole piece.
    if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
        (ConcatSrcs[i / SrcNumElts] >= 0 &&
         ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
      return false;
    // Remember which source this index came from.
    ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
  }

  // The shuffle is concatenating multiple vectors together.
  // Collect the different operands for that.
  Register UndefReg;
  Register Src2 = MI.getOperand(2).getReg();
  for (auto Src : ConcatSrcs) {
    if (Src < 0) {
      if (!UndefReg) {
        Builder.setInsertPt(*MI.getParent(), MI);
        UndefReg = Builder.buildUndef(SrcType).getReg(0);
      }
      Ops.push_back(UndefReg);
    } else if (Src == 0)
      Ops.push_back(Src1);
    else
      Ops.push_back(Src2);
  }
  return true;
}
void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
                                               const ArrayRef<Register> Ops) {
  Register DstReg = MI.getOperand(0).getReg();
  Builder.setInsertPt(*MI.getParent(), MI);
  Register NewDstReg = MRI.cloneVirtualRegister(DstReg);

  if (Ops.size() == 1)
    Builder.buildCopy(NewDstReg, Ops[0]);
  else
    Builder.buildMergeLikeInstr(NewDstReg, Ops);

  MI.eraseFromParent();
  replaceRegWith(MRI, DstReg, NewDstReg);
}
namespace {

/// Select a preference between two uses. CurrentUse is the current preference
/// while *ForCandidate is attributes of the candidate under consideration.
PreferredTuple ChoosePreferredUse(MachineInstr &LoadMI,
                                  PreferredTuple &CurrentUse,
                                  const LLT TyForCandidate,
                                  unsigned OpcodeForCandidate,
                                  MachineInstr *MIForCandidate) {
  if (!CurrentUse.Ty.isValid()) {
    if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
        CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
    return CurrentUse;
  }

  // We permit the extend to hoist through basic blocks but this is only
  // sensible if the target has extending loads. If you end up lowering back
  // into a load and extend during the legalizer then the end result is
  // hoisting the extend up to the load.

  // Prefer defined extensions to undefined extensions as these are more
  // likely to reduce the number of instructions.
  if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
      CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
    return CurrentUse;
  else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
           OpcodeForCandidate != TargetOpcode::G_ANYEXT)
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};

  // Prefer sign extensions to zero extensions as sign-extensions tend to be
  // more expensive. Don't do this if the load is already a zero-extend load
  // though, otherwise we'll rewrite a zero-extend load into a sign-extend
  // load.
  if (!isa<GZExtLoad>(LoadMI) && CurrentUse.Ty == TyForCandidate) {
    if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
        OpcodeForCandidate == TargetOpcode::G_ZEXT)
      return CurrentUse;
    else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
             OpcodeForCandidate == TargetOpcode::G_SEXT)
      return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }

  // This is potentially target specific. We've chosen the largest type
  // because G_TRUNC is usually free. One potential catch with this is that
  // some targets have a reduced number of larger registers than smaller
  // registers and this choice potentially increases the live-range for the
  // larger value.
  if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
    return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
  }
  return CurrentUse;
}
/// Find a suitable place to insert some instructions and insert them. This
/// function accounts for special cases like inserting before a PHI node.
/// The current strategy for inserting before PHI's is to duplicate the
/// instructions for each predecessor. However, while that's ok for G_TRUNC
/// on most targets since it generally requires no code, other targets/cases may
/// want to try harder to find a dominating block.
static void InsertInsnsWithoutSideEffectsBeforeUse(
    MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
    std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
                       MachineOperand &UseMO)>
        Inserter) {
  MachineInstr &UseMI = *UseMO.getParent();

  MachineBasicBlock *InsertBB = UseMI.getParent();

  // If the use is a PHI then we want the predecessor block instead.
  if (UseMI.isPHI()) {
    MachineOperand *PredBB = std::next(&UseMO);
    InsertBB = PredBB->getMBB();
  }

  // If the block is the same block as the def then we want to insert just after
  // the def instead of at the start of the block.
  if (InsertBB == DefMI.getParent()) {
    MachineBasicBlock::iterator InsertPt = &DefMI;
    Inserter(InsertBB, std::next(InsertPt), UseMO);
    return;
  }

  // Otherwise we want the start of the BB
  Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
}
} // end anonymous namespace
bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
  PreferredTuple Preferred;
  if (matchCombineExtendingLoads(MI, Preferred)) {
    applyCombineExtendingLoads(MI, Preferred);
    return true;
  }
  return false;
}
) {
496 unsigned CandidateLoadOpc
;
498 case TargetOpcode::G_ANYEXT
:
499 CandidateLoadOpc
= TargetOpcode::G_LOAD
;
501 case TargetOpcode::G_SEXT
:
502 CandidateLoadOpc
= TargetOpcode::G_SEXTLOAD
;
504 case TargetOpcode::G_ZEXT
:
505 CandidateLoadOpc
= TargetOpcode::G_ZEXTLOAD
;
508 llvm_unreachable("Unexpected extend opc");
510 return CandidateLoadOpc
;
bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
                                                PreferredTuple &Preferred) {
  // We match the loads and follow the uses to the extend instead of matching
  // the extends and following the def to the load. This is because the load
  // must remain in the same position for correctness (unless we also add code
  // to find a safe place to sink it) whereas the extend is freely movable.
  // It also prevents us from duplicating the load for the volatile case or just
  // for performance.
  GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI);
  if (!LoadMI)
    return false;

  Register LoadReg = LoadMI->getDstReg();

  LLT LoadValueTy = MRI.getType(LoadReg);
  if (!LoadValueTy.isScalar())
    return false;

  // Most architectures are going to legalize <s8 loads into at least a 1 byte
  // load, and the MMOs can only describe memory accesses in multiples of bytes.
  // If we try to perform extload combining on those, we can end up with
  //  %a(s8) = extload %ptr (load 1 byte from %ptr)
  // ... which is an illegal extload instruction.
  if (LoadValueTy.getSizeInBits() < 8)
    return false;

  // For non power-of-2 types, they will very likely be legalized into multiple
  // loads. Don't bother trying to match them into extending loads.
  if (!llvm::has_single_bit<uint32_t>(LoadValueTy.getSizeInBits()))
    return false;

  // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
  // relative type sizes. At the same time, pick an extend to use based on the
  // extend involved in the chosen type.
  unsigned PreferredOpcode =
      isa<GLoad>(&MI)
          ? TargetOpcode::G_ANYEXT
          : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
  Preferred = {LLT(), PreferredOpcode, nullptr};
  for (auto &UseMI : MRI.use_nodbg_instructions(LoadReg)) {
    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
        UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
        (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
      const auto &MMO = LoadMI->getMMO();
      // For atomics, only form anyextending loads.
      if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT)
        continue;
      // Check for legality.
      if (!isPreLegalize()) {
        LegalityQuery::MemDesc MMDesc(MMO);
        unsigned CandidateLoadOpc = getExtLoadOpcForExtend(UseMI.getOpcode());
        LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
        LLT SrcTy = MRI.getType(LoadMI->getPointerReg());
        if (LI->getAction({CandidateLoadOpc, {UseTy, SrcTy}, {MMDesc}})
                .Action != LegalizeActions::Legal)
          continue;
      }
      Preferred = ChoosePreferredUse(MI, Preferred,
                                     MRI.getType(UseMI.getOperand(0).getReg()),
                                     UseMI.getOpcode(), &UseMI);
    }
  }

  // There were no extends
  if (!Preferred.MI)
    return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
  assert(Preferred.Ty != LoadValueTy && "Extending to same type?");

  LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
  return true;
}
void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
                                                PreferredTuple &Preferred) {
  // Rewrite the load to the chosen extending load.
  Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();

  // Inserter to insert a truncate back to the original type at a given point
  // with some basic CSE to limit truncate duplication to one per BB.
  DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
  auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
                           MachineBasicBlock::iterator InsertBefore,
                           MachineOperand &UseMO) {
    MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
    if (PreviouslyEmitted) {
      Observer.changingInstr(*UseMO.getParent());
      UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
      Observer.changedInstr(*UseMO.getParent());
      return;
    }

    Builder.setInsertPt(*InsertIntoBB, InsertBefore);
    Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
    MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
    EmittedInsns[InsertIntoBB] = NewMI;
    replaceRegOpWith(MRI, UseMO, NewDstReg);
  };

  Observer.changingInstr(MI);
  unsigned LoadOpc = getExtLoadOpcForExtend(Preferred.ExtendOpcode);
  MI.setDesc(Builder.getTII().get(LoadOpc));

  // Rewrite all the uses to fix up the types.
  auto &LoadValue = MI.getOperand(0);
  SmallVector<MachineOperand *, 4> Uses;
  for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
    Uses.push_back(&UseMO);

  for (auto *UseMO : Uses) {
    MachineInstr *UseMI = UseMO->getParent();

    // If the extend is compatible with the preferred extend then we should fix
    // up the type and extend so that it uses the preferred use.
    if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
        UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
      Register UseDstReg = UseMI->getOperand(0).getReg();
      MachineOperand &UseSrcMO = UseMI->getOperand(1);
      const LLT UseDstTy = MRI.getType(UseDstReg);
      if (UseDstReg != ChosenDstReg) {
        if (Preferred.Ty == UseDstTy) {
          // If the use has the same type as the preferred use, then merge
          // the vregs and erase the extend. For example:
          //    %1:_(s8) = G_LOAD ...
          //    %2:_(s32) = G_SEXT %1(s8)
          //    %3:_(s32) = G_ANYEXT %1(s8)
          // rewrites to:
          //    %2:_(s32) = G_SEXTLOAD ...
          replaceRegWith(MRI, UseDstReg, ChosenDstReg);
          Observer.erasingInstr(*UseMO->getParent());
          UseMO->getParent()->eraseFromParent();
        } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
          // If the preferred size is smaller, then keep the extend but extend
          // from the result of the extending load. For example:
          //    %1:_(s8) = G_LOAD ...
          //    %2:_(s32) = G_SEXT %1(s8)
          //    %3:_(s64) = G_ANYEXT %1(s8)
          // rewrites to:
          //    %2:_(s32) = G_SEXTLOAD ...
          //    %3:_(s64) = G_ANYEXT %2:_(s32)
          replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
        } else {
          // If the preferred size is large, then insert a truncate. For
          // example:
          //    %1:_(s8) = G_LOAD ...
          //    %2:_(s64) = G_SEXT %1(s8)
          //    %3:_(s32) = G_ZEXT %1(s8)
          // rewrites to:
          //    %2:_(s64) = G_SEXTLOAD ...
          //    %4:_(s8) = G_TRUNC %2:_(s32)
          //    %3:_(s64) = G_ZEXT %2:_(s8)
          InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
                                                 InsertTruncAt);
        }
        continue;
      }
      // The use is (one of) the uses of the preferred use we chose earlier.
      // We're going to update the load to def this value later so just erase
      // the old extend.
      Observer.erasingInstr(*UseMO->getParent());
      UseMO->getParent()->eraseFromParent();
      continue;
    }

    // The use isn't an extend. Truncate back to the type we originally loaded.
    // This is free on many targets.
    InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
  }

  MI.getOperand(0).setReg(ChosenDstReg);
  Observer.changedInstr(MI);
}
bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
                                                 BuildFnTy &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  // If we have the following code:
  //  %mask = G_CONSTANT 255
  //  %ld   = G_LOAD %ptr, (load s16)
  //  %and  = G_AND %ld, %mask
  //
  // Try to fold it into
  //   %ld = G_ZEXTLOAD %ptr, (load s8)

  Register Dst = MI.getOperand(0).getReg();
  if (MRI.getType(Dst).isVector())
    return false;

  auto MaybeMask =
      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
  if (!MaybeMask)
    return false;

  APInt MaskVal = MaybeMask->Value;

  if (!MaskVal.isMask())
    return false;

  Register SrcReg = MI.getOperand(1).getReg();
  // Don't use getOpcodeDef() here since intermediate instructions may have
  // multiple users.
  GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg));
  if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg()))
    return false;

  Register LoadReg = LoadMI->getDstReg();
  LLT RegTy = MRI.getType(LoadReg);
  Register PtrReg = LoadMI->getPointerReg();
  unsigned RegSize = RegTy.getSizeInBits();
  uint64_t LoadSizeBits = LoadMI->getMemSizeInBits();
  unsigned MaskSizeBits = MaskVal.countr_one();

  // The mask may not be larger than the in-memory type, as it might cover sign
  // extended bits
  if (MaskSizeBits > LoadSizeBits)
    return false;

  // If the mask covers the whole destination register, there's nothing to
  // extend
  if (MaskSizeBits >= RegSize)
    return false;

  // Most targets cannot deal with loads of size < 8 and need to re-legalize to
  // at least byte loads. Avoid creating such loads here
  if (MaskSizeBits < 8 || !isPowerOf2_32(MaskSizeBits))
    return false;

  const MachineMemOperand &MMO = LoadMI->getMMO();
  LegalityQuery::MemDesc MemDesc(MMO);

  // Don't modify the memory access size if this is atomic/volatile, but we can
  // still adjust the opcode to indicate the high bit behavior.
  if (LoadMI->isSimple())
    MemDesc.MemoryTy = LLT::scalar(MaskSizeBits);
  else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize)
    return false;

  // TODO: Could check if it's legal with the reduced or original memory size.
  if (!isLegalOrBeforeLegalizer(
          {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}}))
    return false;

  MatchInfo = [=](MachineIRBuilder &B) {
    B.setInstrAndDebugLoc(*LoadMI);
    auto &MF = B.getMF();
    auto PtrInfo = MMO.getPointerInfo();
    auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy);
    B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO);
    LoadMI->eraseFromParent();
  };
  return true;
}
bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
                                   const MachineInstr &UseMI) {
  assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
         "shouldn't consider debug uses");
  assert(DefMI.getParent() == UseMI.getParent());
  if (&DefMI == &UseMI)
    return true;
  const MachineBasicBlock &MBB = *DefMI.getParent();
  auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) {
    return &MI == &DefMI || &MI == &UseMI;
  });
  if (DefOrUse == MBB.end())
    llvm_unreachable("Block must contain both DefMI and UseMI!");
  return &*DefOrUse == &DefMI;
}
bool CombinerHelper::dominates(const MachineInstr &DefMI,
                               const MachineInstr &UseMI) {
  assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
         "shouldn't consider debug uses");
  if (MDT)
    return MDT->dominates(&DefMI, &UseMI);
  else if (DefMI.getParent() != UseMI.getParent())
    return false;

  return isPredecessor(DefMI, UseMI);
}
bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Register SrcReg = MI.getOperand(1).getReg();
  Register LoadUser = SrcReg;

  if (MRI.getType(SrcReg).isVector())
    return false;

  Register TruncSrc;
  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
    LoadUser = TruncSrc;

  uint64_t SizeInBits = MI.getOperand(2).getImm();
  // If the source is a G_SEXTLOAD from the same bit width, then we don't
  // need any extend at all, just a truncate.
  if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
    // If truncating more than the original extended value, abort.
    auto LoadSizeBits = LoadMI->getMemSizeInBits();
    if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits)
      return false;
    if (LoadSizeBits == SizeInBits)
      return true;
  }
  return false;
}

void CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Builder.setInstrAndDebugLoc(MI);
  Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
  MI.eraseFromParent();
}
bool CombinerHelper::matchSextInRegOfLoad(
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);

  Register DstReg = MI.getOperand(0).getReg();
  LLT RegTy = MRI.getType(DstReg);

  // Only supports scalars for now.
  if (RegTy.isVector())
    return false;

  Register SrcReg = MI.getOperand(1).getReg();
  auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI);
  if (!LoadDef || !MRI.hasOneNonDBGUse(DstReg))
    return false;

  uint64_t MemBits = LoadDef->getMemSizeInBits();

  // If the sign extend extends from a narrower width than the load's width,
  // then we can narrow the load width when we combine to a G_SEXTLOAD.
  // Avoid widening the load at all.
  unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits);

  // Don't generate G_SEXTLOADs with a < 1 byte width.
  if (NewSizeBits < 8)
    return false;
  // Don't bother creating a non-power-2 sextload, it will likely be broken up
  // anyway for most targets.
  if (!isPowerOf2_32(NewSizeBits))
    return false;

  const MachineMemOperand &MMO = LoadDef->getMMO();
  LegalityQuery::MemDesc MMDesc(MMO);

  // Don't modify the memory access size if this is atomic/volatile, but we can
  // still adjust the opcode to indicate the high bit behavior.
  if (LoadDef->isSimple())
    MMDesc.MemoryTy = LLT::scalar(NewSizeBits);
  else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits())
    return false;

  // TODO: Could check if it's legal with the reduced or original memory size.
  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SEXTLOAD,
                                 {MRI.getType(LoadDef->getDstReg()),
                                  MRI.getType(LoadDef->getPointerReg())},
                                 {MMDesc}}))
    return false;

  MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits);
  return true;
}
void CombinerHelper::applySextInRegOfLoad(
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Register LoadReg;
  unsigned ScalarSizeBits;
  std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
  GLoad *LoadDef = cast<GLoad>(MRI.getVRegDef(LoadReg));

  // If we have the following:
  // %ld = G_LOAD %ptr, (load 2)
  // %ext = G_SEXT_INREG %ld, 8
  //    ==>
  // %ld = G_SEXTLOAD %ptr (load 1)

  auto &MMO = LoadDef->getMMO();
  Builder.setInstrAndDebugLoc(*LoadDef);
  auto &MF = Builder.getMF();
  auto PtrInfo = MMO.getPointerInfo();
  auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
  Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
                         LoadDef->getPointerReg(), *NewMMO);
  MI.eraseFromParent();
}
bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
                                            Register &Base, Register &Offset) {
  auto &MF = *MI.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();

  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
         Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);

  Base = MI.getOperand(1).getReg();
  MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
  if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
    return false;

  LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
  // FIXME: The following use traversal needs a bail out for pathological cases.
  for (auto &Use : MRI.use_nodbg_instructions(Base)) {
    if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
      continue;

    Offset = Use.getOperand(2).getReg();
    if (!ForceLegalIndexing &&
        !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
      LLVM_DEBUG(dbgs() << "    Ignoring candidate with illegal addrmode: "
                        << Use);
      continue;
    }

    // Make sure the offset calculation is before the potentially indexed op.
    // FIXME: we really care about dependency here. The offset calculation might
    // be movable.
    MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
    if (!OffsetDef || !dominates(*OffsetDef, MI)) {
      LLVM_DEBUG(dbgs() << "    Ignoring candidate with offset after mem-op: "
                        << Use);
      continue;
    }

    // FIXME: check whether all uses of Base are load/store with foldable
    // addressing modes. If so, using the normal addr-modes is better than
    // forming an indexed one.

    bool MemOpDominatesAddrUses = true;
    for (auto &PtrAddUse :
         MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) {
      if (!dominates(MI, PtrAddUse)) {
        MemOpDominatesAddrUses = false;
        break;
      }
    }

    if (!MemOpDominatesAddrUses) {
      LLVM_DEBUG(
          dbgs() << "    Ignoring candidate as memop does not dominate uses: "
                 << Use);
      continue;
    }

    LLVM_DEBUG(dbgs() << "    Found match: " << Use);
    Addr = Use.getOperand(0).getReg();
    return true;
  }

  return false;
}
bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
                                           Register &Base, Register &Offset) {
  auto &MF = *MI.getParent()->getParent();
  const auto &TLI = *MF.getSubtarget().getTargetLowering();

  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
         Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);

  Addr = MI.getOperand(1).getReg();
  MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
  if (!AddrDef || MRI.hasOneNonDBGUse(Addr))
    return false;

  Base = AddrDef->getOperand(1).getReg();
  Offset = AddrDef->getOperand(2).getReg();

  LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);

  if (!ForceLegalIndexing &&
      !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
    LLVM_DEBUG(dbgs() << "    Skipping, not legal for target");
    return false;
  }

  MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
  if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    LLVM_DEBUG(dbgs() << "    Skipping, frame index would need copy anyway.");
    return false;
  }

  if (MI.getOpcode() == TargetOpcode::G_STORE) {
    // Would require a copy.
    if (Base == MI.getOperand(0).getReg()) {
      LLVM_DEBUG(dbgs() << "    Skipping, storing base so need copy anyway.");
      return false;
    }

    // We're expecting one use of Addr in MI, but it could also be the
    // value stored, which isn't actually dominated by the instruction.
    if (MI.getOperand(0).getReg() == Addr) {
      LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses");
      return false;
    }
  }

  // FIXME: check whether all uses of the base pointer are constant PtrAdds.
  // That might allow us to end base's liveness here by adjusting the constant.

  for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) {
    if (!dominates(MI, UseMI)) {
      LLVM_DEBUG(dbgs() << "    Skipping, does not dominate all addr uses.");
      return false;
    }
  }

  return true;
}
bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
  IndexedLoadStoreMatchInfo MatchInfo;
  if (matchCombineIndexedLoadStore(MI, MatchInfo)) {
    applyCombineIndexedLoadStore(MI, MatchInfo);
    return true;
  }
  return false;
}
bool CombinerHelper::matchCombineIndexedLoadStore(
    MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
  unsigned Opcode = MI.getOpcode();
  if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
      Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
    return false;

  // For now, no targets actually support these opcodes so don't waste time
  // running these unless we're forced to for testing.
  if (!ForceLegalIndexing)
    return false;

  MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
                                          MatchInfo.Offset);
  if (!MatchInfo.IsPre &&
      !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
                              MatchInfo.Offset))
    return false;

  return true;
}
void CombinerHelper::applyCombineIndexedLoadStore(
    MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
  MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
  MachineIRBuilder MIRBuilder(MI);
  unsigned Opcode = MI.getOpcode();
  bool IsStore = Opcode == TargetOpcode::G_STORE;
  unsigned NewOpcode;
  switch (Opcode) {
  case TargetOpcode::G_LOAD:
    NewOpcode = TargetOpcode::G_INDEXED_LOAD;
    break;
  case TargetOpcode::G_SEXTLOAD:
    NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
    break;
  case TargetOpcode::G_ZEXTLOAD:
    NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
    break;
  case TargetOpcode::G_STORE:
    NewOpcode = TargetOpcode::G_INDEXED_STORE;
    break;
  default:
    llvm_unreachable("Unknown load/store opcode");
  }

  auto MIB = MIRBuilder.buildInstr(NewOpcode);
  if (IsStore) {
    MIB.addDef(MatchInfo.Addr);
    MIB.addUse(MI.getOperand(0).getReg());
  } else {
    MIB.addDef(MI.getOperand(0).getReg());
    MIB.addDef(MatchInfo.Addr);
  }

  MIB.addUse(MatchInfo.Base);
  MIB.addUse(MatchInfo.Offset);
  MIB.addImm(MatchInfo.IsPre);
  MI.eraseFromParent();
  AddrDef.eraseFromParent();

  LLVM_DEBUG(dbgs() << "    Combined to indexed operation");
}
bool CombinerHelper::matchCombineDivRem(MachineInstr &MI,
                                        MachineInstr *&OtherMI) {
  unsigned Opcode = MI.getOpcode();
  bool IsDiv, IsSigned;

  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV: {
    IsDiv = true;
    IsSigned = Opcode == TargetOpcode::G_SDIV;
    break;
  }
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM: {
    IsDiv = false;
    IsSigned = Opcode == TargetOpcode::G_SREM;
    break;
  }
  }

  Register Src1 = MI.getOperand(1).getReg();
  unsigned DivOpcode, RemOpcode, DivremOpcode;
  if (IsSigned) {
    DivOpcode = TargetOpcode::G_SDIV;
    RemOpcode = TargetOpcode::G_SREM;
    DivremOpcode = TargetOpcode::G_SDIVREM;
  } else {
    DivOpcode = TargetOpcode::G_UDIV;
    RemOpcode = TargetOpcode::G_UREM;
    DivremOpcode = TargetOpcode::G_UDIVREM;
  }

  if (!isLegalOrBeforeLegalizer({DivremOpcode, {MRI.getType(Src1)}}))
    return false;

  // Combine:
  //   %div:_ = G_[SU]DIV %src1:_, %src2:_
  //   %rem:_ = G_[SU]REM %src1:_, %src2:_
  // into:
  //   %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
  //
  // Combine:
  //   %rem:_ = G_[SU]REM %src1:_, %src2:_
  //   %div:_ = G_[SU]DIV %src1:_, %src2:_
  // into:
  //   %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_
  for (auto &UseMI : MRI.use_nodbg_instructions(Src1)) {
    if (MI.getParent() == UseMI.getParent() &&
        ((IsDiv && UseMI.getOpcode() == RemOpcode) ||
         (!IsDiv && UseMI.getOpcode() == DivOpcode)) &&
        matchEqualDefs(MI.getOperand(2), UseMI.getOperand(2)) &&
        matchEqualDefs(MI.getOperand(1), UseMI.getOperand(1))) {
      OtherMI = &UseMI;
      return true;
    }
  }

  return false;
}
void CombinerHelper::applyCombineDivRem(MachineInstr &MI,
                                        MachineInstr *&OtherMI) {
  unsigned Opcode = MI.getOpcode();
  assert(OtherMI && "OtherMI shouldn't be empty.");

  Register DestDivReg, DestRemReg;
  if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) {
    DestDivReg = MI.getOperand(0).getReg();
    DestRemReg = OtherMI->getOperand(0).getReg();
  } else {
    DestDivReg = OtherMI->getOperand(0).getReg();
    DestRemReg = MI.getOperand(0).getReg();
  }

  bool IsSigned =
      Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM;

  // Check which instruction is first in the block so we don't break def-use
  // deps by "moving" the instruction incorrectly. Also keep track of which
  // instruction is first so we pick its operands, avoiding use-before-def
  // bugs.
  MachineInstr *FirstInst;
  if (dominates(MI, *OtherMI)) {
    Builder.setInstrAndDebugLoc(MI);
    FirstInst = &MI;
  } else {
    Builder.setInstrAndDebugLoc(*OtherMI);
    FirstInst = OtherMI;
  }

  Builder.buildInstr(IsSigned ? TargetOpcode::G_SDIVREM
                              : TargetOpcode::G_UDIVREM,
                     {DestDivReg, DestRemReg},
                     {FirstInst->getOperand(1), FirstInst->getOperand(2)});
  MI.eraseFromParent();
  OtherMI->eraseFromParent();
}
bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI,
                                                   MachineInstr *&BrCond) {
  assert(MI.getOpcode() == TargetOpcode::G_BR);

  // Try to match the following:
  // bb1:
  //   G_BRCOND %c1, %bb2
  //   G_BR %bb3
  // bb2:
  // ...
  // bb3:
  //
  // The above pattern does not have a fall through to the successor bb2, always
  // resulting in a branch no matter which path is taken. Here we try to find
  // and replace that pattern with conditional branch to bb3 and otherwise
  // fallthrough to bb2. This is generally better for branch predictors.

  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator BrIt(MI);
  if (BrIt == MBB->begin())
    return false;
  assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");

  BrCond = &*std::prev(BrIt);
  if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
    return false;

  // Check that the next block is the conditional branch target. Also make sure
  // that it isn't the same as the G_BR's target (otherwise, this will loop.)
  MachineBasicBlock *BrCondTarget = BrCond->getOperand(1).getMBB();
  return BrCondTarget != MI.getOperand(0).getMBB() &&
         MBB->isLayoutSuccessor(BrCondTarget);
}
void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI,
                                                   MachineInstr *&BrCond) {
  MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
  Builder.setInstrAndDebugLoc(*BrCond);
  LLT Ty = MRI.getType(BrCond->getOperand(0).getReg());
  // FIXME: Does int/fp matter for this? If so, we might need to restrict
  // this to i1 only since we might not know for sure what kind of
  // compare generated the condition value.
  auto True = Builder.buildConstant(
      Ty, getICmpTrueVal(getTargetLowering(), false, false));
  auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True);

  auto *FallthroughBB = BrCond->getOperand(1).getMBB();
  Observer.changingInstr(MI);
  MI.getOperand(0).setMBB(FallthroughBB);
  Observer.changedInstr(MI);

  // Change the conditional branch to use the inverted condition and
  // new target block.
  Observer.changingInstr(*BrCond);
  BrCond->getOperand(0).setReg(Xor.getReg(0));
  BrCond->getOperand(1).setMBB(BrTarget);
  Observer.changedInstr(*BrCond);
}
static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
  if (Ty.isVector())
    return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
                                Ty.getNumElements());
  return IntegerType::get(C, Ty.getSizeInBits());
}
bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI) {
  MachineIRBuilder HelperBuilder(MI);
  GISelObserverWrapper DummyObserver;
  LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
  return Helper.lowerMemcpyInline(MI) ==
         LegalizerHelper::LegalizeResult::Legalized;
}

bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
  MachineIRBuilder HelperBuilder(MI);
  GISelObserverWrapper DummyObserver;
  LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder);
  return Helper.lowerMemCpyFamily(MI, MaxLen) ==
         LegalizerHelper::LegalizeResult::Legalized;
}
static APFloat constantFoldFpUnary(const MachineInstr &MI,
                                   const MachineRegisterInfo &MRI,
                                   const APFloat &Val) {
  APFloat Result(Val);
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case TargetOpcode::G_FNEG: {
    Result.changeSign();
    return Result;
  }
  case TargetOpcode::G_FABS: {
    Result.clearSign();
    return Result;
  }
  case TargetOpcode::G_FPTRUNC: {
    bool Unused;
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    Result.convert(getFltSemanticForLLT(DstTy), APFloat::rmNearestTiesToEven,
                   &Unused);
    return Result;
  }
  case TargetOpcode::G_FSQRT: {
    bool Unused;
    Result.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
                   &Unused);
    Result = APFloat(sqrt(Result.convertToDouble()));
    break;
  }
  case TargetOpcode::G_FLOG2: {
    bool Unused;
    Result.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
                   &Unused);
    Result = APFloat(log2(Result.convertToDouble()));
    break;
  }
  }
  // Convert `APFloat` to appropriate IEEE type depending on `DstTy`. Otherwise,
  // `buildFConstant` will assert on size mismatch. Only `G_FSQRT` and
  // `G_FLOG2` reach here.
  bool Unused;
  Result.convert(Val.getSemantics(), APFloat::rmNearestTiesToEven, &Unused);
  return Result;
}
void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI,
                                                     const ConstantFP *Cst) {
  Builder.setInstrAndDebugLoc(MI);
  APFloat Folded = constantFoldFpUnary(MI, MRI, Cst->getValue());
  const ConstantFP *NewCst = ConstantFP::get(Builder.getContext(), Folded);
  Builder.buildFConstant(MI.getOperand(0), *NewCst);
  MI.eraseFromParent();
}
bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
                                           PtrAddChain &MatchInfo) {
  // We're trying to match the following pattern:
  //   %t1 = G_PTR_ADD %base, G_CONSTANT imm1
  //   %root = G_PTR_ADD %t1, G_CONSTANT imm2
  // -->
  //   %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)

  if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
    return false;

  Register Add2 = MI.getOperand(1).getReg();
  Register Imm1 = MI.getOperand(2).getReg();
  auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
  if (!MaybeImmVal)
    return false;

  MachineInstr *Add2Def = MRI.getVRegDef(Add2);
  if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
    return false;

  Register Base = Add2Def->getOperand(1).getReg();
  Register Imm2 = Add2Def->getOperand(2).getReg();
  auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
  if (!MaybeImm2Val)
    return false;

  // Check if the new combined immediate forms an illegal addressing mode.
  // Do not combine if it was legal before but would get illegal.
  // To do so, we need to find a load/store user of the pointer to get
  // the access type.
  Type *AccessTy = nullptr;
  auto &MF = *MI.getMF();
  for (auto &UseMI : MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) {
    if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) {
      AccessTy = getTypeForLLT(MRI.getType(LdSt->getReg(0)),
                               MF.getFunction().getContext());
      break;
    }
  }
  TargetLoweringBase::AddrMode AMNew;
  APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value;
  AMNew.BaseOffs = CombinedImm.getSExtValue();
  if (AccessTy) {
    AMNew.HasBaseReg = true;
    TargetLoweringBase::AddrMode AMOld;
    AMOld.BaseOffs = MaybeImm2Val->Value.getSExtValue();
    AMOld.HasBaseReg = true;
    unsigned AS = MRI.getType(Add2).getAddressSpace();
    const auto &TLI = *MF.getSubtarget().getTargetLowering();
    if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) &&
        !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS))
      return false;
  }

  // Pass the combined immediate to the apply function.
  MatchInfo.Imm = AMNew.BaseOffs;
  MatchInfo.Base = Base;
  MatchInfo.Bank = getRegBank(Imm2);
  return true;
}
void CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
                                           PtrAddChain &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
  MachineIRBuilder MIB(MI);
  LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
  auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
  setRegBank(NewOffset.getReg(0), MatchInfo.Bank);
  Observer.changingInstr(MI);
  MI.getOperand(1).setReg(MatchInfo.Base);
  MI.getOperand(2).setReg(NewOffset.getReg(0));
  Observer.changedInstr(MI);
}
bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI,
                                          RegisterImmPair &MatchInfo) {
  // We're trying to match the following pattern with any of
  // G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions:
  //   %t1 = SHIFT %base, G_CONSTANT imm1
  //   %root = SHIFT %t1, G_CONSTANT imm2
  // -->
  //   %root = SHIFT %base, G_CONSTANT (imm1 + imm2)

  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");

  Register Shl2 = MI.getOperand(1).getReg();
  Register Imm1 = MI.getOperand(2).getReg();
  auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI);
  if (!MaybeImmVal)
    return false;

  MachineInstr *Shl2Def = MRI.getUniqueVRegDef(Shl2);
  if (Shl2Def->getOpcode() != Opcode)
    return false;

  Register Base = Shl2Def->getOperand(1).getReg();
  Register Imm2 = Shl2Def->getOperand(2).getReg();
  auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI);
  if (!MaybeImm2Val)
    return false;

  // Pass the combined immediate to the apply function.
  MatchInfo.Imm =
      (MaybeImmVal->Value.getSExtValue() + MaybeImm2Val->Value).getSExtValue();
  MatchInfo.Reg = Base;

  // There is no simple replacement for a saturating unsigned left shift that
  // exceeds the scalar size.
  if (Opcode == TargetOpcode::G_USHLSAT &&
      MatchInfo.Imm >= MRI.getType(Shl2).getScalarSizeInBits())
    return false;

  return true;
}
void CombinerHelper::applyShiftImmedChain(MachineInstr &MI,
                                          RegisterImmPair &MatchInfo) {
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
          Opcode == TargetOpcode::G_USHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");

  Builder.setInstrAndDebugLoc(MI);
  LLT Ty = MRI.getType(MI.getOperand(1).getReg());
  unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits();
  auto Imm = MatchInfo.Imm;

  if (Imm >= ScalarSizeInBits) {
    // Any logical shift that exceeds scalar size will produce zero.
    if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
      Builder.buildConstant(MI.getOperand(0), 0);
      MI.eraseFromParent();
      return;
    }
    // Arithmetic shift and saturating signed left shift have no effect beyond
    // scalar size.
    Imm = ScalarSizeInBits - 1;
  }

  LLT ImmTy = MRI.getType(MI.getOperand(2).getReg());
  Register NewImm = Builder.buildConstant(ImmTy, Imm).getReg(0);
  Observer.changingInstr(MI);
  MI.getOperand(1).setReg(MatchInfo.Reg);
  MI.getOperand(2).setReg(NewImm);
  Observer.changedInstr(MI);
}
bool CombinerHelper::matchShiftOfShiftedLogic(MachineInstr &MI,
                                              ShiftOfShiftedLogic &MatchInfo) {
  // We're trying to match the following pattern with any of
  // G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination
  // with any of G_AND/G_OR/G_XOR logic instructions.
  //   %t1 = SHIFT %X, G_CONSTANT C0
  //   %t2 = LOGIC %t1, %Y
  //   %root = SHIFT %t2, G_CONSTANT C1
  // -->
  //   %t3 = SHIFT %X, G_CONSTANT (C0+C1)
  //   %t4 = SHIFT %Y, G_CONSTANT C1
  //   %root = LOGIC %t3, %t4
  unsigned ShiftOpcode = MI.getOpcode();
  assert((ShiftOpcode == TargetOpcode::G_SHL ||
          ShiftOpcode == TargetOpcode::G_ASHR ||
          ShiftOpcode == TargetOpcode::G_LSHR ||
          ShiftOpcode == TargetOpcode::G_USHLSAT ||
          ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");

  // Match a one-use bitwise logic op.
  Register LogicDest = MI.getOperand(1).getReg();
  if (!MRI.hasOneNonDBGUse(LogicDest))
    return false;

  MachineInstr *LogicMI = MRI.getUniqueVRegDef(LogicDest);
  unsigned LogicOpcode = LogicMI->getOpcode();
  if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
      LogicOpcode != TargetOpcode::G_XOR)
    return false;

  // Find a matching one-use shift by constant.
  const Register C1 = MI.getOperand(2).getReg();
  auto MaybeImmVal = getIConstantVRegValWithLookThrough(C1, MRI);
  if (!MaybeImmVal)
    return false;

  const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();

  auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) {
    // Shift should match previous one and should be a one-use.
    if (MI->getOpcode() != ShiftOpcode ||
        !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
      return false;

    // Must be a constant.
    auto MaybeImmVal =
        getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
    if (!MaybeImmVal)
      return false;

    ShiftVal = MaybeImmVal->Value.getSExtValue();
    return true;
  };

  // Logic ops are commutative, so check each operand for a match.
  Register LogicMIReg1 = LogicMI->getOperand(1).getReg();
  MachineInstr *LogicMIOp1 = MRI.getUniqueVRegDef(LogicMIReg1);
  Register LogicMIReg2 = LogicMI->getOperand(2).getReg();
  MachineInstr *LogicMIOp2 = MRI.getUniqueVRegDef(LogicMIReg2);
  uint64_t C0Val;

  if (matchFirstShift(LogicMIOp1, C0Val)) {
    MatchInfo.LogicNonShiftReg = LogicMIReg2;
    MatchInfo.Shift2 = LogicMIOp1;
  } else if (matchFirstShift(LogicMIOp2, C0Val)) {
    MatchInfo.LogicNonShiftReg = LogicMIReg1;
    MatchInfo.Shift2 = LogicMIOp2;
  } else
    return false;

  MatchInfo.ValSum = C0Val + C1Val;

  // The fold is not valid if the sum of the shift values exceeds bitwidth.
  if (MatchInfo.ValSum >= MRI.getType(LogicDest).getScalarSizeInBits())
    return false;

  MatchInfo.Logic = LogicMI;
  return true;
}
void CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI,
                                              ShiftOfShiftedLogic &MatchInfo) {
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
          Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
          Opcode == TargetOpcode::G_SSHLSAT) &&
         "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");

  LLT ShlType = MRI.getType(MI.getOperand(2).getReg());
  LLT DestType = MRI.getType(MI.getOperand(0).getReg());
  Builder.setInstrAndDebugLoc(MI);

  Register Const = Builder.buildConstant(ShlType, MatchInfo.ValSum).getReg(0);

  Register Shift1Base = MatchInfo.Shift2->getOperand(1).getReg();
  Register Shift1 =
      Builder.buildInstr(Opcode, {DestType}, {Shift1Base, Const}).getReg(0);

  // If LogicNonShiftReg is the same as Shift1Base, and the shift1 const is the
  // same as the MatchInfo.Shift2 const, CSEMIRBuilder will reuse the old shift1
  // when building shift2. So, if we erase MatchInfo.Shift2 at the end, we would
  // actually remove the old shift1, which would cause a crash later. So erase
  // it earlier to avoid the crash.
  MatchInfo.Shift2->eraseFromParent();

  Register Shift2Const = MI.getOperand(2).getReg();
  Register Shift2 = Builder
                        .buildInstr(Opcode, {DestType},
                                    {MatchInfo.LogicNonShiftReg, Shift2Const})
                        .getReg(0);

  Register Dest = MI.getOperand(0).getReg();
  Builder.buildInstr(MatchInfo.Logic->getOpcode(), {Dest}, {Shift1, Shift2});

  // This was one use so it's safe to remove it.
  MatchInfo.Logic->eraseFromParent();

  MI.eraseFromParent();
}
bool CombinerHelper::matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHL && "Expected G_SHL");
  // Combine (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  // Combine (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
  auto &Shl = cast<GenericMachineInstr>(MI);
  Register DstReg = Shl.getReg(0);
  Register SrcReg = Shl.getReg(1);
  Register ShiftReg = Shl.getReg(2);
  Register X, C1;

  if (!getTargetLowering().isDesirableToCommuteWithShift(MI, !isPreLegalize()))
    return false;

  if (!mi_match(SrcReg, MRI,
                m_OneNonDBGUse(m_any_of(m_GAdd(m_Reg(X), m_Reg(C1)),
                                        m_GOr(m_Reg(X), m_Reg(C1))))))
    return false;

  APInt C1Val, C2Val;
  if (!mi_match(C1, MRI, m_ICstOrSplat(C1Val)) ||
      !mi_match(ShiftReg, MRI, m_ICstOrSplat(C2Val)))
    return false;

  auto *SrcDef = MRI.getVRegDef(SrcReg);
  assert((SrcDef->getOpcode() == TargetOpcode::G_ADD ||
          SrcDef->getOpcode() == TargetOpcode::G_OR) &&
         "Unexpected op");
  LLT SrcTy = MRI.getType(SrcReg);
  MatchInfo = [=](MachineIRBuilder &B) {
    auto S1 = B.buildShl(SrcTy, X, ShiftReg);
    auto S2 = B.buildShl(SrcTy, C1, ShiftReg);
    B.buildInstr(SrcDef->getOpcode(), {DstReg}, {S1, S2});
  };
  return true;
}
bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
                                          unsigned &ShiftVal) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
  auto MaybeImmVal =
      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
  if (!MaybeImmVal)
    return false;

  ShiftVal = MaybeImmVal->Value.exactLogBase2();
  return (static_cast<int32_t>(ShiftVal) != -1);
}
&MI
,
1673 unsigned &ShiftVal
) {
1674 assert(MI
.getOpcode() == TargetOpcode::G_MUL
&& "Expected a G_MUL");
1675 MachineIRBuilder
MIB(MI
);
1676 LLT ShiftTy
= MRI
.getType(MI
.getOperand(0).getReg());
1677 auto ShiftCst
= MIB
.buildConstant(ShiftTy
, ShiftVal
);
1678 Observer
.changingInstr(MI
);
1679 MI
.setDesc(MIB
.getTII().get(TargetOpcode::G_SHL
));
1680 MI
.getOperand(2).setReg(ShiftCst
.getReg(0));
1681 Observer
.changedInstr(MI
);
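// Illustrative example of the G_MUL -> G_SHL fold above (hand-written, not
// from a test): a multiply by the power-of-two constant 8 becomes a shift by
// log2(8) = 3:
//   %c:_(s32) = G_CONSTANT i32 8
//   %d:_(s32) = G_MUL %x:_(s32), %c
// -->
//   %s:_(s32) = G_CONSTANT i32 3
//   %d:_(s32) = G_SHL %x:_(s32), %s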
// shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
                                             RegisterImmPair &MatchData) {
  assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);

  Register LHS = MI.getOperand(1).getReg();

  Register ExtSrc;
  if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
      !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
      !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
    return false;

  Register RHS = MI.getOperand(2).getReg();
  MachineInstr *MIShiftAmt = MRI.getVRegDef(RHS);
  auto MaybeShiftAmtVal = isConstantOrConstantSplatVector(*MIShiftAmt, MRI);
  if (!MaybeShiftAmtVal)
    return false;

  LLT SrcTy = MRI.getType(ExtSrc);

  // We only really care about the legality with the shifted value. We can
  // pick any type for the constant shift amount, so ask the target what to
  // use. Otherwise we would have to guess and hope it is reported as legal.
  LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
    return false;

  int64_t ShiftAmt = MaybeShiftAmtVal->getSExtValue();
  MatchData.Reg = ExtSrc;
  MatchData.Imm = ShiftAmt;

  unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countl_one();
  unsigned SrcTySize = MRI.getType(ExtSrc).getScalarSizeInBits();
  return MinLeadingZeros >= ShiftAmt && ShiftAmt < SrcTySize;
}

void CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
                                             const RegisterImmPair &MatchData) {
  Register ExtSrcReg = MatchData.Reg;
  int64_t ShiftAmtVal = MatchData.Imm;

  LLT ExtSrcTy = MRI.getType(ExtSrcReg);
  Builder.setInstrAndDebugLoc(MI);
  auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
  auto NarrowShift =
      Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
  Builder.buildZExt(MI.getOperand(0), NarrowShift);
  MI.eraseFromParent();
}
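// Illustrative example of the shl-of-extend fold above (hand-written sketch):
// if the known leading zeros of %x guarantee the shift cannot overflow the
// narrow type,
//   %e:_(s64) = G_ZEXT %x:_(s32)
//   %d:_(s64) = G_SHL %e, %c
// becomes
//   %n:_(s32) = G_SHL %x:_(s32), %c'
//   %d:_(s64) = G_ZEXT %n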
bool CombinerHelper::matchCombineMergeUnmerge(MachineInstr &MI,
                                              Register &MatchInfo) {
  GMerge &Merge = cast<GMerge>(MI);
  SmallVector<Register, 16> MergedValues;
  for (unsigned I = 0; I < Merge.getNumSources(); ++I)
    MergedValues.emplace_back(Merge.getSourceReg(I));

  auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI);
  if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources())
    return false;

  for (unsigned I = 0; I < MergedValues.size(); ++I)
    if (MergedValues[I] != Unmerge->getReg(I))
      return false;

  MatchInfo = Unmerge->getSourceReg();
  return true;
}
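// Illustrative example of the merge(unmerge) fold above (hand-written): when
// every source of the G_MERGE_VALUES is the corresponding def of a single
// G_UNMERGE_VALUES, the pair is a no-op and %d can simply reuse %x:
//   %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %x:_(s64)
//   %d:_(s64) = G_MERGE_VALUES %a, %b
// -->
//   %d = %x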
static Register peekThroughBitcast(Register Reg,
                                   const MachineRegisterInfo &MRI) {
  while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
    ;

  return Reg;
}
bool CombinerHelper::matchCombineUnmergeMergeToPlainValues(
    MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  auto &Unmerge = cast<GUnmerge>(MI);
  Register SrcReg = peekThroughBitcast(Unmerge.getSourceReg(), MRI);

  auto *SrcInstr = getOpcodeDef<GMergeLikeInstr>(SrcReg, MRI);
  if (!SrcInstr)
    return false;

  // Check the source type of the merge.
  LLT SrcMergeTy = MRI.getType(SrcInstr->getSourceReg(0));
  LLT Dst0Ty = MRI.getType(Unmerge.getReg(0));
  bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits();
  if (SrcMergeTy != Dst0Ty && !SameSize)
    return false;
  // They are the same now (modulo a bitcast).
  // We can collect all the src registers.
  for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx)
    Operands.push_back(SrcInstr->getSourceReg(Idx));
  return true;
}

void CombinerHelper::applyCombineUnmergeMergeToPlainValues(
    MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  assert((MI.getNumOperands() - 1 == Operands.size()) &&
         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;

  LLT SrcTy = MRI.getType(Operands[0]);
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  bool CanReuseInputDirectly = DstTy == SrcTy;
  Builder.setInstrAndDebugLoc(MI);
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
    Register DstReg = MI.getOperand(Idx).getReg();
    Register SrcReg = Operands[Idx];

    // This combine may run after RegBankSelect, so we need to be aware of
    // register banks.
    const auto &DstCB = MRI.getRegClassOrRegBank(DstReg);
    if (!DstCB.isNull() && DstCB != MRI.getRegClassOrRegBank(SrcReg)) {
      SrcReg = Builder.buildCopy(MRI.getType(SrcReg), SrcReg).getReg(0);
      MRI.setRegClassOrRegBank(SrcReg, DstCB);
    }

    if (CanReuseInputDirectly)
      replaceRegWith(MRI, DstReg, SrcReg);
    else
      Builder.buildCast(DstReg, SrcReg);
  }
  MI.eraseFromParent();
}
bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI,
                                                 SmallVectorImpl<APInt> &Csts) {
  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();
  MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
  if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
      SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
    return false;
  // Break down the big constant in smaller ones.
  const MachineOperand &CstVal = SrcInstr->getOperand(1);
  APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT
                  ? CstVal.getCImm()->getValue()
                  : CstVal.getFPImm()->getValueAPF().bitcastToAPInt();

  LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
  unsigned ShiftAmt = Dst0Ty.getSizeInBits();
  // Unmerge a constant.
  for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
    Csts.emplace_back(Val.trunc(ShiftAmt));
    Val = Val.lshr(ShiftAmt);
  }

  return true;
}

void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
                                                 SmallVectorImpl<APInt> &Csts) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  assert((MI.getNumOperands() - 1 == Csts.size()) &&
         "Not enough operands to replace all defs");
  unsigned NumElems = MI.getNumOperands() - 1;
  Builder.setInstrAndDebugLoc(MI);
  for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
    Register DstReg = MI.getOperand(Idx).getReg();
    Builder.buildConstant(DstReg, Csts[Idx]);
  }

  MI.eraseFromParent();
}
bool CombinerHelper::matchCombineUnmergeUndef(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  unsigned SrcIdx = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(SrcIdx).getReg();
  MatchInfo = [&MI](MachineIRBuilder &B) {
    unsigned NumElems = MI.getNumOperands() - 1;
    for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
      Register DstReg = MI.getOperand(Idx).getReg();
      B.buildUndef(DstReg);
    }
  };
  return isa<GImplicitDef>(MRI.getVRegDef(SrcReg));
}

bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  // Check that all the lanes are dead except the first one.
  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
    if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg()))
      return false;
  }
  return true;
}
void CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
  Builder.setInstrAndDebugLoc(MI);
  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
  // Truncating a vector is going to truncate every single lane,
  // whereas we want the full lowbits.
  // Do the operation on a scalar instead.
  LLT SrcTy = MRI.getType(SrcReg);
  if (SrcTy.isVector())
    SrcReg =
        Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0);

  Register Dst0Reg = MI.getOperand(0).getReg();
  LLT Dst0Ty = MRI.getType(Dst0Reg);
  if (Dst0Ty.isVector()) {
    auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg);
    Builder.buildCast(Dst0Reg, MIB);
  } else
    Builder.buildTrunc(Dst0Reg, SrcReg);
  MI.eraseFromParent();
}
bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");
  Register Dst0Reg = MI.getOperand(0).getReg();
  LLT Dst0Ty = MRI.getType(Dst0Reg);
  // G_ZEXT on vector applies to each lane, so it will
  // affect all destinations. Therefore we won't be able
  // to simplify the unmerge to just the first definition.
  if (Dst0Ty.isVector())
    return false;
  Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
  LLT SrcTy = MRI.getType(SrcReg);
  if (SrcTy.isVector())
    return false;

  Register ZExtSrcReg;
  if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))
    return false;

  // Finally we can replace the first definition with
  // a zext of the source if the definition is big enough to hold
  // all of ZExtSrc bits.
  LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
  return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
}

void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
         "Expected an unmerge");

  Register Dst0Reg = MI.getOperand(0).getReg();

  MachineInstr *ZExtInstr =
      MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg());
  assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT &&
         "Expecting a G_ZEXT");

  Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg();
  LLT Dst0Ty = MRI.getType(Dst0Reg);
  LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);

  Builder.setInstrAndDebugLoc(MI);

  if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
    Builder.buildZExt(Dst0Reg, ZExtSrcReg);
  } else {
    assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() &&
           "ZExt src doesn't fit in destination");
    replaceRegWith(MRI, Dst0Reg, ZExtSrcReg);
  }

  Register ZeroReg;
  for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
    if (!ZeroReg)
      ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0);
    replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg);
  }
  MI.eraseFromParent();
}
bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
                                                unsigned TargetShiftSize,
                                                unsigned &ShiftVal) {
  assert((MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");

  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  if (Ty.isVector()) // TODO:
    return false;

  // Don't narrow further than the requested size.
  unsigned Size = Ty.getSizeInBits();
  if (Size <= TargetShiftSize)
    return false;

  auto MaybeImmVal =
      getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
  if (!MaybeImmVal)
    return false;

  ShiftVal = MaybeImmVal->Value.getSExtValue();
  return ShiftVal >= Size / 2 && ShiftVal < Size;
}
void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
                                                const unsigned &ShiftVal) {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(SrcReg);
  unsigned Size = Ty.getSizeInBits();
  unsigned HalfSize = Size / 2;
  assert(ShiftVal >= HalfSize);

  LLT HalfTy = LLT::scalar(HalfSize);

  Builder.setInstr(MI);
  auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
  unsigned NarrowShiftAmt = ShiftVal - HalfSize;

  if (MI.getOpcode() == TargetOpcode::G_LSHR) {
    Register Narrowed = Unmerge.getReg(1);

    //  dst = G_LSHR s64:x, C for C >= 32
    // =>
    //   lo, hi = G_UNMERGE_VALUES x
    //   dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0

    if (NarrowShiftAmt != 0) {
      Narrowed = Builder.buildLShr(HalfTy, Narrowed,
        Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
    }

    auto Zero = Builder.buildConstant(HalfTy, 0);
    Builder.buildMergeLikeInstr(DstReg, {Narrowed, Zero});
  } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
    Register Narrowed = Unmerge.getReg(0);
    //  dst = G_SHL s64:x, C for C >= 32
    // =>
    //   lo, hi = G_UNMERGE_VALUES x
    //   dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32)
    if (NarrowShiftAmt != 0) {
      Narrowed = Builder.buildShl(HalfTy, Narrowed,
        Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
    }

    auto Zero = Builder.buildConstant(HalfTy, 0);
    Builder.buildMergeLikeInstr(DstReg, {Zero, Narrowed});
  } else {
    assert(MI.getOpcode() == TargetOpcode::G_ASHR);
    auto Hi = Builder.buildAShr(
      HalfTy, Unmerge.getReg(1),
      Builder.buildConstant(HalfTy, HalfSize - 1));

    if (ShiftVal == HalfSize) {
      // (G_ASHR i64:x, 32) ->
      //   G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
      Builder.buildMergeLikeInstr(DstReg, {Unmerge.getReg(1), Hi});
    } else if (ShiftVal == Size - 1) {
      // Don't need a second shift.
      // (G_ASHR i64:x, 63) ->
      //   %narrowed = (G_ASHR hi_32(x), 31)
      //   G_MERGE_VALUES %narrowed, %narrowed
      Builder.buildMergeLikeInstr(DstReg, {Hi, Hi});
    } else {
      auto Lo = Builder.buildAShr(
        HalfTy, Unmerge.getReg(1),
        Builder.buildConstant(HalfTy, ShiftVal - HalfSize));

      // (G_ASHR i64:x, C) ->, for C >= 32
      //   G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
      Builder.buildMergeLikeInstr(DstReg, {Lo, Hi});
    }
  }

  MI.eraseFromParent();
}

bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
                                              unsigned TargetShiftAmount) {
  unsigned ShiftAmt;
  if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
    applyCombineShiftToUnmerge(MI, ShiftAmt);
    return true;
  }

  return false;
}
bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  Register SrcReg = MI.getOperand(1).getReg();
  return mi_match(SrcReg, MRI,
                  m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
}

void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
  assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
  Register DstReg = MI.getOperand(0).getReg();
  Builder.setInstr(MI);
  Builder.buildCopy(DstReg, Reg);
  MI.eraseFromParent();
}

void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
  assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
  Register DstReg = MI.getOperand(0).getReg();
  Builder.setInstr(MI);
  Builder.buildZExtOrTrunc(DstReg, Reg);
  MI.eraseFromParent();
}
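// Illustrative examples for the pointer/integer cast folds above (hand-written
// sketches, not from a test):
//   %i:_(s64) = G_PTRTOINT %p:_(p0)
//   %q:_(p0)  = G_INTTOPTR %i         --> %q:_(p0) = COPY %p
//   %p:_(p0)  = G_INTTOPTR %i:_(s64)
//   %j:_(s32) = G_PTRTOINT %p         --> %j built as a zext/trunc of %i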
bool CombinerHelper::matchCombineAddP2IToPtrAdd(
    MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
  assert(MI.getOpcode() == TargetOpcode::G_ADD);
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  LLT IntTy = MRI.getType(LHS);

  // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
  // instruction.
  PtrReg.second = false;
  for (Register SrcReg : {LHS, RHS}) {
    if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {
      // Don't handle cases where the integer is implicitly converted to the
      // pointer width.
      LLT PtrTy = MRI.getType(PtrReg.first);
      if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
        return true;
    }

    PtrReg.second = true;
  }

  return false;
}

void CombinerHelper::applyCombineAddP2IToPtrAdd(
    MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
  Register Dst = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();

  const bool DoCommute = PtrReg.second;
  if (DoCommute)
    std::swap(LHS, RHS);
  LHS = PtrReg.first;

  LLT PtrTy = MRI.getType(LHS);

  Builder.setInstrAndDebugLoc(MI);
  auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
  Builder.buildPtrToInt(Dst, PtrAdd);
  MI.eraseFromParent();
}
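// Illustrative example of the fold above (hand-written): an integer add of a
// ptrtoint result is rewritten as pointer arithmetic followed by a single
// cast, provided the integer and pointer widths match:
//   %i:_(s64) = G_PTRTOINT %p:_(p0)
//   %d:_(s64) = G_ADD %i, %off
// -->
//   %q:_(p0)  = G_PTR_ADD %p, %off
//   %d:_(s64) = G_PTRTOINT %q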
bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
                                                  APInt &NewCst) {
  auto &PtrAdd = cast<GPtrAdd>(MI);
  Register LHS = PtrAdd.getBaseReg();
  Register RHS = PtrAdd.getOffsetReg();
  MachineRegisterInfo &MRI = Builder.getMF().getRegInfo();

  if (auto RHSCst = getIConstantVRegVal(RHS, MRI)) {
    APInt Cst;
    if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {
      auto DstTy = MRI.getType(PtrAdd.getReg(0));
      // G_INTTOPTR uses zero-extension
      NewCst = Cst.zextOrTrunc(DstTy.getSizeInBits());
      NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits());
      return true;
    }
  }

  return false;
}

void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
                                                  APInt &NewCst) {
  auto &PtrAdd = cast<GPtrAdd>(MI);
  Register Dst = PtrAdd.getReg(0);

  Builder.setInstrAndDebugLoc(MI);
  Builder.buildConstant(Dst, NewCst);
  PtrAdd.eraseFromParent();
}
bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
  assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(DstReg);
  return mi_match(SrcReg, MRI,
                  m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
}

bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI, Register &Reg) {
  assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(DstReg);
  if (mi_match(SrcReg, MRI,
               m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) {
    unsigned DstSize = DstTy.getScalarSizeInBits();
    unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits();
    return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
  }
  return false;
}
bool CombinerHelper::matchCombineExtOfExt(
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
  assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
          MI.getOpcode() == TargetOpcode::G_SEXT ||
          MI.getOpcode() == TargetOpcode::G_ZEXT) &&
         "Expected a G_[ASZ]EXT");
  Register SrcReg = MI.getOperand(1).getReg();
  MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
  // Match exts with the same opcode, anyext([sz]ext) and sext(zext).
  unsigned Opc = MI.getOpcode();
  unsigned SrcOpc = SrcMI->getOpcode();
  if (Opc == SrcOpc ||
      (Opc == TargetOpcode::G_ANYEXT &&
       (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
      (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
    MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc);
    return true;
  }
  return false;
}

void CombinerHelper::applyCombineExtOfExt(
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
  assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
          MI.getOpcode() == TargetOpcode::G_SEXT ||
          MI.getOpcode() == TargetOpcode::G_ZEXT) &&
         "Expected a G_[ASZ]EXT");

  Register Reg = std::get<0>(MatchInfo);
  unsigned SrcExtOp = std::get<1>(MatchInfo);

  // Combine exts with the same opcode.
  if (MI.getOpcode() == SrcExtOp) {
    Observer.changingInstr(MI);
    MI.getOperand(1).setReg(Reg);
    Observer.changedInstr(MI);
    return;
  }

  // Combine:
  // - anyext([sz]ext x) to [sz]ext x
  // - sext(zext x) to zext x
  if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
      (MI.getOpcode() == TargetOpcode::G_SEXT &&
       SrcExtOp == TargetOpcode::G_ZEXT)) {
    Register DstReg = MI.getOperand(0).getReg();
    Builder.setInstrAndDebugLoc(MI);
    Builder.buildInstr(SrcExtOp, {DstReg}, {Reg});
    MI.eraseFromParent();
  }
}
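// Illustrative examples of the ext(ext) folds above (hand-written, not from a
// test):
//   %a:_(s32) = G_ZEXT %x:_(s8)
//   %d:_(s64) = G_ZEXT %a             --> %d:_(s64) = G_ZEXT %x
//   %a:_(s32) = G_ZEXT %x:_(s8)
//   %d:_(s64) = G_SEXT %a             --> %d:_(s64) = G_ZEXT %x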
void CombinerHelper::applyCombineMulByNegativeOne(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(DstReg);

  Builder.setInstrAndDebugLoc(MI);
  Builder.buildSub(DstReg, Builder.buildConstant(DstTy, 0), SrcReg,
                   MI.getFlags());
  MI.eraseFromParent();
}
bool CombinerHelper::matchCombineFAbsOfFNeg(MachineInstr &MI,
                                            BuildFnTy &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
  Register Src = MI.getOperand(1).getReg();
  Register NegSrc;

  if (!mi_match(Src, MRI, m_GFNeg(m_Reg(NegSrc))))
    return false;

  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    Observer.changingInstr(MI);
    MI.getOperand(1).setReg(NegSrc);
    Observer.changedInstr(MI);
  };
  return true;
}
bool CombinerHelper::matchCombineTruncOfExt(
    MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
  Register SrcReg = MI.getOperand(1).getReg();
  MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
  unsigned SrcOpc = SrcMI->getOpcode();
  if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
      SrcOpc == TargetOpcode::G_ZEXT) {
    MatchInfo = std::make_pair(SrcMI->getOperand(1).getReg(), SrcOpc);
    return true;
  }
  return false;
}

void CombinerHelper::applyCombineTruncOfExt(
    MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
  Register SrcReg = MatchInfo.first;
  unsigned SrcExtOp = MatchInfo.second;
  Register DstReg = MI.getOperand(0).getReg();
  LLT SrcTy = MRI.getType(SrcReg);
  LLT DstTy = MRI.getType(DstReg);
  if (SrcTy == DstTy) {
    MI.eraseFromParent();
    replaceRegWith(MRI, DstReg, SrcReg);
    return;
  }
  Builder.setInstrAndDebugLoc(MI);
  if (SrcTy.getSizeInBits() < DstTy.getSizeInBits())
    Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg});
  else
    Builder.buildTrunc(DstReg, SrcReg);
  MI.eraseFromParent();
}
static LLT getMidVTForTruncRightShiftCombine(LLT ShiftTy, LLT TruncTy) {
  const unsigned ShiftSize = ShiftTy.getScalarSizeInBits();
  const unsigned TruncSize = TruncTy.getScalarSizeInBits();

  // ShiftTy > 32 > TruncTy -> 32
  if (ShiftSize > 32 && TruncSize < 32)
    return ShiftTy.changeElementSize(32);

  // TODO: We could also reduce to 16 bits, but that's more target-dependent.
  // Some targets like it, some don't, some only like it under certain
  // conditions/processor versions, etc.
  // A TL hook might be needed for this.

  return ShiftTy;
}
bool CombinerHelper::matchCombineTruncOfShift(
    MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();

  if (!MRI.hasOneNonDBGUse(SrcReg))
    return false;

  LLT SrcTy = MRI.getType(SrcReg);
  LLT DstTy = MRI.getType(DstReg);

  MachineInstr *SrcMI = getDefIgnoringCopies(SrcReg, MRI);
  const auto &TL = getTargetLowering();

  LLT NewShiftTy;
  switch (SrcMI->getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_SHL: {
    NewShiftTy = DstTy;

    // Make sure new shift amount is legal.
    KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
    if (Known.getMaxValue().uge(NewShiftTy.getScalarSizeInBits()))
      return false;
    break;
  }
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {
    // For right shifts, we conservatively do not do the transform if the TRUNC
    // has any STORE users. The reason is that if we change the type of the
    // shift, we may break the truncstore combine.
    //
    // TODO: Fix truncstore combine to handle (trunc(lshr (trunc x), k)).
    for (auto &User : MRI.use_instructions(DstReg))
      if (User.getOpcode() == TargetOpcode::G_STORE)
        return false;

    NewShiftTy = getMidVTForTruncRightShiftCombine(SrcTy, DstTy);
    if (NewShiftTy == SrcTy)
      return false;

    // Make sure we won't lose information by truncating the high bits.
    KnownBits Known = KB->getKnownBits(SrcMI->getOperand(2).getReg());
    if (Known.getMaxValue().ugt(NewShiftTy.getScalarSizeInBits() -
                                DstTy.getScalarSizeInBits()))
      return false;
    break;
  }
  }

  if (!isLegalOrBeforeLegalizer(
          {SrcMI->getOpcode(),
           {NewShiftTy, TL.getPreferredShiftAmountTy(NewShiftTy)}}))
    return false;

  MatchInfo = std::make_pair(SrcMI, NewShiftTy);
  return true;
}
void CombinerHelper::applyCombineTruncOfShift(
    MachineInstr &MI, std::pair<MachineInstr *, LLT> &MatchInfo) {
  Builder.setInstrAndDebugLoc(MI);

  MachineInstr *ShiftMI = MatchInfo.first;
  LLT NewShiftTy = MatchInfo.second;

  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);

  Register ShiftAmt = ShiftMI->getOperand(2).getReg();
  Register ShiftSrc = ShiftMI->getOperand(1).getReg();
  ShiftSrc = Builder.buildTrunc(NewShiftTy, ShiftSrc).getReg(0);

  Register NewShift =
      Builder
          .buildInstr(ShiftMI->getOpcode(), {NewShiftTy}, {ShiftSrc, ShiftAmt})
          .getReg(0);

  if (NewShiftTy == DstTy)
    replaceRegWith(MRI, Dst, NewShift);
  else
    Builder.buildTrunc(Dst, NewShift);

  eraseInst(MI);
}
bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {
  return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
    return MO.isReg() &&
           getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
  });
}

bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) {
  return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
    return !MO.isReg() ||
           getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
  });
}

bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  return all_of(Mask, [](int Elt) { return Elt < 0; });
}

bool CombinerHelper::matchUndefStore(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
                      MRI);
}

bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
  return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
                      MRI);
}
bool CombinerHelper::matchInsertExtractVecEltOutOfBounds(MachineInstr &MI) {
  assert((MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT ||
          MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT) &&
         "Expected an insert/extract element op");
  LLT VecTy = MRI.getType(MI.getOperand(1).getReg());
  unsigned IdxIdx =
      MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3;
  auto Idx = getIConstantVRegVal(MI.getOperand(IdxIdx).getReg(), MRI);
  if (!Idx)
    return false;
  return Idx->getZExtValue() >= VecTy.getNumElements();
}

bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
  GSelect &SelMI = cast<GSelect>(MI);
  auto Cst =
      isConstantOrConstantSplatVector(*MRI.getVRegDef(SelMI.getCondReg()), MRI);
  if (!Cst)
    return false;
  OpIdx = Cst->isZero() ? 3 : 2;
  return true;
}

void CombinerHelper::eraseInst(MachineInstr &MI) { MI.eraseFromParent(); }
bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
                                    const MachineOperand &MOP2) {
  if (!MOP1.isReg() || !MOP2.isReg())
    return false;

  auto InstAndDef1 = getDefSrcRegIgnoringCopies(MOP1.getReg(), MRI);
  if (!InstAndDef1)
    return false;

  auto InstAndDef2 = getDefSrcRegIgnoringCopies(MOP2.getReg(), MRI);
  if (!InstAndDef2)
    return false;

  MachineInstr *I1 = InstAndDef1->MI;
  MachineInstr *I2 = InstAndDef2->MI;

  // Handle a case like this:
  //
  // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
  //
  // Even though %0 and %1 are produced by the same instruction they are not
  // the same values.
  if (I1 == I2)
    return MOP1.getReg() == MOP2.getReg();

  // If we have an instruction which loads or stores, we can't guarantee that
  // it is identical.
  //
  // For example, we may have
  //
  // %x1 = G_LOAD %addr (load N from @somewhere)
  // ...
  // call @foo
  // ...
  // %x2 = G_LOAD %addr (load N from @somewhere)
  // ...
  // %or = G_OR %x1, %x2
  //
  // It's possible that @foo will modify whatever lives at the address we're
  // loading from. To be safe, let's just assume that all loads and stores
  // are different (unless we have something which is guaranteed to not
  // change.)
  if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad())
    return false;

  // If both instructions are loads or stores, they are equal only if both
  // are dereferenceable invariant loads with the same number of bits.
  if (I1->mayLoadOrStore() && I2->mayLoadOrStore()) {
    GLoadStore *LS1 = dyn_cast<GLoadStore>(I1);
    GLoadStore *LS2 = dyn_cast<GLoadStore>(I2);
    if (!LS1 || !LS2)
      return false;

    if (!I2->isDereferenceableInvariantLoad() ||
        (LS1->getMemSizeInBits() != LS2->getMemSizeInBits()))
      return false;
  }

  // Check for physical registers on the instructions first to avoid cases
  // like this:
  //
  // %a = COPY $physreg
  // ...
  // SOMETHING implicit-def $physreg
  // ...
  // %b = COPY $physreg
  //
  // These copies are not equivalent.
  if (any_of(I1->uses(), [](const MachineOperand &MO) {
        return MO.isReg() && MO.getReg().isPhysical();
      })) {
    // Check if we have a case like this:
    //
    // %a = COPY $physreg
    // %b = COPY %a
    //
    // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
    // From that, we know that they must have the same value, since they must
    // have come from the same COPY.
    return I1->isIdenticalTo(*I2);
  }

  // We don't have any physical registers, so we don't necessarily need the
  // same vreg defs.
  //
  // On the off-chance that there's some target instruction feeding into the
  // instruction, let's use produceSameValue instead of isIdenticalTo.
  if (Builder.getTII().produceSameValue(*I1, *I2, &MRI)) {
    // Handle instructions with multiple defs that produce same values. Values
    // are same for operands with same index.
    // %0:_(s8), %1:_(s8), %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
    // %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>)
    // I1 and I2 are different instructions but produce same values,
    // %1 and %6 are same, %1 and %7 are not the same value.
    return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) ==
           I2->findRegisterDefOperandIdx(InstAndDef2->Reg);
  }

  return false;
}
bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
  if (!MOP.isReg())
    return false;
  auto *MI = MRI.getVRegDef(MOP.getReg());
  auto MaybeCst = isConstantOrConstantSplatVector(*MI, MRI);
  return MaybeCst && MaybeCst->getBitWidth() <= 64 &&
         MaybeCst->getSExtValue() == C;
}

void CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
                                                     unsigned OpIdx) {
  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
  Register OldReg = MI.getOperand(0).getReg();
  Register Replacement = MI.getOperand(OpIdx).getReg();
  assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
  MI.eraseFromParent();
  replaceRegWith(MRI, OldReg, Replacement);
}

void CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
                                                 Register Replacement) {
  assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
  Register OldReg = MI.getOperand(0).getReg();
  assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
  MI.eraseFromParent();
  replaceRegWith(MRI, OldReg, Replacement);
}
bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
  // Match (cond ? x : x)
  return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
         canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
                       MRI);
}

bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) {
  return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
         canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
                       MRI);
}

bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) {
  return matchConstantOp(MI.getOperand(OpIdx), 0) &&
         canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
                       MRI);
}

bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) {
  MachineOperand &MO = MI.getOperand(OpIdx);
  return MO.isReg() &&
         getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
}

bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
                                                        unsigned OpIdx) {
  MachineOperand &MO = MI.getOperand(OpIdx);
  return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB);
}
void CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
  assert(MI.getNumDefs() == 1 && "Expected only one def?");
  Builder.setInstr(MI);
  Builder.buildFConstant(MI.getOperand(0), C);
  MI.eraseFromParent();
}

void CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
  assert(MI.getNumDefs() == 1 && "Expected only one def?");
  Builder.setInstr(MI);
  Builder.buildConstant(MI.getOperand(0), C);
  MI.eraseFromParent();
}

void CombinerHelper::replaceInstWithConstant(MachineInstr &MI, APInt C) {
  assert(MI.getNumDefs() == 1 && "Expected only one def?");
  Builder.setInstr(MI);
  Builder.buildConstant(MI.getOperand(0), C);
  MI.eraseFromParent();
}

void CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
  assert(MI.getNumDefs() == 1 && "Expected only one def?");
  Builder.setInstr(MI);
  Builder.buildUndef(MI.getOperand(0));
  MI.eraseFromParent();
}
bool CombinerHelper::matchSimplifyAddToSub(
    MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  Register &NewLHS = std::get<0>(MatchInfo);
  Register &NewRHS = std::get<1>(MatchInfo);

  // Helper lambda to check for opportunities for
  // ((0-A) + B) -> B - A
  // (A + (0-B)) -> A - B
  auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
    if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS))))
      return false;
    NewLHS = MaybeNewLHS;
    return true;
  };

  return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
}
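// Illustrative example of the add-to-sub fold above (hand-written): an add
// where one operand is a negation becomes a subtraction:
//   %n:_(s32) = G_SUB %zero, %a
//   %d:_(s32) = G_ADD %n, %b          --> %d:_(s32) = G_SUB %b, %a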
bool CombinerHelper::matchCombineInsertVecElts(
    MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
         "Expected a G_INSERT_VECTOR_ELT");
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  assert(DstTy.isVector() && "Invalid G_INSERT_VECTOR_ELT?");
  unsigned NumElts = DstTy.getNumElements();
  // If this MI is part of a sequence of insert_vec_elts, then
  // don't do the combine in the middle of the sequence.
  if (MRI.hasOneUse(DstReg) && MRI.use_instr_begin(DstReg)->getOpcode() ==
                                   TargetOpcode::G_INSERT_VECTOR_ELT)
    return false;
  MachineInstr *CurrInst = &MI;
  MachineInstr *TmpInst;
  int64_t IntImm;
  Register TmpReg;
  MatchInfo.resize(NumElts);
  while (mi_match(
      CurrInst->getOperand(0).getReg(), MRI,
      m_GInsertVecElt(m_MInstr(TmpInst), m_Reg(TmpReg), m_ICst(IntImm)))) {
    if (IntImm >= NumElts || IntImm < 0)
      return false;
    if (!MatchInfo[IntImm])
      MatchInfo[IntImm] = TmpReg;
    CurrInst = TmpInst;
  }
  // Variable index.
  if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
    return false;
  if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (unsigned I = 1; I < TmpInst->getNumOperands(); ++I) {
      if (!MatchInfo[I - 1].isValid())
        MatchInfo[I - 1] = TmpInst->getOperand(I).getReg();
    }
    return true;
  }
  // If we didn't end in a G_IMPLICIT_DEF, bail out.
  return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
}
void CombinerHelper::applyCombineInsertVecElts(
    MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
  Builder.setInstr(MI);
  Register UndefReg;
  auto GetUndef = [&]() {
    if (UndefReg)
      return UndefReg;
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    UndefReg = Builder.buildUndef(DstTy.getScalarType()).getReg(0);
    return UndefReg;
  };
  for (unsigned I = 0; I < MatchInfo.size(); ++I) {
    if (!MatchInfo[I])
      MatchInfo[I] = GetUndef();
  }
  Builder.buildBuildVector(MI.getOperand(0).getReg(), MatchInfo);
  MI.eraseFromParent();
}

void CombinerHelper::applySimplifyAddToSub(
    MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
  Builder.setInstr(MI);
  Register SubLHS, SubRHS;
  std::tie(SubLHS, SubRHS) = MatchInfo;
  Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
  MI.eraseFromParent();
}
bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
    MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
  // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
  //
  // Creates the new hand + logic instruction (but does not insert them.)
  //
  // On success, MatchInfo is populated with the new instructions. These are
  // inserted in applyHoistLogicOpWithSameOpcodeHands.
  unsigned LogicOpcode = MI.getOpcode();
  assert(LogicOpcode == TargetOpcode::G_AND ||
         LogicOpcode == TargetOpcode::G_OR ||
         LogicOpcode == TargetOpcode::G_XOR);
  MachineIRBuilder MIB(MI);
  Register Dst = MI.getOperand(0).getReg();
  Register LHSReg = MI.getOperand(1).getReg();
  Register RHSReg = MI.getOperand(2).getReg();

  // Don't recompute anything.
  if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))
    return false;

  // Make sure we have (hand x, ...), (hand y, ...)
  MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI);
  MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
  if (!LeftHandInst || !RightHandInst)
    return false;
  unsigned HandOpcode = LeftHandInst->getOpcode();
  if (HandOpcode != RightHandInst->getOpcode())
    return false;
  if (!LeftHandInst->getOperand(1).isReg() ||
      !RightHandInst->getOperand(1).isReg())
    return false;

  // Make sure the types match up, and if we're doing this post-legalization,
  // we end up with legal types.
  Register X = LeftHandInst->getOperand(1).getReg();
  Register Y = RightHandInst->getOperand(1).getReg();
  LLT XTy = MRI.getType(X);
  LLT YTy = MRI.getType(Y);
  if (!XTy.isValid() || XTy != YTy)
    return false;

  // Optional extra source register.
  Register ExtraHandOpSrcReg;
  switch (HandOpcode) {
  default:
    return false;
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT: {
    // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
    break;
  }
  case TargetOpcode::G_AND:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_SHL: {
    // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
    MachineOperand &ZOp = LeftHandInst->getOperand(2);
    if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2)))
      return false;
    ExtraHandOpSrcReg = ZOp.getReg();
    break;
  }
  }

  if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}}))
    return false;

  // Record the steps to build the new instructions.
  //
  // Steps to build (logic x, y)
  auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
  OperandBuildSteps LogicBuildSteps = {
      [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
  InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);

  // Steps to build hand (logic x, y), ...z
  OperandBuildSteps HandBuildSteps = {
      [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }};
  if (ExtraHandOpSrcReg.isValid())
    HandBuildSteps.push_back(
        [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); });
  InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);

  MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
  return true;
}
void CombinerHelper::applyBuildInstructionSteps(
    MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
  assert(MatchInfo.InstrsToBuild.size() &&
         "Expected at least one instr to build?");
  Builder.setInstr(MI);
  for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
    assert(InstrToBuild.Opcode && "Expected a valid opcode?");
    assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
    MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode);
    for (auto &OperandFn : InstrToBuild.OperandFns)
      OperandFn(Instr);
  }
  MI.eraseFromParent();
}
bool CombinerHelper::matchAshrShlToSextInreg(
    MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR);
  int64_t ShlCst, AshrCst;
  Register Src;
  if (!mi_match(MI.getOperand(0).getReg(), MRI,
                m_GAShr(m_GShl(m_Reg(Src), m_ICstOrSplat(ShlCst)),
                        m_ICstOrSplat(AshrCst))))
    return false;
  if (ShlCst != AshrCst)
    return false;
  if (!isLegalOrBeforeLegalizer(
          {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))
    return false;
  MatchInfo = std::make_tuple(Src, ShlCst);
  return true;
}

void CombinerHelper::applyAshShlToSextInreg(
    MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR);
  Register Src;
  int64_t ShiftAmt;
  std::tie(Src, ShiftAmt) = MatchInfo;
  unsigned Size = MRI.getType(Src).getScalarSizeInBits();
  Builder.setInstrAndDebugLoc(MI);
  Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
  MI.eraseFromParent();
}
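// Illustrative example of the fold above (hand-written, not from a test):
// shifting left and then arithmetic-shifting right by the same amount is a
// sign extension of the low bits that remain, here 32 - 24 = 8 of them:
//   %s:_(s32) = G_SHL %x:_(s32), %c24
//   %d:_(s32) = G_ASHR %s, %c24       --> %d:_(s32) = G_SEXT_INREG %x, 8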
/// and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
bool CombinerHelper::matchOverlappingAnd(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  Register Dst = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(Dst);

  Register R;
  int64_t C1;
  int64_t C2;
  if (!mi_match(Dst, MRI,
                m_GAnd(m_GAnd(m_Reg(R), m_ICst(C1)), m_ICst(C2))))
    return false;

  MatchInfo = [=](MachineIRBuilder &B) {
    if (C1 & C2) {
      B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2));
      return;
    }
    auto Zero = B.buildConstant(Ty, 0);
    replaceRegWith(MRI, Dst, Zero->getOperand(0).getReg());
  };
  return true;
}
bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
                                       Register &Replacement) {
  // Given
  //
  // %y:_(sN) = G_SOMETHING
  // %x:_(sN) = G_SOMETHING
  // %res:_(sN) = G_AND %x, %y
  //
  // Eliminate the G_AND when it is known that x & y == x or x & y == y.
  //
  // Patterns like this can appear as a result of legalization. E.g.
  //
  // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y
  // %one:_(s32) = G_CONSTANT i32 1
  // %and:_(s32) = G_AND %cmp, %one
  //
  // In this case, G_ICMP only produces a single bit, so x & 1 == x.
  assert(MI.getOpcode() == TargetOpcode::G_AND);
  if (!KB)
    return false;

  Register AndDst = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  KnownBits LHSBits = KB->getKnownBits(LHS);
  KnownBits RHSBits = KB->getKnownBits(RHS);

  // Check that x & Mask == x.
  // x & 1 == x, always
  // x & 0 == x, only if x is also 0
  // Meaning Mask has no effect if every bit is either one in Mask or zero in x.
  //
  // Check if we can replace AndDst with the LHS of the G_AND
  if (canReplaceReg(AndDst, LHS, MRI) &&
      (LHSBits.Zero | RHSBits.One).isAllOnes()) {
    Replacement = LHS;
    return true;
  }

  // Check if we can replace AndDst with the RHS of the G_AND
  if (canReplaceReg(AndDst, RHS, MRI) &&
      (LHSBits.One | RHSBits.Zero).isAllOnes()) {
    Replacement = RHS;
    return true;
  }

  return false;
}
bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) {
  // Given
  //
  // %y:_(sN) = G_SOMETHING
  // %x:_(sN) = G_SOMETHING
  // %res:_(sN) = G_OR %x, %y
  //
  // Eliminate the G_OR when it is known that x | y == x or x | y == y.
  assert(MI.getOpcode() == TargetOpcode::G_OR);
  if (!KB)
    return false;

  Register OrDst = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  KnownBits LHSBits = KB->getKnownBits(LHS);
  KnownBits RHSBits = KB->getKnownBits(RHS);

  // Check that x | Mask == x.
  // x | 0 == x, always
  // x | 1 == x, only if x is also 1
  // Meaning Mask has no effect if every bit is either zero in Mask or one in x.
  //
  // Check if we can replace OrDst with the LHS of the G_OR
  if (canReplaceReg(OrDst, LHS, MRI) &&
      (LHSBits.One | RHSBits.Zero).isAllOnes()) {
    Replacement = LHS;
    return true;
  }

  // Check if we can replace OrDst with the RHS of the G_OR
  if (canReplaceReg(OrDst, RHS, MRI) &&
      (LHSBits.Zero | RHSBits.One).isAllOnes()) {
    Replacement = RHS;
    return true;
  }

  return false;
}
bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) {
  // If the input is already sign extended, just drop the extension.
  Register Src = MI.getOperand(1).getReg();
  unsigned ExtBits = MI.getOperand(2).getImm();
  unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
  return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
}

static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
                             int64_t Cst, bool IsVector, bool IsFP) {
  // For i1, Cst will always be -1 regardless of boolean contents.
  return (ScalarSizeBits == 1 && Cst == -1) ||
         isConstTrueVal(TLI, Cst, IsVector, IsFP);
}
bool CombinerHelper::matchNotCmp(MachineInstr &MI,
                                 SmallVectorImpl<Register> &RegsToNegate) {
  assert(MI.getOpcode() == TargetOpcode::G_XOR);
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
  Register XorSrc;
  Register CstReg;
  // We match xor(src, true) here.
  if (!mi_match(MI.getOperand(0).getReg(), MRI,
                m_GXor(m_Reg(XorSrc), m_Reg(CstReg))))
    return false;

  if (!MRI.hasOneNonDBGUse(XorSrc))
    return false;

  // Check that XorSrc is the root of a tree of comparisons combined with ANDs
  // and ORs. The suffix of RegsToNegate starting from index I is used as a
  // work list of tree nodes to visit.
  RegsToNegate.push_back(XorSrc);
  // Remember whether the comparisons are all integer or all floating point.
  bool IsInt = false;
  bool IsFP = false;
  for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
    Register Reg = RegsToNegate[I];
    if (!MRI.hasOneNonDBGUse(Reg))
      return false;
    MachineInstr *Def = MRI.getVRegDef(Reg);
    switch (Def->getOpcode()) {
    default:
      // Don't match if the tree contains anything other than ANDs, ORs and
      // comparisons.
      return false;
    case TargetOpcode::G_ICMP:
      if (IsFP)
        return false;
      IsInt = true;
      // When we apply the combine we will invert the predicate.
      break;
    case TargetOpcode::G_FCMP:
      if (IsInt)
        return false;
      IsFP = true;
      // When we apply the combine we will invert the predicate.
      break;
    case TargetOpcode::G_AND:
    case TargetOpcode::G_OR:
      // Implement De Morgan's laws:
      // ~(x & y) -> ~x | ~y
      // ~(x | y) -> ~x & ~y
      // When we apply the combine we will change the opcode and recursively
      // negate the operands.
      RegsToNegate.push_back(Def->getOperand(1).getReg());
      RegsToNegate.push_back(Def->getOperand(2).getReg());
      break;
    }
  }

  // Now we know whether the comparisons are integer or floating point, check
  // the constant in the xor.
  int64_t Cst;
  if (Ty.isVector()) {
    MachineInstr *CstDef = MRI.getVRegDef(CstReg);
    auto MaybeCst = getIConstantSplatSExtVal(*CstDef, MRI);
    if (!MaybeCst)
      return false;
    if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP))
      return false;
  } else {
    if (!mi_match(CstReg, MRI, m_ICst(Cst)))
      return false;
    if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP))
      return false;
  }

  return true;
}
&MI
,
3097 SmallVectorImpl
<Register
> &RegsToNegate
) {
3098 for (Register Reg
: RegsToNegate
) {
3099 MachineInstr
*Def
= MRI
.getVRegDef(Reg
);
3100 Observer
.changingInstr(*Def
);
3101 // For each comparison, invert the opcode. For each AND and OR, change the
3103 switch (Def
->getOpcode()) {
3105 llvm_unreachable("Unexpected opcode");
3106 case TargetOpcode::G_ICMP
:
3107 case TargetOpcode::G_FCMP
: {
3108 MachineOperand
&PredOp
= Def
->getOperand(1);
3109 CmpInst::Predicate NewP
= CmpInst::getInversePredicate(
3110 (CmpInst::Predicate
)PredOp
.getPredicate());
3111 PredOp
.setPredicate(NewP
);
3114 case TargetOpcode::G_AND
:
3115 Def
->setDesc(Builder
.getTII().get(TargetOpcode::G_OR
));
3117 case TargetOpcode::G_OR
:
3118 Def
->setDesc(Builder
.getTII().get(TargetOpcode::G_AND
));
3121 Observer
.changedInstr(*Def
);
3124 replaceRegWith(MRI
, MI
.getOperand(0).getReg(), MI
.getOperand(1).getReg());
3125 MI
.eraseFromParent();
bool CombinerHelper::matchXorOfAndWithSameReg(
    MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
  // Match (xor (and x, y), y) (or any of its commuted cases)
  assert(MI.getOpcode() == TargetOpcode::G_XOR);
  Register &X = MatchInfo.first;
  Register &Y = MatchInfo.second;
  Register AndReg = MI.getOperand(1).getReg();
  Register SharedReg = MI.getOperand(2).getReg();

  // Find a G_AND on either side of the G_XOR.
  // Looking for one of:
  //
  // (xor (and x, y), SharedReg)
  // (xor SharedReg, (and x, y))
  if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) {
    std::swap(AndReg, SharedReg);
    if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y))))
      return false;
  }

  // Only do this if we'll eliminate the G_AND.
  if (!MRI.hasOneNonDBGUse(AndReg))
    return false;

  // We can combine if SharedReg is the same as either the LHS or RHS of the
  // G_AND.
  if (Y != SharedReg)
    std::swap(X, Y);
  return Y == SharedReg;
}

void CombinerHelper::applyXorOfAndWithSameReg(
    MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
  // Fold (xor (and x, y), y) -> (and (not x), y)
  Builder.setInstrAndDebugLoc(MI);
  Register X, Y;
  std::tie(X, Y) = MatchInfo;
  auto Not = Builder.buildNot(MRI.getType(X), X);
  Observer.changingInstr(MI);
  MI.setDesc(Builder.getTII().get(TargetOpcode::G_AND));
  MI.getOperand(1).setReg(Not->getOperand(0).getReg());
  MI.getOperand(2).setReg(Y);
  Observer.changedInstr(MI);
}
bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) {
  auto &PtrAdd = cast<GPtrAdd>(MI);
  Register DstReg = PtrAdd.getReg(0);
  LLT Ty = MRI.getType(DstReg);
  const DataLayout &DL = Builder.getMF().getDataLayout();

  if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace()))
    return false;

  if (Ty.isPointer()) {
    auto ConstVal = getIConstantVRegVal(PtrAdd.getBaseReg(), MRI);
    return ConstVal && *ConstVal == 0;
  }

  assert(Ty.isVector() && "Expecting a vector type");
  const MachineInstr *VecMI = MRI.getVRegDef(PtrAdd.getBaseReg());
  return isBuildVectorAllZeros(*VecMI, MRI);
}

void CombinerHelper::applyPtrAddZero(MachineInstr &MI) {
  auto &PtrAdd = cast<GPtrAdd>(MI);
  Builder.setInstrAndDebugLoc(PtrAdd);
  Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg());
  PtrAdd.eraseFromParent();
}
/// The second source operand is known to be a power of 2.
void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) {
  Register DstReg = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(1).getReg();
  Register Pow2Src1 = MI.getOperand(2).getReg();
  LLT Ty = MRI.getType(DstReg);
  Builder.setInstrAndDebugLoc(MI);

  // Fold (urem x, pow2) -> (and x, pow2-1)
  auto NegOne = Builder.buildConstant(Ty, -1);
  auto Add = Builder.buildAdd(Ty, Pow2Src1, NegOne);
  Builder.buildAnd(DstReg, Src0, Add);
  MI.eraseFromParent();
}
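// Illustrative example of the fold above (hand-written): a remainder by a
// power of two becomes a mask with pow2 - 1:
//   %c:_(s32) = G_CONSTANT i32 16
//   %d:_(s32) = G_UREM %x:_(s32), %c  --> %d:_(s32) = G_AND %x, 15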
bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI,
                                              unsigned &SelectOpNo) {
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();

  Register OtherOperandReg = RHS;
  SelectOpNo = 1;
  MachineInstr *Select = MRI.getVRegDef(LHS);

  // Don't do this unless the old select is going away. We want to eliminate the
  // binary operator, not replace a binop with a select.
  if (Select->getOpcode() != TargetOpcode::G_SELECT ||
      !MRI.hasOneNonDBGUse(LHS)) {
    OtherOperandReg = LHS;
    SelectOpNo = 2;
    Select = MRI.getVRegDef(RHS);
    if (Select->getOpcode() != TargetOpcode::G_SELECT ||
        !MRI.hasOneNonDBGUse(RHS))
      return false;
  }

  MachineInstr *SelectLHS = MRI.getVRegDef(Select->getOperand(2).getReg());
  MachineInstr *SelectRHS = MRI.getVRegDef(Select->getOperand(3).getReg());

  if (!isConstantOrConstantVector(*SelectLHS, MRI,
                                  /*AllowFP*/ true,
                                  /*AllowOpaqueConstants*/ false))
    return false;
  if (!isConstantOrConstantVector(*SelectRHS, MRI,
                                  /*AllowFP*/ true,
                                  /*AllowOpaqueConstants*/ false))
    return false;

  unsigned BinOpcode = MI.getOpcode();

  // We now know one of the operands is a select of constants. Now verify that
  // the other binary operator operand is either a constant, or we can handle a
  // variable.
  bool CanFoldNonConst =
      (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&
      (isNullOrNullSplat(*SelectLHS, MRI) ||
       isAllOnesOrAllOnesSplat(*SelectLHS, MRI)) &&
      (isNullOrNullSplat(*SelectRHS, MRI) ||
       isAllOnesOrAllOnesSplat(*SelectRHS, MRI));
  if (CanFoldNonConst)
    return true;

  return isConstantOrConstantVector(*MRI.getVRegDef(OtherOperandReg), MRI,
                                    /*AllowFP*/ true,
                                    /*AllowOpaqueConstants*/ false);
}
/// \p SelectOperand is the operand in binary operator \p MI that is the select
/// to fold.
void CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI,
                                              const unsigned &SelectOperand) {
  Builder.setInstrAndDebugLoc(MI);

  Register Dst = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  MachineInstr *Select = MRI.getVRegDef(MI.getOperand(SelectOperand).getReg());

  Register SelectCond = Select->getOperand(1).getReg();
  Register SelectTrue = Select->getOperand(2).getReg();
  Register SelectFalse = Select->getOperand(3).getReg();

  LLT Ty = MRI.getType(Dst);
  unsigned BinOpcode = MI.getOpcode();

  Register FoldTrue, FoldFalse;

  // We have a select-of-constants followed by a binary operator with a
  // constant. Eliminate the binop by pulling the constant math into the select.
  // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO
  if (SelectOperand == 1) {
    // TODO: SelectionDAG verifies this actually constant folds before
    // committing to the combine.

    FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {SelectTrue, RHS}).getReg(0);
    FoldFalse =
        Builder.buildInstr(BinOpcode, {Ty}, {SelectFalse, RHS}).getReg(0);
  } else {
    FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectTrue}).getReg(0);
    FoldFalse =
        Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectFalse}).getReg(0);
  }

  Builder.buildSelect(Dst, SelectCond, FoldTrue, FoldFalse, MI.getFlags());
  MI.eraseFromParent();
}
std::optional<SmallVector<Register, 8>>
CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
  assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");
  // We want to detect if Root is part of a tree which represents a bunch
  // of loads being merged into a larger load. We'll try to recognize patterns
  // like, for example, a tree of one-use G_ORs whose leaves are individual
  // (possibly shifted) narrow loads.
  //
  // Each "Reg" may have been produced by a load + some arithmetic. This
  // function will save each of them.
  SmallVector<Register, 8> RegsToVisit;
  SmallVector<const MachineInstr *, 7> Ors = {Root};

  // In the "worst" case, we're dealing with a load for each byte. So, there
  // are at most #bytes - 1 ORs.
  const unsigned MaxIter =
      MRI.getType(Root->getOperand(0).getReg()).getSizeInBytes() - 1;
  for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {
    if (Ors.empty())
      break;
    const MachineInstr *Curr = Ors.pop_back_val();
    Register OrLHS = Curr->getOperand(1).getReg();
    Register OrRHS = Curr->getOperand(2).getReg();

    // In the combine, we want to eliminate the entire tree.
    if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS))
      return std::nullopt;

    // If it's a G_OR, save it and continue to walk. If it's not, then it's
    // something that may be a load + arithmetic.
    if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrLHS, MRI))
      Ors.push_back(Or);
    else
      RegsToVisit.push_back(OrLHS);
    if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrRHS, MRI))
      Ors.push_back(Or);
    else
      RegsToVisit.push_back(OrRHS);
  }

  // We're going to try and merge each register into a wider power-of-2 type,
  // so we ought to have an even number of registers.
  if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
    return std::nullopt;
  return RegsToVisit;
}
/// Helper function for findLoadOffsetsForLoadOrCombine.
///
/// Check if \p Reg is the result of loading a \p MemSizeInBits wide value,
/// and then moving that value into a specific byte offset.
///
/// \returns The load instruction and the byte offset it is moved into.
static std::optional<std::pair<GZExtLoad *, int64_t>>
matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits,
                         const MachineRegisterInfo &MRI) {
  assert(MRI.hasOneNonDBGUse(Reg) &&
         "Expected Reg to only have one non-debug use?");
  Register MaybeLoad;
  int64_t Shift;
  if (!mi_match(Reg, MRI,
                m_OneNonDBGUse(m_GShl(m_Reg(MaybeLoad), m_ICst(Shift))))) {
    Shift = 0;
    MaybeLoad = Reg;
  }

  if (Shift % MemSizeInBits != 0)
    return std::nullopt;

  // TODO: Handle other types of loads.
  auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI);
  if (!Load)
    return std::nullopt;

  if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits)
    return std::nullopt;

  return std::make_pair(Load, Shift / MemSizeInBits);
}
std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
CombinerHelper::findLoadOffsetsForLoadOrCombine(
    SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
    const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) {

  // Each load found for the pattern. There should be one for each RegsToVisit.
  SmallSetVector<const MachineInstr *, 8> Loads;

  // The lowest index used in any load. (The lowest "i" for each x[i].)
  int64_t LowestIdx = INT64_MAX;

  // The load which uses the lowest index.
  GZExtLoad *LowestIdxLoad = nullptr;

  // Keeps track of the load indices we see. We shouldn't see any indices twice.
  SmallSet<int64_t, 8> SeenIdx;

  // Ensure each load is in the same MBB.
  // TODO: Support multiple MachineBasicBlocks.
  MachineBasicBlock *MBB = nullptr;
  const MachineMemOperand *MMO = nullptr;

  // Earliest instruction-order load in the pattern.
  GZExtLoad *EarliestLoad = nullptr;

  // Latest instruction-order load in the pattern.
  GZExtLoad *LatestLoad = nullptr;

  // Base pointer which every load should share.
  Register BasePtr;

  // We want to find a load for each register. Each load should have some
  // appropriate bit twiddling arithmetic. During this loop, we will also keep
  // track of the load which uses the lowest index. Later, we will check if we
  // can use its pointer in the final, combined load.
  for (auto Reg : RegsToVisit) {
    // Find the load, and find the position that it will end up in (e.g. a
    // shifted) value.
    auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI);
    if (!LoadAndPos)
      return std::nullopt;
    GZExtLoad *Load;
    int64_t DstPos;
    std::tie(Load, DstPos) = *LoadAndPos;

    // TODO: Handle multiple MachineBasicBlocks. Currently not handled because
    // it is difficult to check for stores/calls/etc between loads.
    MachineBasicBlock *LoadMBB = Load->getParent();
    if (!MBB)
      MBB = LoadMBB;
    if (LoadMBB != MBB)
      return std::nullopt;

    // Make sure that the MachineMemOperands of every seen load are compatible.
    auto &LoadMMO = Load->getMMO();
    if (!MMO)
      MMO = &LoadMMO;
    if (MMO->getAddrSpace() != LoadMMO.getAddrSpace())
      return std::nullopt;

    // Find out what the base pointer and index for the load is.
    Register LoadPtr;
    int64_t Idx;
    if (!mi_match(Load->getOperand(1).getReg(), MRI,
                  m_GPtrAdd(m_Reg(LoadPtr), m_ICst(Idx)))) {
      LoadPtr = Load->getOperand(1).getReg();
      Idx = 0;
    }

    // Don't combine things like a[i], a[i] -> a bigger load.
    if (!SeenIdx.insert(Idx).second)
      return std::nullopt;

    // Every load must share the same base pointer; don't combine things like:
    //
    // a[i], b[i + 1] -> a bigger load.
    if (!BasePtr.isValid())
      BasePtr = LoadPtr;
    if (BasePtr != LoadPtr)
      return std::nullopt;

    if (Idx < LowestIdx) {
      LowestIdx = Idx;
      LowestIdxLoad = Load;
    }

    // Keep track of the byte offset that this load ends up at. If we have seen
    // the byte offset, then stop here. We do not want to combine:
    //
    // a[i] << 16, a[i + k] << 16 -> a bigger load.
    if (!MemOffset2Idx.try_emplace(DstPos, Idx).second)
      return std::nullopt;
    Loads.insert(Load);

    // Keep track of the position of the earliest/latest loads in the pattern.
    // We will check that there are no load fold barriers between them later
    // on.
    //
    // FIXME: Is there a better way to check for load fold barriers?
    if (!EarliestLoad || dominates(*Load, *EarliestLoad))
      EarliestLoad = Load;
    if (!LatestLoad || dominates(*LatestLoad, *Load))
      LatestLoad = Load;
  }

  // We found a load for each register. Let's check if each load satisfies the
  // pattern.
  assert(Loads.size() == RegsToVisit.size() &&
         "Expected to find a load for each register?");
  assert(EarliestLoad != LatestLoad && EarliestLoad && LatestLoad &&
         "Expected at least two loads?");

  // Check if there are any stores, calls, etc. between any of the loads. If
  // there are, then we can't safely perform the combine.
  //
  // MaxIter is chosen based off the (worst case) number of iterations it
  // typically takes to succeed in the LLVM test suite plus some padding.
  //
  // FIXME: Is there a better way to check for load fold barriers?
  const unsigned MaxIter = 20;
  unsigned Iter = 0;
  for (const auto &MI : instructionsWithoutDebug(EarliestLoad->getIterator(),
                                                 LatestLoad->getIterator())) {
    if (Loads.count(&MI))
      continue;
    if (MI.isLoadFoldBarrier())
      return std::nullopt;
    if (Iter++ == MaxIter)
      return std::nullopt;
  }

  return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad);
}

bool CombinerHelper::matchLoadOrCombine(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_OR);
  MachineFunction &MF = *MI.getMF();
  // Assuming a little-endian target, transform:
  //  s8 *a = ...
  //  s32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
  // =>
  //  s32 val = *((i32)a)
  //
  //  s8 *a = ...
  //  s32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
  // =>
  //  s32 val = BSWAP(*((s32)a))
  Register Dst = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(Dst);
  if (Ty.isVector())
    return false;

  // We need to combine at least two loads into this type. Since the smallest
  // possible load is into a byte, we need at least a 16-bit wide type.
  const unsigned WideMemSizeInBits = Ty.getSizeInBits();
  if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)
    return false;

  // Match a collection of non-OR instructions in the pattern.
  auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);
  if (!RegsToVisit)
    return false;

  // We have a collection of non-OR instructions. Figure out how wide each of
  // the small loads should be based off of the number of potential loads we
  // found.
  const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
  if (NarrowMemSizeInBits % 8 != 0)
    return false;

  // Check if each register feeding into each OR is a load from the same
  // base pointer + some arithmetic.
  //
  // e.g. a[0], a[1] << 8, a[2] << 16, etc.
  //
  // Also verify that each of these ends up putting a[i] into the same memory
  // offset as a load into a wide type would.
  SmallDenseMap<int64_t, int64_t, 8> MemOffset2Idx;
  GZExtLoad *LowestIdxLoad, *LatestLoad;
  int64_t LowestIdx;
  auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
      MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);
  if (!MaybeLoadInfo)
    return false;
  std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo;

  // We have a bunch of loads being OR'd together. Using the addresses + offsets
  // we found before, check if this corresponds to a big or little endian byte
  // pattern. If it does, then we can represent it using a load + possibly a
  // BSWAP.
  bool IsBigEndianTarget = MF.getDataLayout().isBigEndian();
  std::optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
  if (!IsBigEndian)
    return false;
  bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
  if (NeedsBSwap && !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {Ty}}))
    return false;

  // Make sure that the load from the lowest index produces offset 0 in the
  // final value.
  //
  // This ensures that we won't combine something like this:
  //
  // load x[i] -> byte 2
  // load x[i+1] -> byte 0 ---> wide_load x[i]
  // load x[i+2] -> byte 1
  const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
  const unsigned ZeroByteOffset =
      *IsBigEndian
          ? bigEndianByteAt(NumLoadsInTy, 0)
          : littleEndianByteAt(NumLoadsInTy, 0);
  auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
  if (ZeroOffsetIdx == MemOffset2Idx.end() ||
      ZeroOffsetIdx->second != LowestIdx)
    return false;

  // We will reuse the pointer from the load which ends up at byte offset 0. It
  // may not use index 0.
  Register Ptr = LowestIdxLoad->getPointerReg();
  const MachineMemOperand &MMO = LowestIdxLoad->getMMO();
  LegalityQuery::MemDesc MMDesc(MMO);
  MMDesc.MemoryTy = Ty;
  if (!isLegalOrBeforeLegalizer(
          {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}}))
    return false;
  auto PtrInfo = MMO.getPointerInfo();
  auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, WideMemSizeInBits / 8);

  // Load must be allowed and fast on the target.
  LLVMContext &C = MF.getFunction().getContext();
  auto &DL = MF.getDataLayout();
  unsigned Fast = 0;
  if (!getTargetLowering().allowsMemoryAccess(C, DL, Ty, *NewMMO, &Fast) ||
      !Fast)
    return false;

  MatchInfo = [=](MachineIRBuilder &MIB) {
    MIB.setInstrAndDebugLoc(*LatestLoad);
    Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(Dst) : Dst;
    MIB.buildLoad(LoadDst, Ptr, *NewMMO);
    if (NeedsBSwap)
      MIB.buildBSwap(Dst, LoadDst);
  };
  return true;
}

bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI,
                                            MachineInstr *&ExtMI) {
  assert(MI.getOpcode() == TargetOpcode::G_PHI);

  Register DstReg = MI.getOperand(0).getReg();

  // TODO: Extending a vector may be expensive, don't do this until heuristics
  // are better.
  if (MRI.getType(DstReg).isVector())
    return false;

  // Try to match a phi, whose only use is an extend.
  if (!MRI.hasOneNonDBGUse(DstReg))
    return false;
  ExtMI = &*MRI.use_instr_nodbg_begin(DstReg);
  switch (ExtMI->getOpcode()) {
  case TargetOpcode::G_ANYEXT:
    return true; // G_ANYEXT is usually free.
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:
    break;
  default:
    return false;
  }

  // If the target is likely to fold this extend away, don't propagate.
  if (Builder.getTII().isExtendLikelyToBeFolded(*ExtMI, MRI))
    return false;

  // We don't want to propagate the extends unless there's a good chance that
  // they'll be optimized in some way.
  // Collect the unique incoming values.
  SmallPtrSet<MachineInstr *, 4> InSrcs;
  for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) {
    auto *DefMI = getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI);
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_LOAD:
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_CONSTANT:
      InSrcs.insert(getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI));
      // Don't try to propagate if there are too many places to create new
      // extends, chances are it'll increase code size.
      if (InSrcs.size() > 2)
        return false;
      break;
    default:
      return false;
    }
  }
  return true;
}

void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI,
                                            MachineInstr *&ExtMI) {
  assert(MI.getOpcode() == TargetOpcode::G_PHI);
  Register DstReg = ExtMI->getOperand(0).getReg();
  LLT ExtTy = MRI.getType(DstReg);

  // Propagate the extension into the block of each incoming reg.
  // Use a SetVector here because PHIs can have duplicate edges, and we want
  // deterministic iteration order.
  SmallSetVector<MachineInstr *, 8> SrcMIs;
  SmallDenseMap<MachineInstr *, MachineInstr *, 8> OldToNewSrcMap;
  for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) {
    auto *SrcMI = MRI.getVRegDef(MI.getOperand(SrcIdx).getReg());
    if (!SrcMIs.insert(SrcMI))
      continue;

    // Build an extend after each src inst.
    auto *MBB = SrcMI->getParent();
    MachineBasicBlock::iterator InsertPt = ++SrcMI->getIterator();
    if (InsertPt != MBB->end() && InsertPt->isPHI())
      InsertPt = MBB->getFirstNonPHI();

    Builder.setInsertPt(*SrcMI->getParent(), InsertPt);
    Builder.setDebugLoc(MI.getDebugLoc());
    auto NewExt = Builder.buildExtOrTrunc(ExtMI->getOpcode(), ExtTy,
                                          SrcMI->getOperand(0).getReg());
    OldToNewSrcMap[SrcMI] = NewExt;
  }

  // Create a new phi with the extended inputs.
  Builder.setInstrAndDebugLoc(MI);
  auto NewPhi = Builder.buildInstrNoInsert(TargetOpcode::G_PHI);
  NewPhi.addDef(DstReg);
  for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
    if (!MO.isReg()) {
      NewPhi.addMBB(MO.getMBB());
      continue;
    }
    auto *NewSrc = OldToNewSrcMap[MRI.getVRegDef(MO.getReg())];
    NewPhi.addUse(NewSrc->getOperand(0).getReg());
  }
  Builder.insertInstr(NewPhi);
  ExtMI->eraseFromParent();
}

bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI,
                                                Register &Reg) {
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);
  // If we have a constant index, look for a G_BUILD_VECTOR source
  // and find the source register that the index maps to.
  Register SrcVec = MI.getOperand(1).getReg();
  LLT SrcTy = MRI.getType(SrcVec);

  auto Cst = getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
  if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements())
    return false;

  unsigned VecIdx = Cst->Value.getZExtValue();

  // Check if we have a build_vector or build_vector_trunc with an optional
  // trunc in between.
  MachineInstr *SrcVecMI = MRI.getVRegDef(SrcVec);
  if (SrcVecMI->getOpcode() == TargetOpcode::G_TRUNC) {
    SrcVecMI = MRI.getVRegDef(SrcVecMI->getOperand(1).getReg());
  }

  if (SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
      SrcVecMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR_TRUNC)
    return false;

  EVT Ty(getMVTForLLT(SrcTy));
  if (!MRI.hasOneNonDBGUse(SrcVec) &&
      !getTargetLowering().aggressivelyPreferBuildVectorSources(Ty))
    return false;

  Reg = SrcVecMI->getOperand(VecIdx + 1).getReg();
  return true;
}

void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI,
                                                Register &Reg) {
  // Check the type of the register, since it may have come from a
  // G_BUILD_VECTOR_TRUNC.
  LLT ScalarTy = MRI.getType(Reg);
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);

  Builder.setInstrAndDebugLoc(MI);
  if (ScalarTy != DstTy) {
    assert(ScalarTy.getSizeInBits() > DstTy.getSizeInBits());
    Builder.buildTrunc(DstReg, Reg);
    MI.eraseFromParent();
    return;
  }
  replaceSingleDefInstWithReg(MI, Reg);
}

bool CombinerHelper::matchExtractAllEltsFromBuildVector(
    MachineInstr &MI,
    SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  // This combine tries to find build_vector's which have every source element
  // extracted using G_EXTRACT_VECTOR_ELT. This can happen when transforms like
  // the masked load scalarization is run late in the pipeline. There's already
  // a combine for a similar pattern starting from the extract, but that
  // doesn't attempt to do it if there are multiple uses of the build_vector,
  // which in this case is true. Starting the combine from the build_vector
  // feels more natural than trying to find sibling nodes of extracts.
  //
  // %vec(<4 x s32>) = G_BUILD_VECTOR %s1(s32), %s2, %s3, %s4
  // %ext1 = G_EXTRACT_VECTOR_ELT %vec, 0
  // %ext2 = G_EXTRACT_VECTOR_ELT %vec, 1
  // %ext3 = G_EXTRACT_VECTOR_ELT %vec, 2
  // %ext4 = G_EXTRACT_VECTOR_ELT %vec, 3
  // ==>
  // replace ext{1,2,3,4} with %s{1,2,3,4}

  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  unsigned NumElts = DstTy.getNumElements();

  SmallBitVector ExtractedElts(NumElts);
  for (MachineInstr &II : MRI.use_nodbg_instructions(DstReg)) {
    if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT)
      return false;
    auto Cst = getIConstantVRegVal(II.getOperand(2).getReg(), MRI);
    if (!Cst)
      return false;
    unsigned Idx = Cst->getZExtValue();
    if (Idx >= NumElts)
      return false; // Out of range.
    ExtractedElts.set(Idx);
    SrcDstPairs.emplace_back(
        std::make_pair(MI.getOperand(Idx + 1).getReg(), &II));
  }
  // Match if every element was extracted.
  return ExtractedElts.all();
}

void CombinerHelper::applyExtractAllEltsFromBuildVector(
    MachineInstr &MI,
    SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) {
  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  for (auto &Pair : SrcDstPairs) {
    auto *ExtMI = Pair.second;
    replaceRegWith(MRI, ExtMI->getOperand(0).getReg(), Pair.first);
    ExtMI->eraseFromParent();
  }
  MI.eraseFromParent();
}

void CombinerHelper::applyBuildFn(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  Builder.setInstrAndDebugLoc(MI);
  MatchInfo(Builder);
  MI.eraseFromParent();
}

void CombinerHelper::applyBuildFnNoErase(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  Builder.setInstrAndDebugLoc(MI);
  MatchInfo(Builder);
}

bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI,
                                               BuildFnTy &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_OR);

  Register Dst = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(Dst);
  unsigned BitWidth = Ty.getScalarSizeInBits();

  Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt;
  unsigned FshOpc = 0;

  // Match (or (shl ...), (lshr ...)).
  if (!mi_match(Dst, MRI,
                // m_GOr() handles the commuted version as well.
                m_GOr(m_GShl(m_Reg(ShlSrc), m_Reg(ShlAmt)),
                      m_GLShr(m_Reg(LShrSrc), m_Reg(LShrAmt)))))
    return false;

  // Given constants C0 and C1 such that C0 + C1 is bit-width:
  // (or (shl x, C0), (lshr y, C1)) -> (fshl x, y, C0) or (fshr x, y, C1)
  int64_t CstShlAmt, CstLShrAmt;
  if (mi_match(ShlAmt, MRI, m_ICstOrSplat(CstShlAmt)) &&
      mi_match(LShrAmt, MRI, m_ICstOrSplat(CstLShrAmt)) &&
      CstShlAmt + CstLShrAmt == BitWidth) {
    FshOpc = TargetOpcode::G_FSHR;
    Amt = LShrAmt;

  } else if (mi_match(LShrAmt, MRI,
                      m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
             ShlAmt == Amt) {
    // (or (shl x, amt), (lshr y, (sub bw, amt))) -> (fshl x, y, amt)
    FshOpc = TargetOpcode::G_FSHL;

  } else if (mi_match(ShlAmt, MRI,
                      m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) &&
             LShrAmt == Amt) {
    // (or (shl x, (sub bw, amt)), (lshr y, amt)) -> (fshr x, y, amt)
    FshOpc = TargetOpcode::G_FSHR;

  } else {
    return false;
  }

  LLT AmtTy = MRI.getType(Amt);
  if (!isLegalOrBeforeLegalizer({FshOpc, {Ty, AmtTy}}))
    return false;

  MatchInfo = [=](MachineIRBuilder &B) {
    B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt});
  };
  return true;
}

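// For example (illustrative, s32 values) with constant amounts that sum to
// the bit width:
//
//   %hi:_(s32) = G_SHL %x, %c8
//   %lo:_(s32) = G_LSHR %y, %c24
//   %or:_(s32) = G_OR %hi, %lo
// ==>
//   %or:_(s32) = G_FSHR %x, %y, %c24   ; equivalently G_FSHL %x, %y, %c8
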
/// Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate.
bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
  Register X = MI.getOperand(1).getReg();
  Register Y = MI.getOperand(2).getReg();
  if (X != Y)
    return false;
  unsigned RotateOpc =
      Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR;
  return isLegalOrBeforeLegalizer({RotateOpc, {MRI.getType(X), MRI.getType(Y)}});
}

void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR);
  bool IsFSHL = Opc == TargetOpcode::G_FSHL;
  Observer.changingInstr(MI);
  MI.setDesc(Builder.getTII().get(IsFSHL ? TargetOpcode::G_ROTL
                                         : TargetOpcode::G_ROTR));
  MI.removeOperand(2);
  Observer.changedInstr(MI);
}

// Fold (rot x, c) -> (rot x, c % BitSize)
bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);
  unsigned Bitsize =
      MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
  Register AmtReg = MI.getOperand(2).getReg();
  bool OutOfRange = false;
  auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) {
    if (auto *CI = dyn_cast<ConstantInt>(C))
      OutOfRange |= CI->getValue().uge(Bitsize);
    return true;
  };
  return matchUnaryPredicate(MRI, AmtReg, MatchOutOfRange) && OutOfRange;
}

void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_ROTL ||
         MI.getOpcode() == TargetOpcode::G_ROTR);
  unsigned Bitsize =
      MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits();
  Builder.setInstrAndDebugLoc(MI);
  Register Amt = MI.getOperand(2).getReg();
  LLT AmtTy = MRI.getType(Amt);
  auto Bits = Builder.buildConstant(AmtTy, Bitsize);
  Amt = Builder.buildURem(AmtTy, MI.getOperand(2).getReg(), Bits).getReg(0);
  Observer.changingInstr(MI);
  MI.getOperand(2).setReg(Amt);
  Observer.changedInstr(MI);
}

bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI,
                                                   int64_t &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  auto KnownLHS = KB->getKnownBits(MI.getOperand(2).getReg());
  auto KnownRHS = KB->getKnownBits(MI.getOperand(3).getReg());
  std::optional<bool> KnownVal;
  switch (Pred) {
  default:
    llvm_unreachable("Unexpected G_ICMP predicate?");
  case CmpInst::ICMP_EQ:
    KnownVal = KnownBits::eq(KnownLHS, KnownRHS);
    break;
  case CmpInst::ICMP_NE:
    KnownVal = KnownBits::ne(KnownLHS, KnownRHS);
    break;
  case CmpInst::ICMP_SGE:
    KnownVal = KnownBits::sge(KnownLHS, KnownRHS);
    break;
  case CmpInst::ICMP_SGT:
    KnownVal = KnownBits::sgt(KnownLHS, KnownRHS);
    break;
  case CmpInst::ICMP_SLE:
    KnownVal = KnownBits::sle(KnownLHS, KnownRHS);
    break;
  case CmpInst::ICMP_SLT:
    KnownVal = KnownBits::slt(KnownLHS, KnownRHS);
    break;
  case CmpInst::ICMP_UGE:
    KnownVal = KnownBits::uge(KnownLHS, KnownRHS);
    break;
  case CmpInst::ICMP_UGT:
    KnownVal = KnownBits::ugt(KnownLHS, KnownRHS);
    break;
  case CmpInst::ICMP_ULE:
    KnownVal = KnownBits::ule(KnownLHS, KnownRHS);
    break;
  case CmpInst::ICMP_ULT:
    KnownVal = KnownBits::ult(KnownLHS, KnownRHS);
    break;
  }
  if (!KnownVal)
    return false;
  MatchInfo =
      *KnownVal
          ? getICmpTrueVal(getTargetLowering(),
                           /*IsVector = */
                           MRI.getType(MI.getOperand(0).getReg()).isVector(),
                           /* IsFP = */ false)
          : 0;
  return true;
}

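// For example (illustrative): if the known bits prove the compare can never
// be true, the G_ICMP collapses to a constant:
//
//   %m:_(s32) = G_AND %x, %one        ; %m is known to be 0 or 1
//   %c:_(s1) = G_ICMP intpred(ugt), %m(s32), %three
// ==>
//   %c:_(s1) = G_CONSTANT i1 false
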
bool CombinerHelper::matchICmpToLHSKnownBits(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  // Given:
  //
  // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
  // %cmp = G_ICMP ne %x, 0
  //
  // Or:
  //
  // %x = G_WHATEVER (... x is known to be 0 or 1 ...)
  // %cmp = G_ICMP eq %x, 1
  //
  // We can replace %cmp with %x assuming true is 1 on the target.
  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  if (!CmpInst::isEquality(Pred))
    return false;
  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);
  if (getICmpTrueVal(getTargetLowering(), DstTy.isVector(),
                     /* IsFP = */ false) != 1)
    return false;
  int64_t OneOrZero = Pred == CmpInst::ICMP_EQ;
  if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICst(OneOrZero)))
    return false;
  Register LHS = MI.getOperand(2).getReg();
  auto KnownLHS = KB->getKnownBits(LHS);
  if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1)
    return false;
  // Make sure replacing Dst with the LHS is a legal operation.
  LLT LHSTy = MRI.getType(LHS);
  unsigned LHSSize = LHSTy.getSizeInBits();
  unsigned DstSize = DstTy.getSizeInBits();
  unsigned Op = TargetOpcode::COPY;
  if (DstSize != LHSSize)
    Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT;
  if (!isLegalOrBeforeLegalizer({Op, {DstTy, LHSTy}}))
    return false;
  MatchInfo = [=](MachineIRBuilder &B) { B.buildInstr(Op, {Dst}, {LHS}); };
  return true;
}

// Replace (and (or x, c1), c2) with (and x, c2) iff c1 & c2 == 0
bool CombinerHelper::matchAndOrDisjointMask(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  // Ignore vector types to simplify matching the two constants.
  // TODO: do this for vectors and scalars via a demanded bits analysis.
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  if (Ty.isVector())
    return false;

  Register Src;
  Register AndMaskReg;
  int64_t AndMaskBits;
  int64_t OrMaskBits;
  if (!mi_match(MI, MRI,
                m_GAnd(m_GOr(m_Reg(Src), m_ICst(OrMaskBits)),
                       m_all_of(m_ICst(AndMaskBits), m_Reg(AndMaskReg)))))
    return false;

  // Check if OrMask could turn on any bits in Src.
  if (AndMaskBits & OrMaskBits)
    return false;

  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    Observer.changingInstr(MI);
    // Canonicalize the result to have the constant on the RHS.
    if (MI.getOperand(1).getReg() == AndMaskReg)
      MI.getOperand(2).setReg(AndMaskReg);
    MI.getOperand(1).setReg(Src);
    Observer.changedInstr(MI);
  };
  return true;
}

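// For example (illustrative): (and (or x, 0xFFFF0000), 0xFF) -> (and x, 0xFF),
// since 0xFFFF0000 & 0xFF == 0, so the OR cannot affect any bit that survives
// the AND mask.
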
/// Form a G_SBFX from a G_SEXT_INREG fed by a right shift.
bool CombinerHelper::matchBitfieldExtractFromSExtInReg(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Src);
  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}}))
    return false;
  int64_t Width = MI.getOperand(2).getImm();
  Register ShiftSrc;
  int64_t ShiftImm;
  if (!mi_match(
          Src, MRI,
          m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)),
                                  m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm))))))
    return false;
  if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits())
    return false;

  MatchInfo = [=](MachineIRBuilder &B) {
    auto Cst1 = B.buildConstant(ExtractTy, ShiftImm);
    auto Cst2 = B.buildConstant(ExtractTy, Width);
    B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2);
  };
  return true;
}

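// For example (illustrative, s32): G_SEXT_INREG (G_LSHR x, 4), 8 becomes
// G_SBFX x, 4, 8, i.e. sign-extract the 8 bits starting at bit 4, provided
// G_SBFX is legal or custom for {s32, preferred shift amount type}.
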
/// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants.
bool CombinerHelper::matchBitfieldExtractFromAnd(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_AND);
  Register Dst = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(Dst);
  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
          TargetOpcode::G_UBFX, Ty, ExtractTy))
    return false;

  int64_t AndImm, LSBImm;
  Register ShiftSrc;
  const unsigned Size = Ty.getScalarSizeInBits();
  if (!mi_match(MI.getOperand(0).getReg(), MRI,
                m_GAnd(m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(LSBImm))),
                       m_ICst(AndImm))))
    return false;

  // The mask is a mask of the low bits iff imm & (imm+1) == 0.
  auto MaybeMask = static_cast<uint64_t>(AndImm);
  if (MaybeMask & (MaybeMask + 1))
    return false;

  // LSB must fit within the register.
  if (static_cast<uint64_t>(LSBImm) >= Size)
    return false;

  uint64_t Width = APInt(Size, AndImm).countr_one();
  MatchInfo = [=](MachineIRBuilder &B) {
    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto LSBCst = B.buildConstant(ExtractTy, LSBImm);
    B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst});
  };
  return true;
}

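// For example (illustrative, s32): (and (lshr x, 4), 0xFF) -> G_UBFX x, 4, 8,
// i.e. extract the 8 bits starting at bit 4; the mask 0xFF has 8 trailing
// ones and no holes, so it qualifies.
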
bool CombinerHelper::matchBitfieldExtractFromShr(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  const unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR);

  const Register Dst = MI.getOperand(0).getReg();

  const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR
                                  ? TargetOpcode::G_SBFX
                                  : TargetOpcode::G_UBFX;

  // Check if the type we would use for the extract is legal
  LLT Ty = MRI.getType(Dst);
  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  if (!LI || !LI->isLegalOrCustom({ExtrOpcode, {Ty, ExtractTy}}))
    return false;

  Register ShlSrc;
  int64_t ShrAmt;
  int64_t ShlAmt;
  const unsigned Size = Ty.getScalarSizeInBits();

  // Try to match shr (shl x, c1), c2
  if (!mi_match(Dst, MRI,
                m_BinOp(Opcode,
                        m_OneNonDBGUse(m_GShl(m_Reg(ShlSrc), m_ICst(ShlAmt))),
                        m_ICst(ShrAmt))))
    return false;

  // Make sure that the shift sizes can fit a bitfield extract
  if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >= Size)
    return false;

  // Skip this combine if the G_SEXT_INREG combine could handle it
  if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt)
    return false;

  // Calculate start position and width of the extract
  const int64_t Pos = ShrAmt - ShlAmt;
  const int64_t Width = Size - ShrAmt;

  MatchInfo = [=](MachineIRBuilder &B) {
    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto PosCst = B.buildConstant(ExtractTy, Pos);
    B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst});
  };
  return true;
}

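// For example (illustrative, s32): (lshr (shl x, 4), 8) -> G_UBFX x, 4, 24,
// since Pos = 8 - 4 and Width = 32 - 8; with G_ASHR the result is a G_SBFX
// instead.
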
bool CombinerHelper::matchBitfieldExtractFromShrAnd(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  const unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_ASHR);

  const Register Dst = MI.getOperand(0).getReg();
  LLT Ty = MRI.getType(Dst);
  LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty);
  if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal(
          TargetOpcode::G_UBFX, Ty, ExtractTy))
    return false;

  // Try to match shr (and x, c1), c2
  Register AndSrc;
  int64_t ShrAmt;
  int64_t SMask;
  if (!mi_match(Dst, MRI,
                m_BinOp(Opcode,
                        m_OneNonDBGUse(m_GAnd(m_Reg(AndSrc), m_ICst(SMask))),
                        m_ICst(ShrAmt))))
    return false;

  const unsigned Size = Ty.getScalarSizeInBits();
  if (ShrAmt < 0 || ShrAmt >= Size)
    return false;

  // If the shift subsumes the mask, emit the 0 directly.
  if (0 == (SMask >> ShrAmt)) {
    MatchInfo = [=](MachineIRBuilder &B) {
      B.buildConstant(Dst, 0);
    };
    return true;
  }

  // Check that ubfx can do the extraction, with no holes in the mask.
  uint64_t UMask = SMask;
  UMask |= maskTrailingOnes<uint64_t>(ShrAmt);
  UMask &= maskTrailingOnes<uint64_t>(Size);
  if (!isMask_64(UMask))
    return false;

  // Calculate start position and width of the extract.
  const int64_t Pos = ShrAmt;
  const int64_t Width = llvm::countr_one(UMask) - ShrAmt;

  // It's preferable to keep the shift, rather than form G_SBFX.
  // TODO: remove the G_AND via demanded bits analysis.
  if (Opcode == TargetOpcode::G_ASHR && Width + ShrAmt == Size)
    return false;

  MatchInfo = [=](MachineIRBuilder &B) {
    auto WidthCst = B.buildConstant(ExtractTy, Width);
    auto PosCst = B.buildConstant(ExtractTy, Pos);
    B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst});
  };
  return true;
}

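// For example (illustrative, s32): (lshr (and x, 0x0FF0), 4) -> G_UBFX x, 4, 8.
// Folding the low shifted-out bits into the mask gives UMask = 0x0FFF, which
// has no holes, so Pos = 4 and Width = 12 - 4 = 8.
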
4281 bool CombinerHelper::reassociationCanBreakAddressingModePattern(
4282 MachineInstr
&PtrAdd
) {
4283 assert(PtrAdd
.getOpcode() == TargetOpcode::G_PTR_ADD
);
4285 Register Src1Reg
= PtrAdd
.getOperand(1).getReg();
4286 MachineInstr
*Src1Def
= getOpcodeDef(TargetOpcode::G_PTR_ADD
, Src1Reg
, MRI
);
4290 Register Src2Reg
= PtrAdd
.getOperand(2).getReg();
4292 if (MRI
.hasOneNonDBGUse(Src1Reg
))
4295 auto C1
= getIConstantVRegVal(Src1Def
->getOperand(2).getReg(), MRI
);
4298 auto C2
= getIConstantVRegVal(Src2Reg
, MRI
);
4302 const APInt
&C1APIntVal
= *C1
;
4303 const APInt
&C2APIntVal
= *C2
;
4304 const int64_t CombinedValue
= (C1APIntVal
+ C2APIntVal
).getSExtValue();
4306 for (auto &UseMI
: MRI
.use_nodbg_instructions(Src1Reg
)) {
4307 // This combine may end up running before ptrtoint/inttoptr combines
4308 // manage to eliminate redundant conversions, so try to look through them.
4309 MachineInstr
*ConvUseMI
= &UseMI
;
4310 unsigned ConvUseOpc
= ConvUseMI
->getOpcode();
4311 while (ConvUseOpc
== TargetOpcode::G_INTTOPTR
||
4312 ConvUseOpc
== TargetOpcode::G_PTRTOINT
) {
4313 Register DefReg
= ConvUseMI
->getOperand(0).getReg();
4314 if (!MRI
.hasOneNonDBGUse(DefReg
))
4316 ConvUseMI
= &*MRI
.use_instr_nodbg_begin(DefReg
);
4317 ConvUseOpc
= ConvUseMI
->getOpcode();
4319 auto LoadStore
= ConvUseOpc
== TargetOpcode::G_LOAD
||
4320 ConvUseOpc
== TargetOpcode::G_STORE
;
4323 // Is x[offset2] already not a legal addressing mode? If so then
4324 // reassociating the constants breaks nothing (we test offset2 because
4325 // that's the one we hope to fold into the load or store).
4326 TargetLoweringBase::AddrMode AM
;
4327 AM
.HasBaseReg
= true;
4328 AM
.BaseOffs
= C2APIntVal
.getSExtValue();
4330 MRI
.getType(ConvUseMI
->getOperand(1).getReg()).getAddressSpace();
4332 getTypeForLLT(MRI
.getType(ConvUseMI
->getOperand(0).getReg()),
4333 PtrAdd
.getMF()->getFunction().getContext());
4334 const auto &TLI
= *PtrAdd
.getMF()->getSubtarget().getTargetLowering();
4335 if (!TLI
.isLegalAddressingMode(PtrAdd
.getMF()->getDataLayout(), AM
,
4339 // Would x[offset1+offset2] still be a legal addressing mode?
4340 AM
.BaseOffs
= CombinedValue
;
4341 if (!TLI
.isLegalAddressingMode(PtrAdd
.getMF()->getDataLayout(), AM
,
4349 bool CombinerHelper::matchReassocConstantInnerRHS(GPtrAdd
&MI
,
4351 BuildFnTy
&MatchInfo
) {
4352 // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
4353 Register Src1Reg
= MI
.getOperand(1).getReg();
4354 if (RHS
->getOpcode() != TargetOpcode::G_ADD
)
4356 auto C2
= getIConstantVRegVal(RHS
->getOperand(2).getReg(), MRI
);
4360 MatchInfo
= [=, &MI
](MachineIRBuilder
&B
) {
4361 LLT PtrTy
= MRI
.getType(MI
.getOperand(0).getReg());
4364 Builder
.buildPtrAdd(PtrTy
, Src1Reg
, RHS
->getOperand(1).getReg());
4365 Observer
.changingInstr(MI
);
4366 MI
.getOperand(1).setReg(NewBase
.getReg(0));
4367 MI
.getOperand(2).setReg(RHS
->getOperand(2).getReg());
4368 Observer
.changedInstr(MI
);
4370 return !reassociationCanBreakAddressingModePattern(MI
);
4373 bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd
&MI
,
4376 BuildFnTy
&MatchInfo
) {
4377 // G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD(X, Y), C)
4378 // if and only if (G_PTR_ADD X, C) has one use.
4380 std::optional
<ValueAndVReg
> LHSCstOff
;
4381 if (!mi_match(MI
.getBaseReg(), MRI
,
4382 m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase
), m_GCst(LHSCstOff
)))))
4385 auto *LHSPtrAdd
= cast
<GPtrAdd
>(LHS
);
4386 MatchInfo
= [=, &MI
](MachineIRBuilder
&B
) {
4387 // When we change LHSPtrAdd's offset register we might cause it to use a reg
4388 // before its def. Sink the instruction so the outer PTR_ADD to ensure this
4390 LHSPtrAdd
->moveBefore(&MI
);
4391 Register RHSReg
= MI
.getOffsetReg();
4392 // set VReg will cause type mismatch if it comes from extend/trunc
4393 auto NewCst
= B
.buildConstant(MRI
.getType(RHSReg
), LHSCstOff
->Value
);
4394 Observer
.changingInstr(MI
);
4395 MI
.getOperand(2).setReg(NewCst
.getReg(0));
4396 Observer
.changedInstr(MI
);
4397 Observer
.changingInstr(*LHSPtrAdd
);
4398 LHSPtrAdd
->getOperand(2).setReg(RHSReg
);
4399 Observer
.changedInstr(*LHSPtrAdd
);
4401 return !reassociationCanBreakAddressingModePattern(MI
);
4404 bool CombinerHelper::matchReassocFoldConstantsInSubTree(GPtrAdd
&MI
,
4407 BuildFnTy
&MatchInfo
) {
4408 // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4409 auto *LHSPtrAdd
= dyn_cast
<GPtrAdd
>(LHS
);
4413 Register Src2Reg
= MI
.getOperand(2).getReg();
4414 Register LHSSrc1
= LHSPtrAdd
->getBaseReg();
4415 Register LHSSrc2
= LHSPtrAdd
->getOffsetReg();
4416 auto C1
= getIConstantVRegVal(LHSSrc2
, MRI
);
4419 auto C2
= getIConstantVRegVal(Src2Reg
, MRI
);
4423 MatchInfo
= [=, &MI
](MachineIRBuilder
&B
) {
4424 auto NewCst
= B
.buildConstant(MRI
.getType(Src2Reg
), *C1
+ *C2
);
4425 Observer
.changingInstr(MI
);
4426 MI
.getOperand(1).setReg(LHSSrc1
);
4427 MI
.getOperand(2).setReg(NewCst
.getReg(0));
4428 Observer
.changedInstr(MI
);
4430 return !reassociationCanBreakAddressingModePattern(MI
);
4433 bool CombinerHelper::matchReassocPtrAdd(MachineInstr
&MI
,
4434 BuildFnTy
&MatchInfo
) {
4435 auto &PtrAdd
= cast
<GPtrAdd
>(MI
);
4436 // We're trying to match a few pointer computation patterns here for
4437 // re-association opportunities.
4438 // 1) Isolating a constant operand to be on the RHS, e.g.:
4439 // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C)
4441 // 2) Folding two constants in each sub-tree as long as such folding
4442 // doesn't break a legal addressing mode.
4443 // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2)
4445 // 3) Move a constant from the LHS of an inner op to the RHS of the outer.
4446 // G_PTR_ADD (G_PTR_ADD X, C), Y) -> G_PTR_ADD (G_PTR_ADD(X, Y), C)
4447 // iif (G_PTR_ADD X, C) has one use.
4448 MachineInstr
*LHS
= MRI
.getVRegDef(PtrAdd
.getBaseReg());
4449 MachineInstr
*RHS
= MRI
.getVRegDef(PtrAdd
.getOffsetReg());
4451 // Try to match example 2.
4452 if (matchReassocFoldConstantsInSubTree(PtrAdd
, LHS
, RHS
, MatchInfo
))
4455 // Try to match example 3.
4456 if (matchReassocConstantInnerLHS(PtrAdd
, LHS
, RHS
, MatchInfo
))
4459 // Try to match example 1.
4460 if (matchReassocConstantInnerRHS(PtrAdd
, RHS
, MatchInfo
))
4465 bool CombinerHelper::tryReassocBinOp(unsigned Opc
, Register DstReg
,
4466 Register OpLHS
, Register OpRHS
,
4467 BuildFnTy
&MatchInfo
) {
4468 LLT OpRHSTy
= MRI
.getType(OpRHS
);
4469 MachineInstr
*OpLHSDef
= MRI
.getVRegDef(OpLHS
);
4471 if (OpLHSDef
->getOpcode() != Opc
)
4474 MachineInstr
*OpRHSDef
= MRI
.getVRegDef(OpRHS
);
4475 Register OpLHSLHS
= OpLHSDef
->getOperand(1).getReg();
4476 Register OpLHSRHS
= OpLHSDef
->getOperand(2).getReg();
4478 if (isConstantOrConstantSplatVector(*MRI
.getVRegDef(OpLHSRHS
), MRI
)) {
4479 if (isConstantOrConstantSplatVector(*OpRHSDef
, MRI
)) {
4480 // (Opc (Opc X, C1), C2) -> (Opc X, (Opc C1, C2))
4481 MatchInfo
= [=](MachineIRBuilder
&B
) {
4482 auto NewCst
= B
.buildInstr(Opc
, {OpRHSTy
}, {OpLHSRHS
, OpRHS
});
4483 B
.buildInstr(Opc
, {DstReg
}, {OpLHSLHS
, NewCst
});
4487 if (getTargetLowering().isReassocProfitable(MRI
, OpLHS
, OpRHS
)) {
4488 // Reassociate: (op (op x, c1), y) -> (op (op x, y), c1)
4489 // iff (op x, c1) has one use
4490 MatchInfo
= [=](MachineIRBuilder
&B
) {
4491 auto NewLHSLHS
= B
.buildInstr(Opc
, {OpRHSTy
}, {OpLHSLHS
, OpRHS
});
4492 B
.buildInstr(Opc
, {DstReg
}, {NewLHSLHS
, OpLHSRHS
});
4501 bool CombinerHelper::matchReassocCommBinOp(MachineInstr
&MI
,
4502 BuildFnTy
&MatchInfo
) {
4503 // We don't check if the reassociation will break a legal addressing mode
4504 // here since pointer arithmetic is handled by G_PTR_ADD.
4505 unsigned Opc
= MI
.getOpcode();
4506 Register DstReg
= MI
.getOperand(0).getReg();
4507 Register LHSReg
= MI
.getOperand(1).getReg();
4508 Register RHSReg
= MI
.getOperand(2).getReg();
4510 if (tryReassocBinOp(Opc
, DstReg
, LHSReg
, RHSReg
, MatchInfo
))
4512 if (tryReassocBinOp(Opc
, DstReg
, RHSReg
, LHSReg
, MatchInfo
))
    return true;

  return false;
}

bool CombinerHelper::matchConstantFold(MachineInstr &MI, APInt &MatchInfo) {
  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  auto MaybeCst = ConstantFoldBinOp(MI.getOpcode(), Op1, Op2, MRI);
  if (!MaybeCst)
    return false;
  MatchInfo = *MaybeCst;
  return true;
}

4527 bool CombinerHelper::matchNarrowBinopFeedingAnd(
4528 MachineInstr
&MI
, std::function
<void(MachineIRBuilder
&)> &MatchInfo
) {
4529 // Look for a binop feeding into an AND with a mask:
4531 // %add = G_ADD %lhs, %rhs
4532 // %and = G_AND %add, 000...11111111
4534 // Check if it's possible to perform the binop at a narrower width and zext
4535 // back to the original width like so:
4537 // %narrow_lhs = G_TRUNC %lhs
4538 // %narrow_rhs = G_TRUNC %rhs
4539 // %narrow_add = G_ADD %narrow_lhs, %narrow_rhs
4540 // %new_add = G_ZEXT %narrow_add
4541 // %and = G_AND %new_add, 000...11111111
4543 // This can allow later combines to eliminate the G_AND if it turns out
4544 // that the mask is irrelevant.
4545 assert(MI
.getOpcode() == TargetOpcode::G_AND
);
4546 Register Dst
= MI
.getOperand(0).getReg();
4547 Register AndLHS
= MI
.getOperand(1).getReg();
4548 Register AndRHS
= MI
.getOperand(2).getReg();
4549 LLT WideTy
= MRI
.getType(Dst
);
4551 // If the potential binop has more than one use, then it's possible that one
4552 // of those uses will need its full width.
4553 if (!WideTy
.isScalar() || !MRI
.hasOneNonDBGUse(AndLHS
))
4556 // Check if the LHS feeding the AND is impacted by the high bits that we're
4559 // e.g. for 64-bit x, y:
4561 // add_64(x, y) & 65535 == zext(add_16(trunc(x), trunc(y))) & 65535
4562 MachineInstr
*LHSInst
= getDefIgnoringCopies(AndLHS
, MRI
);
4565 unsigned LHSOpc
= LHSInst
->getOpcode();
4569 case TargetOpcode::G_ADD
:
4570 case TargetOpcode::G_SUB
:
4571 case TargetOpcode::G_MUL
:
4572 case TargetOpcode::G_AND
:
4573 case TargetOpcode::G_OR
:
4574 case TargetOpcode::G_XOR
:
4578 // Find the mask on the RHS.
4579 auto Cst
= getIConstantVRegValWithLookThrough(AndRHS
, MRI
);
4582 auto Mask
= Cst
->Value
;
4586 // No point in combining if there's nothing to truncate.
4587 unsigned NarrowWidth
= Mask
.countr_one();
4588 if (NarrowWidth
== WideTy
.getSizeInBits())
4590 LLT NarrowTy
= LLT::scalar(NarrowWidth
);
4592 // Check if adding the zext + truncates could be harmful.
4593 auto &MF
= *MI
.getMF();
4594 const auto &TLI
= getTargetLowering();
4595 LLVMContext
&Ctx
= MF
.getFunction().getContext();
4596 auto &DL
= MF
.getDataLayout();
4597 if (!TLI
.isTruncateFree(WideTy
, NarrowTy
, DL
, Ctx
) ||
4598 !TLI
.isZExtFree(NarrowTy
, WideTy
, DL
, Ctx
))
4600 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_TRUNC
, {NarrowTy
, WideTy
}}) ||
4601 !isLegalOrBeforeLegalizer({TargetOpcode::G_ZEXT
, {WideTy
, NarrowTy
}}))
4603 Register BinOpLHS
= LHSInst
->getOperand(1).getReg();
4604 Register BinOpRHS
= LHSInst
->getOperand(2).getReg();
4605 MatchInfo
= [=, &MI
](MachineIRBuilder
&B
) {
4606 auto NarrowLHS
= Builder
.buildTrunc(NarrowTy
, BinOpLHS
);
4607 auto NarrowRHS
= Builder
.buildTrunc(NarrowTy
, BinOpRHS
);
4609 Builder
.buildInstr(LHSOpc
, {NarrowTy
}, {NarrowLHS
, NarrowRHS
});
4610 auto Ext
= Builder
.buildZExt(WideTy
, NarrowBinOp
);
4611 Observer
.changingInstr(MI
);
4612 MI
.getOperand(1).setReg(Ext
.getReg(0));
4613 Observer
.changedInstr(MI
);
  };
  return true;
}

bool CombinerHelper::matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_UMULO || Opc == TargetOpcode::G_SMULO);
  // (G_*MULO x, 2) -> (G_*ADDO x, x)
  if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(2)))
    return false;

  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    Observer.changingInstr(MI);
    unsigned NewOpc = Opc == TargetOpcode::G_UMULO ? TargetOpcode::G_UADDO
                                                   : TargetOpcode::G_SADDO;
    MI.setDesc(Builder.getTII().get(NewOpc));
    MI.getOperand(3).setReg(MI.getOperand(2).getReg());
    Observer.changedInstr(MI);
  };
  return true;
}

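// For example (illustrative): G_UMULO %x, 2 becomes G_UADDO %x, %x, and
// G_SMULO %x, 2 becomes G_SADDO %x, %x; the overflow output is preserved,
// since doubling overflows exactly when the addition does.
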
bool CombinerHelper::matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) {
  // (G_*MULO x, 0) -> 0 + no carry out
  assert(MI.getOpcode() == TargetOpcode::G_UMULO ||
         MI.getOpcode() == TargetOpcode::G_SMULO);
  if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0)))
    return false;
  Register Dst = MI.getOperand(0).getReg();
  Register Carry = MI.getOperand(1).getReg();
  if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Dst)) ||
      !isConstantLegalOrBeforeLegalizer(MRI.getType(Carry)))
    return false;
  MatchInfo = [=](MachineIRBuilder &B) {
    B.buildConstant(Dst, 0);
    B.buildConstant(Carry, 0);
  };
  return true;
}

bool CombinerHelper::matchAddOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) {
  // (G_*ADDO x, 0) -> x + no carry out
  assert(MI.getOpcode() == TargetOpcode::G_UADDO ||
         MI.getOpcode() == TargetOpcode::G_SADDO);
  if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0)))
    return false;
  Register Carry = MI.getOperand(1).getReg();
  if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Carry)))
    return false;
  Register Dst = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(2).getReg();
  MatchInfo = [=](MachineIRBuilder &B) {
    B.buildCopy(Dst, LHS);
    B.buildConstant(Carry, 0);
  };
  return true;
}

4672 bool CombinerHelper::matchAddEToAddO(MachineInstr
&MI
, BuildFnTy
&MatchInfo
) {
4673 // (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
4674 // (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
4675 assert(MI
.getOpcode() == TargetOpcode::G_UADDE
||
4676 MI
.getOpcode() == TargetOpcode::G_SADDE
||
4677 MI
.getOpcode() == TargetOpcode::G_USUBE
||
4678 MI
.getOpcode() == TargetOpcode::G_SSUBE
);
4679 if (!mi_match(MI
.getOperand(4).getReg(), MRI
, m_SpecificICstOrSplat(0)))
4681 MatchInfo
= [&](MachineIRBuilder
&B
) {
4683 switch (MI
.getOpcode()) {
4684 case TargetOpcode::G_UADDE
:
4685 NewOpcode
= TargetOpcode::G_UADDO
;
4687 case TargetOpcode::G_SADDE
:
4688 NewOpcode
= TargetOpcode::G_SADDO
;
4690 case TargetOpcode::G_USUBE
:
4691 NewOpcode
= TargetOpcode::G_USUBO
;
4693 case TargetOpcode::G_SSUBE
:
4694 NewOpcode
= TargetOpcode::G_SSUBO
;
4697 Observer
.changingInstr(MI
);
4698 MI
.setDesc(B
.getTII().get(NewOpcode
));
4699 MI
.removeOperand(4);
4700 Observer
.changedInstr(MI
);
4705 bool CombinerHelper::matchSubAddSameReg(MachineInstr
&MI
,
4706 BuildFnTy
&MatchInfo
) {
4707 assert(MI
.getOpcode() == TargetOpcode::G_SUB
);
4708 Register Dst
= MI
.getOperand(0).getReg();
4709 // (x + y) - z -> x (if y == z)
4710 // (x + y) - z -> y (if x == z)
4712 if (mi_match(Dst
, MRI
, m_GSub(m_GAdd(m_Reg(X
), m_Reg(Y
)), m_Reg(Z
)))) {
4713 Register ReplaceReg
;
4715 if (Y
== Z
|| (mi_match(Y
, MRI
, m_ICstOrSplat(CstY
)) &&
4716 mi_match(Z
, MRI
, m_SpecificICstOrSplat(CstY
))))
4718 else if (X
== Z
|| (mi_match(X
, MRI
, m_ICstOrSplat(CstX
)) &&
4719 mi_match(Z
, MRI
, m_SpecificICstOrSplat(CstX
))))
4722 MatchInfo
= [=](MachineIRBuilder
&B
) { B
.buildCopy(Dst
, ReplaceReg
); };
4727 // x - (y + z) -> 0 - y (if x == z)
4728 // x - (y + z) -> 0 - z (if x == y)
4729 if (mi_match(Dst
, MRI
, m_GSub(m_Reg(X
), m_GAdd(m_Reg(Y
), m_Reg(Z
))))) {
4730 Register ReplaceReg
;
4732 if (X
== Z
|| (mi_match(X
, MRI
, m_ICstOrSplat(CstX
)) &&
4733 mi_match(Z
, MRI
, m_SpecificICstOrSplat(CstX
))))
4735 else if (X
== Y
|| (mi_match(X
, MRI
, m_ICstOrSplat(CstX
)) &&
4736 mi_match(Y
, MRI
, m_SpecificICstOrSplat(CstX
))))
4739 MatchInfo
= [=](MachineIRBuilder
&B
) {
4740 auto Zero
= B
.buildConstant(MRI
.getType(Dst
), 0);
4741 B
.buildSub(Dst
, Zero
, ReplaceReg
);
4749 MachineInstr
*CombinerHelper::buildUDivUsingMul(MachineInstr
&MI
) {
4750 assert(MI
.getOpcode() == TargetOpcode::G_UDIV
);
4751 auto &UDiv
= cast
<GenericMachineInstr
>(MI
);
4752 Register Dst
= UDiv
.getReg(0);
4753 Register LHS
= UDiv
.getReg(1);
4754 Register RHS
= UDiv
.getReg(2);
4755 LLT Ty
= MRI
.getType(Dst
);
4756 LLT ScalarTy
= Ty
.getScalarType();
4757 const unsigned EltBits
= ScalarTy
.getScalarSizeInBits();
4758 LLT ShiftAmtTy
= getTargetLowering().getPreferredShiftAmountTy(Ty
);
4759 LLT ScalarShiftAmtTy
= ShiftAmtTy
.getScalarType();
4760 auto &MIB
= Builder
;
4761 MIB
.setInstrAndDebugLoc(MI
);
4763 bool UseNPQ
= false;
4764 SmallVector
<Register
, 16> PreShifts
, PostShifts
, MagicFactors
, NPQFactors
;
4766 auto BuildUDIVPattern
= [&](const Constant
*C
) {
4767 auto *CI
= cast
<ConstantInt
>(C
);
4768 const APInt
&Divisor
= CI
->getValue();
4770 bool SelNPQ
= false;
4771 APInt
Magic(Divisor
.getBitWidth(), 0);
4772 unsigned PreShift
= 0, PostShift
= 0;
4774 // Magic algorithm doesn't work for division by 1. We need to emit a select
4776 // TODO: Use undef values for divisor of 1.
4777 if (!Divisor
.isOne()) {
4778 UnsignedDivisionByConstantInfo magics
=
4779 UnsignedDivisionByConstantInfo::get(Divisor
);
4781 Magic
= std::move(magics
.Magic
);
4783 assert(magics
.PreShift
< Divisor
.getBitWidth() &&
4784 "We shouldn't generate an undefined shift!");
4785 assert(magics
.PostShift
< Divisor
.getBitWidth() &&
4786 "We shouldn't generate an undefined shift!");
4787 assert((!magics
.IsAdd
|| magics
.PreShift
== 0) && "Unexpected pre-shift");
4788 PreShift
= magics
.PreShift
;
4789 PostShift
= magics
.PostShift
;
4790 SelNPQ
= magics
.IsAdd
;
4793 PreShifts
.push_back(
4794 MIB
.buildConstant(ScalarShiftAmtTy
, PreShift
).getReg(0));
4795 MagicFactors
.push_back(MIB
.buildConstant(ScalarTy
, Magic
).getReg(0));
4796 NPQFactors
.push_back(
4797 MIB
.buildConstant(ScalarTy
,
4798 SelNPQ
? APInt::getOneBitSet(EltBits
, EltBits
- 1)
4799 : APInt::getZero(EltBits
))
4801 PostShifts
.push_back(
4802 MIB
.buildConstant(ScalarShiftAmtTy
, PostShift
).getReg(0));
4807 // Collect the shifts/magic values from each element.
4808 bool Matched
= matchUnaryPredicate(MRI
, RHS
, BuildUDIVPattern
);
4810 assert(Matched
&& "Expected unary predicate match to succeed");
4812 Register PreShift
, PostShift
, MagicFactor
, NPQFactor
;
4813 auto *RHSDef
= getOpcodeDef
<GBuildVector
>(RHS
, MRI
);
4815 PreShift
= MIB
.buildBuildVector(ShiftAmtTy
, PreShifts
).getReg(0);
4816 MagicFactor
= MIB
.buildBuildVector(Ty
, MagicFactors
).getReg(0);
4817 NPQFactor
= MIB
.buildBuildVector(Ty
, NPQFactors
).getReg(0);
4818 PostShift
= MIB
.buildBuildVector(ShiftAmtTy
, PostShifts
).getReg(0);
4820 assert(MRI
.getType(RHS
).isScalar() &&
4821 "Non-build_vector operation should have been a scalar");
4822 PreShift
= PreShifts
[0];
4823 MagicFactor
= MagicFactors
[0];
4824 PostShift
= PostShifts
[0];
4828 Q
= MIB
.buildLShr(Ty
, Q
, PreShift
).getReg(0);
4830 // Multiply the numerator (operand 0) by the magic value.
4831 Q
= MIB
.buildUMulH(Ty
, Q
, MagicFactor
).getReg(0);
4834 Register NPQ
= MIB
.buildSub(Ty
, LHS
, Q
).getReg(0);
4836 // For vectors we might have a mix of non-NPQ/NPQ paths, so use
4837 // G_UMULH to act as a SRL-by-1 for NPQ, else multiply by zero.
4839 NPQ
= MIB
.buildUMulH(Ty
, NPQ
, NPQFactor
).getReg(0);
4841 NPQ
= MIB
.buildLShr(Ty
, NPQ
, MIB
.buildConstant(ShiftAmtTy
, 1)).getReg(0);
4843 Q
= MIB
.buildAdd(Ty
, NPQ
, Q
).getReg(0);
4846 Q
= MIB
.buildLShr(Ty
, Q
, PostShift
).getReg(0);
4847 auto One
= MIB
.buildConstant(Ty
, 1);
4848 auto IsOne
= MIB
.buildICmp(
4849 CmpInst::Predicate::ICMP_EQ
,
4850 Ty
.isScalar() ? LLT::scalar(1) : Ty
.changeElementSize(1), RHS
, One
);
4851 return MIB
.buildSelect(Ty
, IsOne
, LHS
, Q
);
4854 bool CombinerHelper::matchUDivByConst(MachineInstr
&MI
) {
4855 assert(MI
.getOpcode() == TargetOpcode::G_UDIV
);
4856 Register Dst
= MI
.getOperand(0).getReg();
4857 Register RHS
= MI
.getOperand(2).getReg();
4858 LLT DstTy
= MRI
.getType(Dst
);
4859 auto *RHSDef
= MRI
.getVRegDef(RHS
);
4860 if (!isConstantOrConstantVector(*RHSDef
, MRI
))
4863 auto &MF
= *MI
.getMF();
4864 AttributeList Attr
= MF
.getFunction().getAttributes();
4865 const auto &TLI
= getTargetLowering();
4866 LLVMContext
&Ctx
= MF
.getFunction().getContext();
4867 auto &DL
= MF
.getDataLayout();
4868 if (TLI
.isIntDivCheap(getApproximateEVTForLLT(DstTy
, DL
, Ctx
), Attr
))
4871 // Don't do this for minsize because the instruction sequence is usually
4873 if (MF
.getFunction().hasMinSize())
4876 // Don't do this if the types are not going to be legal.
4878 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_MUL
, {DstTy
, DstTy
}}))
4880 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_UMULH
, {DstTy
}}))
4882 if (!isLegalOrBeforeLegalizer(
4883 {TargetOpcode::G_ICMP
,
4884 {DstTy
.isVector() ? DstTy
.changeElementSize(1) : LLT::scalar(1),
4889 auto CheckEltValue
= [&](const Constant
*C
) {
4890 if (auto *CI
= dyn_cast_or_null
<ConstantInt
>(C
))
4891 return !CI
->isZero();
4894 return matchUnaryPredicate(MRI
, RHS
, CheckEltValue
);
}

void CombinerHelper::applyUDivByConst(MachineInstr &MI) {
  auto *NewMI = buildUDivUsingMul(MI);
  replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
}

4902 bool CombinerHelper::matchSDivByConst(MachineInstr
&MI
) {
4903 assert(MI
.getOpcode() == TargetOpcode::G_SDIV
&& "Expected SDIV");
4904 Register Dst
= MI
.getOperand(0).getReg();
4905 Register RHS
= MI
.getOperand(2).getReg();
4906 LLT DstTy
= MRI
.getType(Dst
);
4908 auto &MF
= *MI
.getMF();
4909 AttributeList Attr
= MF
.getFunction().getAttributes();
4910 const auto &TLI
= getTargetLowering();
4911 LLVMContext
&Ctx
= MF
.getFunction().getContext();
4912 auto &DL
= MF
.getDataLayout();
4913 if (TLI
.isIntDivCheap(getApproximateEVTForLLT(DstTy
, DL
, Ctx
), Attr
))
4916 // Don't do this for minsize because the instruction sequence is usually
4918 if (MF
.getFunction().hasMinSize())
4921 // If the sdiv has an 'exact' flag we can use a simpler lowering.
4922 if (MI
.getFlag(MachineInstr::MIFlag::IsExact
)) {
4923 return matchUnaryPredicate(
4924 MRI
, RHS
, [](const Constant
*C
) { return C
&& !C
->isZeroValue(); });
4927 // Don't support the general case for now.
  return false;
}

void CombinerHelper::applySDivByConst(MachineInstr &MI) {
  auto *NewMI = buildSDivUsingMul(MI);
  replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg());
}

4936 MachineInstr
*CombinerHelper::buildSDivUsingMul(MachineInstr
&MI
) {
4937 assert(MI
.getOpcode() == TargetOpcode::G_SDIV
&& "Expected SDIV");
4938 auto &SDiv
= cast
<GenericMachineInstr
>(MI
);
4939 Register Dst
= SDiv
.getReg(0);
4940 Register LHS
= SDiv
.getReg(1);
4941 Register RHS
= SDiv
.getReg(2);
4942 LLT Ty
= MRI
.getType(Dst
);
4943 LLT ScalarTy
= Ty
.getScalarType();
4944 LLT ShiftAmtTy
= getTargetLowering().getPreferredShiftAmountTy(Ty
);
4945 LLT ScalarShiftAmtTy
= ShiftAmtTy
.getScalarType();
4946 auto &MIB
= Builder
;
4947 MIB
.setInstrAndDebugLoc(MI
);
4949 bool UseSRA
= false;
4950 SmallVector
<Register
, 16> Shifts
, Factors
;
4952 auto *RHSDef
= cast
<GenericMachineInstr
>(getDefIgnoringCopies(RHS
, MRI
));
4953 bool IsSplat
= getIConstantSplatVal(*RHSDef
, MRI
).has_value();
4955 auto BuildSDIVPattern
= [&](const Constant
*C
) {
4956 // Don't recompute inverses for each splat element.
4957 if (IsSplat
&& !Factors
.empty()) {
4958 Shifts
.push_back(Shifts
[0]);
4959 Factors
.push_back(Factors
[0]);
4963 auto *CI
= cast
<ConstantInt
>(C
);
4964 APInt Divisor
= CI
->getValue();
4965 unsigned Shift
= Divisor
.countr_zero();
4967 Divisor
.ashrInPlace(Shift
);
4971 // Calculate the multiplicative inverse modulo BW.
4972 // 2^W requires W + 1 bits, so we have to extend and then truncate.
4973 unsigned W
= Divisor
.getBitWidth();
4974 APInt Factor
= Divisor
.zext(W
+ 1)
4975 .multiplicativeInverse(APInt::getSignedMinValue(W
+ 1))
4977 Shifts
.push_back(MIB
.buildConstant(ScalarShiftAmtTy
, Shift
).getReg(0));
4978 Factors
.push_back(MIB
.buildConstant(ScalarTy
, Factor
).getReg(0));
4982 // Collect all magic values from the build vector.
4983 bool Matched
= matchUnaryPredicate(MRI
, RHS
, BuildSDIVPattern
);
4985 assert(Matched
&& "Expected unary predicate match to succeed");
4987 Register Shift
, Factor
;
4988 if (Ty
.isVector()) {
4989 Shift
= MIB
.buildBuildVector(ShiftAmtTy
, Shifts
).getReg(0);
4990 Factor
= MIB
.buildBuildVector(Ty
, Factors
).getReg(0);
4993 Factor
= Factors
[0];
4999 Res
= MIB
.buildAShr(Ty
, Res
, Shift
, MachineInstr::IsExact
).getReg(0);
5001 return MIB
.buildMul(Ty
, Res
, Factor
);
5004 bool CombinerHelper::matchUMulHToLShr(MachineInstr
&MI
) {
5005 assert(MI
.getOpcode() == TargetOpcode::G_UMULH
);
5006 Register RHS
= MI
.getOperand(2).getReg();
5007 Register Dst
= MI
.getOperand(0).getReg();
5008 LLT Ty
= MRI
.getType(Dst
);
5009 LLT ShiftAmtTy
= getTargetLowering().getPreferredShiftAmountTy(Ty
);
5010 auto MatchPow2ExceptOne
= [&](const Constant
*C
) {
5011 if (auto *CI
= dyn_cast
<ConstantInt
>(C
))
5012 return CI
->getValue().isPowerOf2() && !CI
->getValue().isOne();
5015 if (!matchUnaryPredicate(MRI
, RHS
, MatchPow2ExceptOne
, false))
5017 return isLegalOrBeforeLegalizer({TargetOpcode::G_LSHR
, {Ty
, ShiftAmtTy
}});
5020 void CombinerHelper::applyUMulHToLShr(MachineInstr
&MI
) {
5021 Register LHS
= MI
.getOperand(1).getReg();
5022 Register RHS
= MI
.getOperand(2).getReg();
5023 Register Dst
= MI
.getOperand(0).getReg();
5024 LLT Ty
= MRI
.getType(Dst
);
5025 LLT ShiftAmtTy
= getTargetLowering().getPreferredShiftAmountTy(Ty
);
5026 unsigned NumEltBits
= Ty
.getScalarSizeInBits();
5028 Builder
.setInstrAndDebugLoc(MI
);
5029 auto LogBase2
= buildLogBase2(RHS
, Builder
);
5031 Builder
.buildSub(Ty
, Builder
.buildConstant(Ty
, NumEltBits
), LogBase2
);
5032 auto Trunc
= Builder
.buildZExtOrTrunc(ShiftAmtTy
, ShiftAmt
);
5033 Builder
.buildLShr(Dst
, LHS
, Trunc
);
5034 MI
.eraseFromParent();
bool CombinerHelper::matchRedundantNegOperands(MachineInstr &MI,
                                               BuildFnTy &MatchInfo) {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_FADD || Opc == TargetOpcode::G_FSUB ||
         Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
         Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA);

  Register Dst = MI.getOperand(0).getReg();
  Register X = MI.getOperand(1).getReg();
  Register Y = MI.getOperand(2).getReg();
  LLT Type = MRI.getType(Dst);

  // fold (fadd x, fneg(y)) -> (fsub x, y)
  // fold (fadd fneg(y), x) -> (fsub x, y)
  // G_ADD is commutative so both cases are checked by m_GFAdd
  if (mi_match(Dst, MRI, m_GFAdd(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
      isLegalOrBeforeLegalizer({TargetOpcode::G_FSUB, {Type}})) {
    Opc = TargetOpcode::G_FSUB;
  }
  /// fold (fsub x, fneg(y)) -> (fadd x, y)
  else if (mi_match(Dst, MRI, m_GFSub(m_Reg(X), m_GFNeg(m_Reg(Y)))) &&
           isLegalOrBeforeLegalizer({TargetOpcode::G_FADD, {Type}})) {
    Opc = TargetOpcode::G_FADD;
  }
  // fold (fmul fneg(x), fneg(y)) -> (fmul x, y)
  // fold (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
  // fold (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
  // fold (fma fneg(x), fneg(y), z) -> (fma x, y, z)
  else if ((Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV ||
            Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA) &&
           mi_match(X, MRI, m_GFNeg(m_Reg(X))) &&
           mi_match(Y, MRI, m_GFNeg(m_Reg(Y)))) {
  } else
    return false;

  MatchInfo = [=, &MI](MachineIRBuilder &B) {
    Observer.changingInstr(MI);
    MI.setDesc(B.getTII().get(Opc));
    MI.getOperand(1).setReg(X);
    MI.getOperand(2).setReg(Y);
    Observer.changedInstr(MI);
  };
  return true;
}
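
// For instance (illustrative registers only), when G_FSUB is legal the match
// above rewrites
//   %n:_(s32) = G_FNEG %y
//   %d:_(s32) = G_FADD %x, %n
// in place into
//   %d:_(s32) = G_FSUB %x, %y
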
bool CombinerHelper::matchFsubToFneg(MachineInstr &MI, Register &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  Register LHS = MI.getOperand(1).getReg();
  MatchInfo = MI.getOperand(2).getReg();
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());

  const auto LHSCst = Ty.isVector()
                          ? getFConstantSplat(LHS, MRI, /* allowUndef */ true)
                          : getFConstantVRegValWithLookThrough(LHS, MRI);
  if (!LHSCst)
    return false;

  // -0.0 is always allowed
  if (LHSCst->Value.isNegZero())
    return true;

  // +0.0 is only allowed if nsz is set.
  if (LHSCst->Value.isPosZero())
    return MI.getFlag(MachineInstr::FmNsz);

  return false;
}
void CombinerHelper::applyFsubToFneg(MachineInstr &MI, Register &MatchInfo) {
  Builder.setInstrAndDebugLoc(MI);
  Register Dst = MI.getOperand(0).getReg();
  Builder.buildFNeg(
      Dst, Builder.buildFCanonicalize(MRI.getType(Dst), MatchInfo).getReg(0));
  MI.eraseFromParent();
}
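
// Illustrative shape of the two routines above (hypothetical registers):
//   %z:_(s32) = G_FCONSTANT float -0.0
//   %d:_(s32) = G_FSUB %z, %x
// becomes
//   %c:_(s32) = G_FCANONICALIZE %x
//   %d:_(s32) = G_FNEG %c
// With a +0.0 left-hand side the rewrite is only performed under the nsz
// flag, as checked in the matcher.
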
/// Checks if \p MI is TargetOpcode::G_FMUL and contractable either
/// due to global flags or MachineInstr flags.
static bool isContractableFMul(MachineInstr &MI, bool AllowFusionGlobally) {
  if (MI.getOpcode() != TargetOpcode::G_FMUL)
    return false;
  return AllowFusionGlobally || MI.getFlag(MachineInstr::MIFlag::FmContract);
}
static bool hasMoreUses(const MachineInstr &MI0, const MachineInstr &MI1,
                        const MachineRegisterInfo &MRI) {
  return std::distance(MRI.use_instr_nodbg_begin(MI0.getOperand(0).getReg()),
                       MRI.use_instr_nodbg_end()) >
         std::distance(MRI.use_instr_nodbg_begin(MI1.getOperand(0).getReg()),
                       MRI.use_instr_nodbg_end());
}
bool CombinerHelper::canCombineFMadOrFMA(MachineInstr &MI,
                                         bool &AllowFusionGlobally,
                                         bool &HasFMAD, bool &Aggressive,
                                         bool CanReassociate) {

  auto *MF = MI.getMF();
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  const TargetOptions &Options = MF->getTarget().Options;
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());

  if (CanReassociate &&
      !(Options.UnsafeFPMath || MI.getFlag(MachineInstr::MIFlag::FmReassoc)))
    return false;

  // Floating-point multiply-add with intermediate rounding.
  HasFMAD = (!isPreLegalize() && TLI.isFMADLegal(MI, DstType));
  // Floating-point multiply-add without intermediate rounding.
  bool HasFMA = TLI.isFMAFasterThanFMulAndFAdd(*MF, DstType) &&
                isLegalOrBeforeLegalizer({TargetOpcode::G_FMA, {DstType}});
  // No valid opcode, do not combine.
  if (!HasFMAD && !HasFMA)
    return false;

  AllowFusionGlobally = Options.AllowFPOpFusion == FPOpFusion::Fast ||
                        Options.UnsafeFPMath || HasFMAD;
  // If the addition is not contractable, do not combine.
  if (!AllowFusionGlobally && !MI.getFlag(MachineInstr::MIFlag::FmContract))
    return false;

  Aggressive = TLI.enableAggressiveFMAFusion(DstType);
  return true;
}
bool CombinerHelper::matchCombineFAddFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FADD);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
  DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
    if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
      std::swap(LHS, RHS);
  }

  // fold (fadd (fmul x, y), z) -> (fma x, y, z)
  if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {LHS.MI->getOperand(1).getReg(),
                    LHS.MI->getOperand(2).getReg(), RHS.Reg});
    };
    return true;
  }

  // fold (fadd x, (fmul y, z)) -> (fma y, z, x)
  if (isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
      (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {RHS.MI->getOperand(1).getReg(),
                    RHS.MI->getOperand(2).getReg(), LHS.Reg});
    };
    return true;
  }

  return false;
}
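
// Typical shape of the fold above (registers are illustrative): when
// contraction is allowed and the multiply has a single use,
//   %m:_(s32) = G_FMUL %x, %y
//   %a:_(s32) = G_FADD %m, %z
// is combined into
//   %a:_(s32) = G_FMA %x, %y, %z
// (or G_FMAD when that is the preferred legal opcode).
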
bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FADD);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
  DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
    if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
      std::swap(LHS, RHS);
  }

  // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
  MachineInstr *FpExtSrc;
  if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
      isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
      auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FpExtX.getReg(0), FpExtY.getReg(0), RHS.Reg});
    };
    return true;
  }

  // fold (fadd z, (fpext (fmul x, y))) -> (fma (fpext x), (fpext y), z)
  // Note: Commutes FADD operands.
  if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) &&
      isContractableFMul(*FpExtSrc, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          MRI.getType(FpExtSrc->getOperand(1).getReg()))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg());
      auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg());
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FpExtX.getReg(0), FpExtY.getReg(0), LHS.Reg});
    };
    return true;
  }

  return false;
}
bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FADD);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive, true))
    return false;

  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
  DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
    if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
      std::swap(LHS, RHS);
  }

  MachineInstr *FMA = nullptr;
  Register Z;
  // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
  if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
      (MRI.getVRegDef(LHS.MI->getOperand(3).getReg())->getOpcode() ==
       TargetOpcode::G_FMUL) &&
      MRI.hasOneNonDBGUse(LHS.MI->getOperand(0).getReg()) &&
      MRI.hasOneNonDBGUse(LHS.MI->getOperand(3).getReg())) {
    FMA = LHS.MI;
    Z = RHS.Reg;
  }
  // fold (fadd z, (fma x, y, (fmul u, v))) -> (fma x, y, (fma u, v, z))
  else if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
           (MRI.getVRegDef(RHS.MI->getOperand(3).getReg())->getOpcode() ==
            TargetOpcode::G_FMUL) &&
           MRI.hasOneNonDBGUse(RHS.MI->getOperand(0).getReg()) &&
           MRI.hasOneNonDBGUse(RHS.MI->getOperand(3).getReg())) {
    Z = LHS.Reg;
    FMA = RHS.MI;
  }

  if (FMA) {
    MachineInstr *FMulMI = MRI.getVRegDef(FMA->getOperand(3).getReg());
    Register X = FMA->getOperand(1).getReg();
    Register Y = FMA->getOperand(2).getReg();
    Register U = FMulMI->getOperand(1).getReg();
    Register V = FMulMI->getOperand(2).getReg();

    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register InnerFMA = MRI.createGenericVirtualRegister(DstTy);
      B.buildInstr(PreferredFusedOpcode, {InnerFMA}, {U, V, Z});
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {X, Y, InnerFMA});
    };
    return true;
  }

  return false;
}
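
// Net effect of the match above, sketched with hypothetical registers:
//   %m:_(s32)   = G_FMUL %u, %v
//   %fma:_(s32) = G_FMA %x, %y, %m
//   %a:_(s32)   = G_FADD %fma, %z
// becomes a chain of two fused operations:
//   %inner:_(s32) = G_FMA %u, %v, %z
//   %a:_(s32)     = G_FMA %x, %y, %inner
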
bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FADD);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  if (!Aggressive)
    return false;

  const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
  LLT DstType = MRI.getType(MI.getOperand(0).getReg());
  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
  DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      isContractableFMul(*RHS.MI, AllowFusionGlobally)) {
    if (hasMoreUses(*LHS.MI, *RHS.MI, MRI))
      std::swap(LHS, RHS);
  }

  // Builds: (fma x, y, (fma (fpext u), (fpext v), z))
  auto buildMatchInfo = [=, &MI](Register U, Register V, Register Z,
                                 Register X, Register Y,
                                 MachineIRBuilder &B) {
    Register FpExtU = B.buildFPExt(DstType, U).getReg(0);
    Register FpExtV = B.buildFPExt(DstType, V).getReg(0);
    Register InnerFMA =
        B.buildInstr(PreferredFusedOpcode, {DstType}, {FpExtU, FpExtV, Z})
            .getReg(0);
    B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                 {X, Y, InnerFMA});
  };

  MachineInstr *FMulMI, *FMAMI;
  // fold (fadd (fma x, y, (fpext (fmul u, v))), z)
  //   -> (fma x, y, (fma (fpext u), (fpext v), z))
  if (LHS.MI->getOpcode() == PreferredFusedOpcode &&
      mi_match(LHS.MI->getOperand(3).getReg(), MRI,
               m_GFPExt(m_MInstr(FMulMI))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
    MatchInfo = [=](MachineIRBuilder &B) {
      buildMatchInfo(FMulMI->getOperand(1).getReg(),
                     FMulMI->getOperand(2).getReg(), RHS.Reg,
                     LHS.MI->getOperand(1).getReg(),
                     LHS.MI->getOperand(2).getReg(), B);
    };
    return true;
  }

  // fold (fadd (fpext (fma x, y, (fmul u, v))), z)
  //   -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
  // FIXME: This turns two single-precision and one double-precision
  // operation into two double-precision operations, which might not be
  // interesting for all targets, especially GPUs.
  if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
      FMAMI->getOpcode() == PreferredFusedOpcode) {
    MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
    if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
        TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                            MRI.getType(FMAMI->getOperand(0).getReg()))) {
      MatchInfo = [=](MachineIRBuilder &B) {
        Register X = FMAMI->getOperand(1).getReg();
        Register Y = FMAMI->getOperand(2).getReg();
        X = B.buildFPExt(DstType, X).getReg(0);
        Y = B.buildFPExt(DstType, Y).getReg(0);
        buildMatchInfo(FMulMI->getOperand(1).getReg(),
                       FMulMI->getOperand(2).getReg(), RHS.Reg, X, Y, B);
      };
      return true;
    }
  }

  // fold (fadd z, (fma x, y, (fpext (fmul u, v)))
  //   -> (fma x, y, (fma (fpext u), (fpext v), z))
  if (RHS.MI->getOpcode() == PreferredFusedOpcode &&
      mi_match(RHS.MI->getOperand(3).getReg(), MRI,
               m_GFPExt(m_MInstr(FMulMI))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
    MatchInfo = [=](MachineIRBuilder &B) {
      buildMatchInfo(FMulMI->getOperand(1).getReg(),
                     FMulMI->getOperand(2).getReg(), LHS.Reg,
                     RHS.MI->getOperand(1).getReg(),
                     RHS.MI->getOperand(2).getReg(), B);
    };
    return true;
  }

  // fold (fadd z, (fpext (fma x, y, (fmul u, v)))
  //   -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z))
  // FIXME: This turns two single-precision and one double-precision
  // operation into two double-precision operations, which might not be
  // interesting for all targets, especially GPUs.
  if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) &&
      FMAMI->getOpcode() == PreferredFusedOpcode) {
    MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg());
    if (isContractableFMul(*FMulMI, AllowFusionGlobally) &&
        TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType,
                            MRI.getType(FMAMI->getOperand(0).getReg()))) {
      MatchInfo = [=](MachineIRBuilder &B) {
        Register X = FMAMI->getOperand(1).getReg();
        Register Y = FMAMI->getOperand(2).getReg();
        X = B.buildFPExt(DstType, X).getReg(0);
        Y = B.buildFPExt(DstType, Y).getReg(0);
        buildMatchInfo(FMulMI->getOperand(1).getReg(),
                       FMulMI->getOperand(2).getReg(), LHS.Reg, X, Y, B);
      };
      return true;
    }
  }

  return false;
}
bool CombinerHelper::matchCombineFSubFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  Register Op1 = MI.getOperand(1).getReg();
  Register Op2 = MI.getOperand(2).getReg();
  DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1};
  DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2};
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
  // prefer to fold the multiply with fewer uses.
  bool FirstMulHasFewerUses = true;
  if (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
      isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
      hasMoreUses(*LHS.MI, *RHS.MI, MRI))
    FirstMulHasFewerUses = false;

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  // fold (fsub (fmul x, y), z) -> (fma x, y, -z)
  if (FirstMulHasFewerUses &&
      (isContractableFMul(*LHS.MI, AllowFusionGlobally) &&
       (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg)))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register NegZ = B.buildFNeg(DstTy, RHS.Reg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {LHS.MI->getOperand(1).getReg(),
                    LHS.MI->getOperand(2).getReg(), NegZ});
    };
    return true;
  }
  // fold (fsub x, (fmul y, z)) -> (fma -y, z, x)
  else if ((isContractableFMul(*RHS.MI, AllowFusionGlobally) &&
            (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg)))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register NegY =
          B.buildFNeg(DstTy, RHS.MI->getOperand(1).getReg()).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {NegY, RHS.MI->getOperand(2).getReg(), LHS.Reg});
    };
    return true;
  }

  return false;
}
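
// Sketch of the first case above (illustrative registers):
//   %m:_(s32) = G_FMUL %x, %y
//   %d:_(s32) = G_FSUB %m, %z
// becomes
//   %nz:_(s32) = G_FNEG %z
//   %d:_(s32)  = G_FMA %x, %y, %nz
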
bool CombinerHelper::matchCombineFSubFNegFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  Register LHSReg = MI.getOperand(1).getReg();
  Register RHSReg = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  MachineInstr *FMulMI;
  // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
  if (mi_match(LHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
      (Aggressive || (MRI.hasOneNonDBGUse(LHSReg) &&
                      MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally)) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register NegX =
          B.buildFNeg(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
      Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {NegX, FMulMI->getOperand(2).getReg(), NegZ});
    };
    return true;
  }

  // fold (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x)
  if (mi_match(RHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) &&
      (Aggressive || (MRI.hasOneNonDBGUse(RHSReg) &&
                      MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally)) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FMulMI->getOperand(1).getReg(),
                    FMulMI->getOperand(2).getReg(), LHSReg});
    };
    return true;
  }

  return false;
}
bool CombinerHelper::matchCombineFSubFpExtFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  Register LHSReg = MI.getOperand(1).getReg();
  Register RHSReg = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  MachineInstr *FMulMI;
  // fold (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z))
  if (mi_match(LHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      (Aggressive || MRI.hasOneNonDBGUse(LHSReg))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register FpExtX =
          B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
      Register FpExtY =
          B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0);
      Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {FpExtX, FpExtY, NegZ});
    };
    return true;
  }

  // fold (fsub x, (fpext (fmul y, z))) -> (fma (fneg (fpext y)), (fpext z), x)
  if (mi_match(RHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      (Aggressive || MRI.hasOneNonDBGUse(RHSReg))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register FpExtY =
          B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0);
      Register NegY = B.buildFNeg(DstTy, FpExtY).getReg(0);
      Register FpExtZ =
          B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0);
      B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()},
                   {NegY, FpExtZ, LHSReg});
    };
    return true;
  }

  return false;
}
bool CombinerHelper::matchCombineFSubFpExtFNegFMulToFMadOrFMA(
    MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_FSUB);

  bool AllowFusionGlobally, HasFMAD, Aggressive;
  if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive))
    return false;

  const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering();
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  Register LHSReg = MI.getOperand(1).getReg();
  Register RHSReg = MI.getOperand(2).getReg();

  unsigned PreferredFusedOpcode =
      HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA;

  auto buildMatchInfo = [=](Register Dst, Register X, Register Y, Register Z,
                            MachineIRBuilder &B) {
    Register FpExtX = B.buildFPExt(DstTy, X).getReg(0);
    Register FpExtY = B.buildFPExt(DstTy, Y).getReg(0);
    B.buildInstr(PreferredFusedOpcode, {Dst}, {FpExtX, FpExtY, Z});
  };

  MachineInstr *FMulMI;
  // fold (fsub (fpext (fneg (fmul x, y))), z) ->
  //      (fneg (fma (fpext x), (fpext y), z))
  // fold (fsub (fneg (fpext (fmul x, y))), z) ->
  //      (fneg (fma (fpext x), (fpext y), z))
  if ((mi_match(LHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) ||
       mi_match(LHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      Register FMAReg = MRI.createGenericVirtualRegister(DstTy);
      buildMatchInfo(FMAReg, FMulMI->getOperand(1).getReg(),
                     FMulMI->getOperand(2).getReg(), RHSReg, B);
      B.buildFNeg(MI.getOperand(0).getReg(), FMAReg);
    };
    return true;
  }

  // fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
  // fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x)
  if ((mi_match(RHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) ||
       mi_match(RHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) &&
      isContractableFMul(*FMulMI, AllowFusionGlobally) &&
      TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy,
                          MRI.getType(FMulMI->getOperand(0).getReg()))) {
    MatchInfo = [=, &MI](MachineIRBuilder &B) {
      buildMatchInfo(MI.getOperand(0).getReg(), FMulMI->getOperand(1).getReg(),
                     FMulMI->getOperand(2).getReg(), LHSReg, B);
    };
    return true;
  }

  return false;
}
bool CombinerHelper::matchSelectToLogical(MachineInstr &MI,
                                          BuildFnTy &MatchInfo) {
  GSelect &Sel = cast<GSelect>(MI);
  Register DstReg = Sel.getReg(0);
  Register Cond = Sel.getCondReg();
  Register TrueReg = Sel.getTrueReg();
  Register FalseReg = Sel.getFalseReg();

  auto *TrueDef = getDefIgnoringCopies(TrueReg, MRI);
  auto *FalseDef = getDefIgnoringCopies(FalseReg, MRI);

  const LLT CondTy = MRI.getType(Cond);
  const LLT OpTy = MRI.getType(TrueReg);
  if (CondTy != OpTy || OpTy.getScalarSizeInBits() != 1)
    return false;

  // We have a boolean select.

  // select Cond, Cond, F --> or Cond, F
  // select Cond, 1, F --> or Cond, F
  auto MaybeCstTrue = isConstantOrConstantSplatVector(*TrueDef, MRI);
  if (Cond == TrueReg || (MaybeCstTrue && MaybeCstTrue->isOne())) {
    MatchInfo = [=](MachineIRBuilder &MIB) {
      MIB.buildOr(DstReg, Cond, FalseReg);
    };
    return true;
  }

  // select Cond, T, Cond --> and Cond, T
  // select Cond, T, 0 --> and Cond, T
  auto MaybeCstFalse = isConstantOrConstantSplatVector(*FalseDef, MRI);
  if (Cond == FalseReg || (MaybeCstFalse && MaybeCstFalse->isZero())) {
    MatchInfo = [=](MachineIRBuilder &MIB) {
      MIB.buildAnd(DstReg, Cond, TrueReg);
    };
    return true;
  }

  // select Cond, T, 1 --> or (not Cond), T
  if (MaybeCstFalse && MaybeCstFalse->isOne()) {
    MatchInfo = [=](MachineIRBuilder &MIB) {
      MIB.buildOr(DstReg, MIB.buildNot(OpTy, Cond), TrueReg);
    };
    return true;
  }

  // select Cond, 0, F --> and (not Cond), F
  if (MaybeCstTrue && MaybeCstTrue->isZero()) {
    MatchInfo = [=](MachineIRBuilder &MIB) {
      MIB.buildAnd(DstReg, MIB.buildNot(OpTy, Cond), FalseReg);
    };
    return true;
  }

  return false;
}
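
// Examples of the boolean-select folds above (hypothetical s1 registers,
// %zero/%one standing for constant false/true):
//   %d:_(s1) = G_SELECT %c, %one, %f    -->   %d:_(s1) = G_OR %c, %f
//   %d:_(s1) = G_SELECT %c, %t, %zero   -->   %d:_(s1) = G_AND %c, %t
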
bool CombinerHelper::matchCombineFMinMaxNaN(MachineInstr &MI,
                                            unsigned &IdxToPropagate) {
  bool PropagateNaN;
  switch (MI.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM:
    PropagateNaN = false;
    break;
  case TargetOpcode::G_FMINIMUM:
  case TargetOpcode::G_FMAXIMUM:
    PropagateNaN = true;
    break;
  }

  auto MatchNaN = [&](unsigned Idx) {
    Register MaybeNaNReg = MI.getOperand(Idx).getReg();
    const ConstantFP *MaybeCst = getConstantFPVRegVal(MaybeNaNReg, MRI);
    if (!MaybeCst || !MaybeCst->getValueAPF().isNaN())
      return false;
    IdxToPropagate = PropagateNaN ? Idx : (Idx == 1 ? 2 : 1);
    return true;
  };

  return MatchNaN(1) || MatchNaN(2);
}
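
// In effect: G_FMINNUM/G_FMAXNUM with a constant NaN operand fold to the
// other operand, while G_FMINIMUM/G_FMAXIMUM fold to the NaN operand,
// matching the IEEE-754 minNum/maximum semantics of those opcodes.
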
bool CombinerHelper::matchAddSubSameReg(MachineInstr &MI, Register &Src) {
  assert(MI.getOpcode() == TargetOpcode::G_ADD && "Expected a G_ADD");
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();

  // Helper lambda to check for opportunities for
  //   A + (B - A) -> B
  //   (B - A) + A -> B
  auto CheckFold = [&](Register MaybeSub, Register MaybeSameReg) {
    Register Reg;
    return mi_match(MaybeSub, MRI, m_GSub(m_Reg(Src), m_Reg(Reg))) &&
           Reg == MaybeSameReg;
  };
  return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
}
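
// For example (hypothetical registers), the match above fires on
//   %t:_(s32) = G_SUB %b, %a
//   %r:_(s32) = G_ADD %a, %t
// and allows %r to be replaced by %b.
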
bool CombinerHelper::matchBuildVectorIdentityFold(MachineInstr &MI,
                                                  Register &MatchInfo) {
  // This combine folds the following patterns:
  //
  //  G_BUILD_VECTOR_TRUNC (G_BITCAST(x), G_LSHR(G_BITCAST(x), k))
  //  G_BUILD_VECTOR(G_TRUNC(G_BITCAST(x)), G_TRUNC(G_LSHR(G_BITCAST(x), k)))
  //    into
  //      x
  //    if
  //      k == sizeof(VecEltTy)/2
  //      type(x) == type(dst)
  //
  //  G_BUILD_VECTOR(G_TRUNC(G_BITCAST(x)), undef)
  //    into
  //      x
  //    if
  //      type(x) == type(dst)

  LLT DstVecTy = MRI.getType(MI.getOperand(0).getReg());
  LLT DstEltTy = DstVecTy.getElementType();

  Register Lo, Hi;

  if (mi_match(
          MI, MRI,
          m_GBuildVector(m_GTrunc(m_GBitcast(m_Reg(Lo))), m_GImplicitDef()))) {
    MatchInfo = Lo;
    return MRI.getType(MatchInfo) == DstVecTy;
  }

  std::optional<ValueAndVReg> ShiftAmount;
  const auto LoPattern = m_GBitcast(m_Reg(Lo));
  const auto HiPattern = m_GLShr(m_GBitcast(m_Reg(Hi)), m_GCst(ShiftAmount));
  if (mi_match(
          MI, MRI,
          m_any_of(m_GBuildVectorTrunc(LoPattern, HiPattern),
                   m_GBuildVector(m_GTrunc(LoPattern), m_GTrunc(HiPattern))))) {
    if (Lo == Hi && ShiftAmount->Value == DstEltTy.getSizeInBits()) {
      MatchInfo = Lo;
      return MRI.getType(MatchInfo) == DstVecTy;
    }
  }

  return false;
}
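
// Sketch of the shifted form above, assuming element 0 of the destination
// vector maps to the low bits of the scalar (hypothetical registers):
//   %cast:_(s32) = G_BITCAST %x(<2 x s16>)
//   %hi:_(s32)   = G_LSHR %cast, 16
//   %dst:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC %cast, %hi
// merely reassembles %x, so %dst can be replaced by %x.
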
bool CombinerHelper::matchTruncBuildVectorFold(MachineInstr &MI,
                                               Register &MatchInfo) {
  // Replace (G_TRUNC (G_BITCAST (G_BUILD_VECTOR x, y)) with just x
  // if type(x) == type(G_TRUNC)
  if (!mi_match(MI.getOperand(1).getReg(), MRI,
                m_GBitcast(m_GBuildVector(m_Reg(MatchInfo), m_Reg()))))
    return false;

  return MRI.getType(MatchInfo) == MRI.getType(MI.getOperand(0).getReg());
}
bool CombinerHelper::matchTruncLshrBuildVectorFold(MachineInstr &MI,
                                                   Register &MatchInfo) {
  // Replace (G_TRUNC (G_LSHR (G_BITCAST (G_BUILD_VECTOR x, y)), K)) with
  //    y if K == size of vector element type
  std::optional<ValueAndVReg> ShiftAmt;
  if (!mi_match(MI.getOperand(1).getReg(), MRI,
                m_GLShr(m_GBitcast(m_GBuildVector(m_Reg(), m_Reg(MatchInfo))),
                        m_GCst(ShiftAmt))))
    return false;

  LLT MatchTy = MRI.getType(MatchInfo);
  return ShiftAmt->Value.getZExtValue() == MatchTy.getSizeInBits() &&
         MatchTy == MRI.getType(MI.getOperand(0).getReg());
}
unsigned CombinerHelper::getFPMinMaxOpcForSelect(
    CmpInst::Predicate Pred, LLT DstTy,
    SelectPatternNaNBehaviour VsNaNRetVal) const {
  assert(VsNaNRetVal != SelectPatternNaNBehaviour::NOT_APPLICABLE &&
         "Expected a NaN behaviour?");
  // Choose an opcode based off of legality or the behaviour when one of the
  // LHS/RHS may be NaN.
  switch (Pred) {
  default:
    return 0;
  case CmpInst::FCMP_UGT:
  case CmpInst::FCMP_UGE:
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE:
    if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER)
      return TargetOpcode::G_FMAXNUM;
    if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN)
      return TargetOpcode::G_FMAXIMUM;
    if (isLegal({TargetOpcode::G_FMAXNUM, {DstTy}}))
      return TargetOpcode::G_FMAXNUM;
    if (isLegal({TargetOpcode::G_FMAXIMUM, {DstTy}}))
      return TargetOpcode::G_FMAXIMUM;
    return 0;
  case CmpInst::FCMP_ULT:
  case CmpInst::FCMP_ULE:
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_OLE:
    if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_OTHER)
      return TargetOpcode::G_FMINNUM;
    if (VsNaNRetVal == SelectPatternNaNBehaviour::RETURNS_NAN)
      return TargetOpcode::G_FMINIMUM;
    if (isLegal({TargetOpcode::G_FMINNUM, {DstTy}}))
      return TargetOpcode::G_FMINNUM;
    if (!isLegal({TargetOpcode::G_FMINIMUM, {DstTy}}))
      return 0;
    return TargetOpcode::G_FMINIMUM;
  }
}
CombinerHelper::SelectPatternNaNBehaviour
CombinerHelper::computeRetValAgainstNaN(Register LHS, Register RHS,
                                        bool IsOrderedComparison) const {
  bool LHSSafe = isKnownNeverNaN(LHS, MRI);
  bool RHSSafe = isKnownNeverNaN(RHS, MRI);
  // Completely unsafe.
  if (!LHSSafe && !RHSSafe)
    return SelectPatternNaNBehaviour::NOT_APPLICABLE;
  if (LHSSafe && RHSSafe)
    return SelectPatternNaNBehaviour::RETURNS_ANY;
  // An ordered comparison will return false when given a NaN, so it
  // returns the RHS.
  if (IsOrderedComparison)
    return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_NAN
                   : SelectPatternNaNBehaviour::RETURNS_OTHER;
  // An unordered comparison will return true when given a NaN, so it
  // returns the LHS.
  return LHSSafe ? SelectPatternNaNBehaviour::RETURNS_OTHER
                 : SelectPatternNaNBehaviour::RETURNS_NAN;
}
bool CombinerHelper::matchFPSelectToMinMax(Register Dst, Register Cond,
                                           Register TrueVal, Register FalseVal,
                                           BuildFnTy &MatchInfo) {
  // Match: select (fcmp cond x, y) x, y
  //        select (fcmp cond x, y) y, x
  // And turn it into fminnum/fmaxnum or fmin/fmax based off of the condition.
  LLT DstTy = MRI.getType(Dst);
  // Bail out early on pointers, since we'll never want to fold to a min/max.
  if (DstTy.isPointer())
    return false;
  // Match a floating point compare with a less-than/greater-than predicate.
  // TODO: Allow multiple users of the compare if they are all selects.
  CmpInst::Predicate Pred;
  Register CmpLHS, CmpRHS;
  if (!mi_match(Cond, MRI,
                m_OneNonDBGUse(
                    m_GFCmp(m_Pred(Pred), m_Reg(CmpLHS), m_Reg(CmpRHS)))) ||
      CmpInst::isEquality(Pred))
    return false;
  SelectPatternNaNBehaviour ResWithKnownNaNInfo =
      computeRetValAgainstNaN(CmpLHS, CmpRHS, CmpInst::isOrdered(Pred));
  if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::NOT_APPLICABLE)
    return false;
  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
    std::swap(CmpLHS, CmpRHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
    if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_NAN)
      ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_OTHER;
    else if (ResWithKnownNaNInfo == SelectPatternNaNBehaviour::RETURNS_OTHER)
      ResWithKnownNaNInfo = SelectPatternNaNBehaviour::RETURNS_NAN;
  }
  if (TrueVal != CmpLHS || FalseVal != CmpRHS)
    return false;
  // Decide what type of max/min this should be based off of the predicate.
  unsigned Opc = getFPMinMaxOpcForSelect(Pred, DstTy, ResWithKnownNaNInfo);
  if (!Opc || !isLegal({Opc, {DstTy}}))
    return false;
  // Comparisons between signed zero and zero may have different results...
  // unless we have fmaximum/fminimum. In that case, we know -0 < 0.
  if (Opc != TargetOpcode::G_FMAXIMUM && Opc != TargetOpcode::G_FMINIMUM) {
    // We don't know if a comparison between two 0s will give us a consistent
    // result. Be conservative and only proceed if at least one side is
    // non-zero.
    auto KnownNonZeroSide = getFConstantVRegValWithLookThrough(CmpLHS, MRI);
    if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero()) {
      KnownNonZeroSide = getFConstantVRegValWithLookThrough(CmpRHS, MRI);
      if (!KnownNonZeroSide || !KnownNonZeroSide->Value.isNonZero())
        return false;
    }
  }
  MatchInfo = [=](MachineIRBuilder &B) {
    B.buildInstr(Opc, {Dst}, {CmpLHS, CmpRHS});
  };
  return true;
}
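
// Example of the select-to-min/max fold above (illustrative; it requires the
// NaN and signed-zero checks to pass, e.g. a non-zero constant operand):
//   %two:_(s32) = G_FCONSTANT float 2.0
//   %c:_(s1)    = G_FCMP floatpred(olt), %x, %two
//   %d:_(s32)   = G_SELECT %c, %x, %two
// can become
//   %d:_(s32)   = G_FMINNUM %x, %two
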
bool CombinerHelper::matchSimplifySelectToMinMax(MachineInstr &MI,
                                                 BuildFnTy &MatchInfo) {
  // TODO: Handle integer cases.
  assert(MI.getOpcode() == TargetOpcode::G_SELECT);
  // Condition may be fed by a truncated compare.
  Register Cond = MI.getOperand(1).getReg();
  Register MaybeTrunc;
  if (mi_match(Cond, MRI, m_OneNonDBGUse(m_GTrunc(m_Reg(MaybeTrunc)))))
    Cond = MaybeTrunc;
  Register Dst = MI.getOperand(0).getReg();
  Register TrueVal = MI.getOperand(2).getReg();
  Register FalseVal = MI.getOperand(3).getReg();
  return matchFPSelectToMinMax(Dst, Cond, TrueVal, FalseVal, MatchInfo);
}
bool CombinerHelper::matchRedundantBinOpInEquality(MachineInstr &MI,
                                                   BuildFnTy &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  // (X + Y) == X --> Y == 0
  // (X + Y) != X --> Y != 0
  // (X - Y) == X --> Y == 0
  // (X - Y) != X --> Y != 0
  // (X ^ Y) == X --> Y == 0
  // (X ^ Y) != X --> Y != 0
  Register Dst = MI.getOperand(0).getReg();
  CmpInst::Predicate Pred;
  Register X, Y, OpLHS, OpRHS;
  bool MatchedSub = mi_match(
      Dst, MRI,
      m_c_GICmp(m_Pred(Pred), m_Reg(X), m_GSub(m_Reg(OpLHS), m_Reg(Y))));
  if (MatchedSub && X != OpLHS)
    return false;
  if (!MatchedSub) {
    if (!mi_match(Dst, MRI,
                  m_c_GICmp(m_Pred(Pred), m_Reg(X),
                            m_any_of(m_GAdd(m_Reg(OpLHS), m_Reg(OpRHS)),
                                     m_GXor(m_Reg(OpLHS), m_Reg(OpRHS))))))
      return false;
    Y = X == OpLHS ? OpRHS : X == OpRHS ? OpLHS : Register();
  }
  MatchInfo = [=](MachineIRBuilder &B) {
    auto Zero = B.buildConstant(MRI.getType(Y), 0);
    B.buildICmp(Pred, Dst, Y, Zero);
  };
  return CmpInst::isEquality(Pred) && Y.isValid();
}
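
// For instance (hypothetical registers):
//   %s:_(s32) = G_ADD %x, %y
//   %c:_(s1)  = G_ICMP intpred(eq), %s, %x
// is simplified to
//   %zero:_(s32) = G_CONSTANT i32 0
//   %c:_(s1)     = G_ICMP intpred(eq), %y, %zero
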
bool CombinerHelper::matchShiftsTooBig(MachineInstr &MI) {
  Register ShiftReg = MI.getOperand(2).getReg();
  LLT ResTy = MRI.getType(MI.getOperand(0).getReg());
  auto IsShiftTooBig = [&](const Constant *C) {
    auto *CI = dyn_cast<ConstantInt>(C);
    return CI && CI->uge(ResTy.getScalarSizeInBits());
  };
  return matchUnaryPredicate(MRI, ShiftReg, IsShiftTooBig);
}
bool CombinerHelper::tryCombine(MachineInstr &MI) {
  if (tryCombineCopy(MI))
    return true;
  if (tryCombineExtendingLoads(MI))
    return true;
  if (tryCombineIndexedLoadStore(MI
))